1 changed file with 308 additions and 0 deletions
@@ -0,0 +1,308 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4c3c6553-daa4-4a03-8017-15d0cad8f280",
"metadata": {},
"source": [
"# About Mini Project\n",
"\n",
"Mini project for hearing impaired people, using tools, suggesting songs according to a certain genre and in sign language. Speech to text converter with multiple language support." |
||||
]
},
{
"cell_type": "markdown",
"id": "a32a79cb-3d16-4b3b-a029-a059bd0b1c0b",
"metadata": {},
"source": [
"## Extra requirements\n",
"- pip install pydub simpleaudio speechrecognition pipwin pyaudio (a quick import check follows in the next cell)\n"
]
},
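{
"cell_type": "markdown",
"id": "0a1b2c3d-4e5f-4a6b-8c7d-9e0f1a2b3c4d",
"metadata": {},
"source": [
"The optional cell below is a small sketch for confirming that the extra audio dependencies are importable before running the speech cells. The module names simply mirror the pip command above (pipwin is only an install helper, so it is not checked)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1b2c3d4e-5f6a-4b7c-8d9e-0f1a2b3c4d5e",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (sketch): confirm the extra audio dependencies import cleanly.\n",
"# The module names mirror the pip requirements above; pipwin is just an installer helper.\n",
"import importlib\n",
"\n",
"for module_name in [\"pydub\", \"simpleaudio\", \"speech_recognition\", \"pyaudio\"]:\n",
"    try:\n",
"        importlib.import_module(module_name)\n",
"        print(f\"{module_name}: OK\")\n",
"    except ImportError:\n",
"        print(f\"{module_name}: missing - install it with pip\")"
]
},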
{
"cell_type": "code",
"execution_count": null,
"id": "3e214aa3-a977-434f-a436-90a89b81a5ee",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d654cb96-9bcd-4b64-bd79-2d27fa6a62d0",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
"    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
"    print(\"OpenAI API Key not set\")\n",
"    \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b2d9214f-25d0-4f09-ba88-641beeaa20db",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for hearing impaired people. \"\n", |
||||
"system_message += \"Your mission is convert text to speech and speech to text. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so.\"" |
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d9a1478-08bf-4195-8f38-34c29757012f",
"metadata": {},
"outputs": [],
"source": [
"songs_with_signs = {\n",
"    \"electronic\": (\"God is a dj\", \"https://www.youtube.com/watch?v=bhSB8EEnCAM\", \"Faithless\"), \n",
"    \"pop\": (\"Yitirmeden\", \"https://www.youtube.com/watch?v=aObdAXq1ZIo\", \"Pinhani\"), \n",
"    \"rock\": (\"Bohemian Rhapsody\", \"https://www.youtube.com/watch?v=sjln9OMOw-0\", \"Queen\")\n",
"}\n",
"\n",
"def get_songs_with_sign_language(genre):\n",
"    print(f\"Tool get_songs_with_sign_language called for {genre}\")\n",
" city = genre.lower()\n", |
||||
" return songs_with_signs.get(genre, \"Unknown\")" |
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93a3d7ee-78c2-4e19-b7e4-8239b07aaecc",
"metadata": {},
"outputs": [],
"source": [
"get_songs_with_sign_language(\"rock\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7307aa61-86fe-4c46-9f9d-faa3d1fb1eb7",
"metadata": {},
"outputs": [],
"source": [
"song_function = {\n",
"    \"name\": \"get_songs_with_sign_language\",\n",
" \"description\": \"Get the corresponding song information for the specified given music genre. Call this whenever you need to know the songs with specific genre and in sign language, for example when a customer asks 'Suggest me sign language supported songs'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"genre\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The music genre that the customer wants to listen-watch to\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"genre\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "160d790c-dda6-4c6e-b814-8be64ca7086b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"tools = [{\"type\": \"function\", \"function\": song_function}]" |
||||
] |
||||
}, |
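{
"cell_type": "markdown",
"id": "2c3d4e5f-6a7b-4c8d-9e0f-1a2b3c4d5e6f",
"metadata": {},
"source": [
"The optional cell below is a quick sketch of how the `song_function` schema is used: it sends one request with the `tools` list and prints whatever tool call the model proposes. The user message is only an illustrative example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d4e5f6a-7b8c-4d9e-a0f1-2b3c4d5e6f7a",
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: check that the model answers a genre question with a tool call.\n",
"# Uses the system_message, MODEL, openai client and tools defined above.\n",
"test_messages = [\n",
"    {\"role\": \"system\", \"content\": system_message},\n",
"    {\"role\": \"user\", \"content\": \"Suggest me a sign language supported rock song\"}\n",
"]\n",
"test_response = openai.chat.completions.create(model=MODEL, messages=test_messages, tools=tools)\n",
"print(test_response.choices[0].finish_reason)\n",
"print(test_response.choices[0].message.tool_calls)"
]
},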
{
"cell_type": "code",
"execution_count": null,
"id": "96cdf319-11cd-4be2-8830-097225047d65",
"metadata": {},
"outputs": [],
"source": [
"def handle_tool_call(message):\n", |
||||
" tool_call = message.tool_calls[0]\n", |
||||
" arguments = json.loads(tool_call.function.arguments)\n", |
||||
" genre = arguments.get('genre')\n", |
||||
" song = get_songs_with_sign_language(genre)\n", |
||||
" song_info = song[2] + \": \" + song[1]\n", |
||||
" response = {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": json.dumps({\"genre\": genre,\"song\": song_info}),\n", |
||||
" \"tool_call_id\": tool_call.id\n", |
||||
" }\n", |
||||
" return response, song[1]" |
||||
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bbd8ad0c-135b-406f-8ab9-0e1f9b58538d",
"metadata": {},
"outputs": [],
"source": [
"def chat(history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", |
||||
" genre = None\n", |
||||
" \n", |
||||
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" response, genre = handle_tool_call(message)\n", |
||||
" messages.append(message)\n", |
||||
" messages.append(response)\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" \n", |
||||
" reply = response.choices[0].message.content\n", |
||||
" history += [{\"role\":\"assistant\", \"content\":reply}]\n", |
||||
" \n", |
||||
" return history, genre" |
||||
]
},
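{
"cell_type": "markdown",
"id": "4e5f6a7b-8c9d-4e0f-b1a2-3c4d5e6f7a8b",
"metadata": {},
"source": [
"Optionally, the whole pipeline can be exercised without the UI. The sketch below hand-builds a history in the same messages format the Gradio chatbot passes in; the sample question is only an illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5f6a7b8c-9d0e-4f1a-82b3-4d5e6f7a8b9c",
"metadata": {},
"outputs": [],
"source": [
"# Optional end-to-end sketch: run chat() on a hand-built history,\n",
"# exactly as the Gradio chatbot would pass it in.\n",
"sample_history = [{\"role\": \"user\", \"content\": \"Suggest me a sign language supported pop song\"}]\n",
"new_history, video_link = chat(sample_history)\n",
"print(new_history[-1][\"content\"])\n",
"print(video_link)"
]
},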
{
"cell_type": "code",
"execution_count": null,
"id": "69f43096-3557-4218-b0de-bd286237fdeb",
"metadata": {},
"outputs": [],
"source": [
"import speech_recognition as sr\n",
"from pydub import AudioSegment\n",
"import simpleaudio as sa\n",
"\n",
"def listener():\n",
"    recognizer = sr.Recognizer()\n",
"    \n",
"    with sr.Microphone() as source:\n",
"        print(\"Listening... Speak now!\")\n",
"        recognizer.adjust_for_ambient_noise(source)  # Adjust for background noise\n",
"        audio = recognizer.listen(source)\n",
"    \n",
"    try:\n",
"        print(\"Processing speech...\")\n",
"        text = recognizer.recognize_google(audio)  # Use Google Speech-to-Text\n",
"        print(f\"You said: {text}\")\n",
"        return text\n",
"    except sr.UnknownValueError:\n",
"        print(\"Sorry, I could not understand what you said.\")\n",
"        return None\n",
"    except sr.RequestError:\n",
"        print(\"Could not request results, please check your internet connection.\")\n",
"        return None\n",
"\n",
"# Example usage:\n",
"text = listener()  # Listen for speech\n",
"if text:\n",
"    print(f\"You just said: {text}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23c9deeb-d9ad-439a-a39d-7eac9553bd5e",
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr\n",
"\n",
"# Toggle state helper (not currently wired into the UI below)\n",
"convert = gr.State(False)\n",
"def toggle_convert(current_value):\n",
"    return not current_value"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32d3ea9f-fe3c-4cc5-9902-550c63c58a69",
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr\n",
"\n",
"with gr.Blocks() as ui:\n",
"    with gr.Tab(\"Chat\") as chat_interface:\n",
"        with gr.Row():\n",
"            chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
" video = gr.HTML(f\"<a href=''> Example song will appear here </a>\")\n", |
||||
" with gr.Row():\n", |
||||
" entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n", |
||||
" with gr.Row():\n", |
||||
" clear = gr.Button(\"Clear\")\n", |
||||
" \n", |
||||
" def do_entry(message, history):\n", |
||||
" history += [{\"role\":\"user\", \"content\":message}]\n", |
||||
" return \"\", history\n", |
||||
" \n", |
||||
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", |
||||
" chat, inputs=chatbot, outputs=[chatbot, video]\n", |
||||
" )\n", |
||||
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", |
||||
" with gr.Tab(\"Speech to text converter\") as speech_to_text:\n", |
||||
" text_output = gr.Markdown(\"Press the button to start voice recognition\")\n", |
||||
" listen_button = gr.Button(\"Convert Voice to Text\")\n", |
||||
" language = gr.Dropdown([\"English\", \"Turkish\", \"Greek\", \"Arabic\"], label=\"Select output language\", value=\"English\")\n", |
||||
"\n", |
||||
" def update_text(language):\n", |
||||
" \"\"\"Calls the listener and updates the markdown output in specific language.\"\"\"\n", |
||||
" text = listener() # Replace with real speech-to-text function\n", |
||||
" system_prompt = f\"You are a useful translator. Convert text to {language}. Do not add aditional data, only translate it.\"\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": text}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" return f\"**Converted Text:** {response.choices[0].message.content}\"\n", |
||||
"\n", |
||||
" listen_button.click(update_text, inputs=[language], outputs=[text_output])\n", |
||||
"\n", |
||||
"ui.launch(inbrowser=True, share=True)\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{
"cell_type": "code",
"execution_count": null,
"id": "26814e88-ee29-414d-88a4-f19b2f94e6f4",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}