diff --git a/week2/community-contributions/week2_exercise_translated_chatbot.ipynb b/week2/community-contributions/week2_exercise_translated_chatbot.ipynb new file mode 100644 index 0000000..7490107 --- /dev/null +++ b/week2/community-contributions/week2_exercise_translated_chatbot.ipynb @@ -0,0 +1,614 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", + "metadata": {}, + "source": [ + "# Additional End of week Exercise - week 2\n", + "\n", + "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", + "\n", + "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", + "\n", + "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n", + "\n", + "I will publish a full solution here soon - unless someone beats me to it...\n", + "\n", + "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." + ] + }, + { + "cell_type": "markdown", + "id": "517b0427-1f18-4ce3-96a9-3bccde59ee60", + "metadata": {}, + "source": [ + "# Overview\n" + ] + }, + { + "cell_type": "markdown", + "id": "d1e934de-8fe4-4586-bc0e-0ecb0dd5068b", + "metadata": {}, + "source": [ + "## Multimodal question answerer:\n", + " - The chatbot answers technical questions.\n", + " - The user can pick a target language; the conversation is shown in English on the first panel and translated in real time on the second.\n", + " - The user can also choose the tone/mood they would like the responses to have.\n", + " - The user can choose whether or not the response is read out loud.\n", + " - The user can send audio in and automatically get the response back as audio if they choose (a sketch of audio input appears just before the main UI cell below)."
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "90459175-a0f9-421e-bbce-49eecd2cd16a", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "import gradio as gr" + ] + },
+ { + "cell_type": "code", + "execution_count": 11, + "id": "096cdd1d-073b-45b6-80f2-090fbdcb701c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI key exists!\n", + "Anthropic API key exists!\n" + ] + } + ], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv()\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\n", + "\n", + "if openai_api_key:\n", + "    print(\"OpenAI key exists!\")\n", + "else:\n", + "    print(\"OpenAI key not set!\")\n", + "\n", + "if anthropic_api_key:\n", + "    print(\"Anthropic API key exists!\")\n", + "else:\n", + "    print(\"Anthropic API key not set!\")\n" + ] + },
+ { + "cell_type": "code", + "execution_count": 17, + "id": "bca4c5ee-6c39-4c81-b8f9-9280c66b07c0", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()\n", + "\n", + "openai_4o_mini_model = \"gpt-4o-mini\"\n" + ] + },
+ { + "cell_type": "code", + "execution_count": 18, + "id": "67599cae-9a26-4180-959c-63c74157568f", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant that answers technical questions.\"\n", + "system_message += \" Always be accurate. If you don't know or are not sure about something, say so.\"" + ] + },
+ { + "cell_type": "code", + "execution_count": 55, + "id": "5a04232e-f8a7-4e38-b22b-8fe179262920", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting deep_translator\n", + " Downloading deep_translator-1.11.4-py3-none-any.whl.metadata (30 kB)\n", + "Requirement already satisfied: beautifulsoup4<5.0.0,>=4.9.1 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from deep_translator) (4.12.3)\n", + "Requirement already satisfied: requests<3.0.0,>=2.23.0 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from deep_translator) (2.32.3)\n", + "Requirement already satisfied: soupsieve>1.2 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from beautifulsoup4<5.0.0,>=4.9.1->deep_translator) (2.5)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3.0.0,>=2.23.0->deep_translator) (3.4.1)\n", + "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3.0.0,>=2.23.0->deep_translator) (2.10)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3.0.0,>=2.23.0->deep_translator) (2.3.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3.0.0,>=2.23.0->deep_translator) (2024.12.14)\n", + "Downloading deep_translator-1.11.4-py3-none-any.whl (42 kB)\n", + "Installing collected packages: deep_translator\n", + "Successfully installed deep_translator-1.11.4\n" + ] + } + ], + "source": [ + "!pip install deep_translator" + ] + },
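+ { + "cell_type": "markdown", + "id": "3c1f9a2b-5d6e-4f70-8a91-b2c3d4e5f601", + "metadata": {}, + "source": [ + "### Optional sketch: switching between models\n", + "\n", + "The week 2 brief asks for the ability to switch between models, and the `claude` client above is created but never used. The next cell is a minimal, optional sketch of a provider switch; it is not wired into the Gradio UI further down, and the Claude model name is an assumption - use whichever Anthropic model your key supports.\n" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "id": "3c1f9a2b-5d6e-4f70-8a91-b2c3d4e5f602", + "metadata": {}, + "outputs": [], + "source": [ + "# Optional sketch: answer the same question with either OpenAI or Claude.\n", + "# The Claude model name is an assumption - swap in any model your key supports.\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "def ask_model(message, history, provider=\"OpenAI\"):\n", + "    if provider == \"Claude\":\n", + "        response = claude.messages.create(\n", + "            model=claude_model,\n", + "            system=system_message,\n", + "            messages=history + [{\"role\": \"user\", \"content\": message}],\n", + "            max_tokens=500\n", + "        )\n", + "        return response.content[0].text\n", + "    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "    response = openai.chat.completions.create(model=openai_4o_mini_model, messages=messages)\n", + "    return response.choices[0].message.content" + ] + },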
"outputs": [], + "source": [ + "from deep_translator import GoogleTranslator\n", + "\n", + "# First install deep_translator:\n", + "# pip install deep_translator\n", + "\n", + "# Top 10 most spoken languages with their codes\n", + "LANGUAGES = {\n", + " \"English\": \"en\",\n", + " \"Mandarin Chinese\": \"zh-CN\",\n", + " \"Hindi\": \"hi\",\n", + " \"Spanish\": \"es\",\n", + " \"Arabic\": \"ar\",\n", + " \"Bengali\": \"bn\",\n", + " \"Portuguese\": \"pt\",\n", + " \"Russian\": \"ru\",\n", + " \"Japanese\": \"ja\",\n", + " \"German\": \"de\"\n", + "}\n" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "id": "25654699-763a-4574-91bf-08c6068d4cd3", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "class ChatState:\n", + " def __init__(self):\n", + " self.speak = True\n", + " self.target_lang = \"en\"\n", + "\n", + "chat_state = ChatState()" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "id": "18dc8c35-f97c-4635-834a-422b5aa552c8", + "metadata": {}, + "outputs": [], + "source": [ + "def translate_message(text, target_lang):\n", + " if target_lang == \"en\":\n", + " return text\n", + " try:\n", + " translator = GoogleTranslator(source='auto', target=target_lang)\n", + " return translator.translate(text)\n", + " except:\n", + " return f\"Translation error: {text}\"" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "id": "344f6594-335e-4900-b2dd-9509e90a1389", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " # Original chat processing\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}] \n", + " response = openai.chat.completions.create(model = openai_4o_mini_model, messages = messages)\n", + " response_text = response.choices[0].message.content\n", + " \n", + " if chat_state.speak:\n", + " talker(response_text)\n", + " \n", + " # Translate messages\n", + " translated_message = translate_message(message, chat_state.target_lang)\n", + " translated_response = translate_message(response_text, chat_state.target_lang)\n", + " \n", + " gr.Chatbot.update(value=[(translated_message, translated_response)], visible=True)\n", + " \n", + " return response_text\n" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "id": "5e9af0a3-8a8d-45ff-8579-c9fad759326b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting gTTS\n", + " Downloading gTTS-2.5.4-py3-none-any.whl.metadata (4.1 kB)\n", + "Requirement already satisfied: requests<3,>=2.27 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from gTTS) (2.32.3)\n", + "Requirement already satisfied: click<8.2,>=7.1 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from gTTS) (8.1.8)\n", + "Requirement already satisfied: colorama in c:\\users\\hp\\appdata\\roaming\\python\\python311\\site-packages (from click<8.2,>=7.1->gTTS) (0.4.6)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (3.4.1)\n", + "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (2.10)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (2.3.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages 
+ { + "cell_type": "code", + "execution_count": 71, + "id": "5e9af0a3-8a8d-45ff-8579-c9fad759326b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting gTTS\n", + " Downloading gTTS-2.5.4-py3-none-any.whl.metadata (4.1 kB)\n", + "Requirement already satisfied: requests<3,>=2.27 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from gTTS) (2.32.3)\n", + "Requirement already satisfied: click<8.2,>=7.1 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from gTTS) (8.1.8)\n", + "Requirement already satisfied: colorama in c:\\users\\hp\\appdata\\roaming\\python\\python311\\site-packages (from click<8.2,>=7.1->gTTS) (0.4.6)\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (3.4.1)\n", + "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (2.10)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (2.3.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\hp\\anaconda3\\envs\\llms\\lib\\site-packages (from requests<3,>=2.27->gTTS) (2024.12.14)\n", + "Downloading gTTS-2.5.4-py3-none-any.whl (29 kB)\n", + "Installing collected packages: gTTS\n", + "Successfully installed gTTS-2.5.4\n" + ] + } + ], + "source": [ + "!pip install gTTS" + ] + },
+ { + "cell_type": "code", + "execution_count": 77, + "id": "1cd93430-0f3a-45ad-ac4c-917fd2a6c2a0", + "metadata": {}, + "outputs": [], + "source": [ + "from gtts import gTTS\n", + "import os\n", + "import tempfile\n", + "\n", + "def text_to_speech(text, lang_code):\n", + "    try:\n", + "        tts = gTTS(text=text, lang=lang_code)\n", + "        # Save to a temporary file and return its path for the Gradio audio player\n", + "        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as fp:\n", + "            tts.save(fp.name)\n", + "        return fp.name\n", + "    except Exception:\n", + "        return None" + ] + },
+ { + "cell_type": "code", + "execution_count": 74, + "id": "a7b82670-f376-4069-9a11-7ca622fcf238", + "metadata": {}, + "outputs": [], + "source": [ + "# Earlier draft of the respond handler; the Gradio UI below defines its own version.\n", + "def respond(message, history, speech_mode=\"Original\"):\n", + "    bot_response, history_original, history_translated, audio_path = process_message(\n", + "        message,\n", + "        history,\n", + "        speech_mode\n", + "    )\n", + "    return \"\", history_original, history_translated" + ] + },
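+ { + "cell_type": "markdown", + "id": "c4d5e6f7-2a3b-4c5d-9e6f-7a8b9c0d0e01", + "metadata": {}, + "source": [ + "### Optional sketch: audio input\n", + "\n", + "The overview mentions sending audio in, which the cells below do not implement. This is a minimal standalone sketch, assuming OpenAI's `whisper-1` transcription endpoint and a Gradio version that supports `gr.Audio(sources=[\"microphone\"])`; it is not wired into the Blocks UI in the next cell.\n" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "id": "c4d5e6f7-2a3b-4c5d-9e6f-7a8b9c0d0e02", + "metadata": {}, + "outputs": [], + "source": [ + "# Sketch only: transcribe a recorded clip so it can be answered like a typed question.\n", + "def transcribe(audio_path):\n", + "    if audio_path is None:\n", + "        return \"\"\n", + "    with open(audio_path, \"rb\") as audio_file:\n", + "        transcript = openai.audio.transcriptions.create(model=\"whisper-1\", file=audio_file)\n", + "    return transcript.text\n", + "\n", + "# Example wiring (uncomment to try): speak a question, see the transcript.\n", + "# gr.Interface(fn=transcribe,\n", + "#              inputs=gr.Audio(sources=[\"microphone\"], type=\"filepath\"),\n", + "#              outputs=\"text\").launch()\n", + "# The transcript can then be passed to chat() or process_message() like typed text." + ] + },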
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 79, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "def process_message(message, history, speech_mode):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}] \n", + " response = openai.chat.completions.create(model = openai_4o_mini_model, messages = messages)\n", + " response_text = response.choices[0].message.content\n", + " \n", + " # Create audio if speech is enabled\n", + " audio_path = None\n", + " if chat_state.speak:\n", + " if speech_mode == \"Translated\":\n", + " translated_text = translate_message(response_text, chat_state.target_lang)\n", + " audio_path = text_to_speech(translated_text, chat_state.target_lang)\n", + " else:\n", + " talker(response_text)\n", + " \n", + " # Translate messages for display\n", + " translated_message = translate_message(message, chat_state.target_lang)\n", + " translated_response = translate_message(response_text, chat_state.target_lang)\n", + " \n", + " history_original = history + [\n", + " {\"role\": \"user\", \"content\": message},\n", + " {\"role\": \"assistant\", \"content\": response_text}\n", + " ]\n", + " history_translated = [\n", + " {\"role\": \"user\", \"content\": translated_message},\n", + " {\"role\": \"assistant\", \"content\": translated_response}\n", + " ]\n", + " \n", + " return response_text, history_original, history_translated, audio_path\n", + "\n", + "with gr.Blocks() as demo:\n", + " speech_mode = gr.State(\"Original\")\n", + " \n", + " with gr.Row():\n", + " speak_checkbox = gr.Checkbox(\n", + " label=\"Read responses aloud\",\n", + " value=True,\n", + " interactive=True\n", + " )\n", + " language_dropdown = gr.Dropdown(\n", + " choices=list(LANGUAGES.keys()),\n", + " value=\"Spanish\",\n", + " label=\"Translation Language\",\n", + " interactive=True\n", + " )\n", + " speech_language = gr.Radio(\n", + " choices=[\"Original\", \"Translated\"],\n", + " value=\"Original\",\n", + " label=\"Speech Language\",\n", + " interactive=True\n", + " )\n", + " \n", + " # Add audio player\n", + " audio_player = gr.Audio(label=\"Response Audio\", visible=True)\n", + " \n", + " with gr.Row():\n", + " with gr.Column():\n", + " gr.Markdown(\"### Original Conversation\")\n", + " chatbot_original = gr.Chatbot(type=\"messages\")\n", + " msg_original = gr.Textbox(label=\"Message\")\n", + " send_btn = gr.Button(\"Send\")\n", + " \n", + " with gr.Column():\n", + " gr.Markdown(\"### Translated Conversation\")\n", + " chatbot_translated = gr.Chatbot(type=\"messages\")\n", + " \n", + " state = gr.State([])\n", + " \n", + " def respond(message, history, current_speech_mode):\n", + " bot_response, history_original, history_translated, audio_path = process_message(\n", + " message, \n", + " history,\n", + " current_speech_mode\n", + " )\n", + " \n", + " return \"\", history_original, history_translated, audio_path\n", + " \n", + " send_btn.click(\n", + " respond,\n", + " inputs=[msg_original, state, speech_language],\n", + " outputs=[msg_original, chatbot_original, chatbot_translated, audio_player],\n", + " )\n", + " \n", + " msg_original.submit(\n", + " respond,\n", + " inputs=[msg_original, state, speech_language],\n", + " outputs=[msg_original, 
chatbot_original, chatbot_translated, audio_player],\n", + " )\n", + " \n", + " speak_checkbox.change(fn=lambda x: setattr(chat_state, 'speak', x), inputs=[speak_checkbox])\n", + " language_dropdown.change(fn=update_language, inputs=[language_dropdown])\n", + "\n", + "demo.launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af83a223-1930-4bad-9c31-44d227847610", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "76b51235-e23c-4aad-9c51-92f57d2febfb", + "metadata": {}, + "source": [ + "### Audio" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "d7a81b5d-165e-48f8-bfc6-f4aca1cb68f1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ffmpeg version 2025-01-30-git-1911a6ec26-full_build-www.gyan.dev Copyright (c) 2000-2025 the FFmpeg developers\n", + "built with gcc 14.2.0 (Rev1, Built by MSYS2 project)\n", + "configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-lcms2 --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-libdvdnav --enable-libdvdread --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libopenjpeg --enable-libquirc --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-libqrencode --enable-librav1e --enable-libsvtav1 --enable-libvvenc --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-liblc3 --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint\n", + "libavutil 59. 56.100 / 59. 56.100\n", + "libavcodec 61. 31.101 / 61. 31.101\n", + "libavformat 61. 9.106 / 61. 9.106\n", + "libavdevice 61. 4.100 / 61. 4.100\n", + "libavfilter 10. 9.100 / 10. 9.100\n", + "libswscale 8. 13.100 / 8. 13.100\n", + "libswresample 5. 4.100 / 5. 4.100\n", + "libpostproc 58. 4.100 / 58. 
4.100\n", + "ffprobe version 2025-01-30-git-1911a6ec26-full_build-www.gyan.dev Copyright (c) 2007-2025 the FFmpeg developers\n", + "built with gcc 14.2.0 (Rev1, Built by MSYS2 project)\n", + "configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-lcms2 --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-libdvdnav --enable-libdvdread --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libopenjpeg --enable-libquirc --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-libqrencode --enable-librav1e --enable-libsvtav1 --enable-libvvenc --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-liblc3 --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint\n", + "libavutil 59. 56.100 / 59. 56.100\n", + "libavcodec 61. 31.101 / 61. 31.101\n", + "libavformat 61. 9.106 / 61. 9.106\n", + "libavdevice 61. 4.100 / 61. 4.100\n", + "libavfilter 10. 9.100 / 10. 9.100\n", + "libswscale 8. 13.100 / 8. 13.100\n", + "libswresample 5. 4.100 / 5. 4.100\n", + "libpostproc 58. 4.100 / 58. 
4.100\n", + "ffplay version 2025-01-30-git-1911a6ec26-full_build-www.gyan.dev Copyright (c) 2003-2025 the FFmpeg developers\n", + "built with gcc 14.2.0 (Rev1, Built by MSYS2 project)\n", + "configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-lcms2 --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-libdvdnav --enable-libdvdread --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libopenjpeg --enable-libquirc --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-libqrencode --enable-librav1e --enable-libsvtav1 --enable-libvvenc --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-liblc3 --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint\n", + "libavutil 59. 56.100 / 59. 56.100\n", + "libavcodec 61. 31.101 / 61. 31.101\n", + "libavformat 61. 9.106 / 61. 9.106\n", + "libavdevice 61. 4.100 / 61. 4.100\n", + "libavfilter 10. 9.100 / 10. 9.100\n", + "libswscale 8. 13.100 / 8. 13.100\n", + "libswresample 5. 4.100 / 5. 4.100\n", + "libpostproc 58. 4.100 / 58. 
4.100\n" + ] + } + ], + "source": [ + "!ffmpeg -version\n", + "!ffprobe -version\n", + "!ffplay -version" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "1a04820d-ff9d-4868-8c98-969548c5402a", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "7c05863a-631b-4e05-9bc0-267400595ac2", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model = \"tts-1\",\n", + " voice = \"onyx\",\n", + " input = message\n", + " )\n", + "\n", + " audio_stream = BytesIO(response.content)\n", + " output_filename = \"output_audio.mp3\"\n", + " with open(output_filename, \"wb\") as f:\n", + " f.write(audio_stream.read())\n", + "\n", + " #Play the generated audio\n", + " display(Audio(output_filename, autoplay=True))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "fcf1bcf8-4a96-4243-950a-214bcd8b916b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "talker(\"Warm, wet, and wild?! There must be something in the water!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63fed0a2-5c2b-408d-9427-6e59b3f99e4f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}