
Translator tested.

pull/166/head
Petri Alapiessa, 3 months ago
commit b976e28d9d

week2/community-contributions/week2-exercise-translator.ipynb (72 changed lines)
@@ -15,7 +15,7 @@
    "\n",
    "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
    "\n",
-   "The assistant will transform your spoken English to text, then translate it German and speak it out."
+   "The assistant will transform your spoken English to text, then translate it into German and speak it out. The image on the UI is just decoration. This exercise was created on macOS with Python 3.13."
   ]
  },
  {
@ -28,12 +28,13 @@
"# Install first PortAudio, in MacOS\n", "# Install first PortAudio, in MacOS\n",
"# brew install portaudio\n", "# brew install portaudio\n",
"\n", "\n",
"\n",
"!pip install openai speechrecognition pyaudio\n" "!pip install openai speechrecognition pyaudio\n"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"id": "dcae50aa", "id": "dcae50aa",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@@ -70,7 +71,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 8,
+  "execution_count": 3,
   "id": "c5caad24",
   "metadata": {},
   "outputs": [],
@@ -89,24 +90,13 @@
   "outputs": [],
   "source": [
    "import speech_recognition as sr\n",
+   "from pydub import AudioSegment\n",
+   "from pydub.playback import play\n",
+   "import base64\n",
+   "from io import BytesIO\n",
+   "from PIL import Image\n",
    "\n",
    "\n",
-   "def recognize_speech():\n",
-   "    recognizer = sr.Recognizer()\n",
-   "    with sr.Microphone() as source:\n",
-   "        print(\"Say something...\")\n",
-   "        audio = recognizer.listen(source)\n",
-   "        try:\n",
-   "            text = recognizer.recognize_google(audio)\n",
-   "            print(f\"You said: {text}\")\n",
-   "            return text\n",
-   "        except sr.UnknownValueError:\n",
-   "            print(\"Google Speech Recognition could not understand audio\")\n",
-   "            return None\n",
-   "        except sr.RequestError as e:\n",
-   "            print(f\"Could not request results from Google Speech Recognition service; {e}\")\n",
-   "            return None\n",
-   "\n",
    "def recognize_speech(audio_file):\n",
    "    recognizer = sr.Recognizer()\n",
    "    with sr.AudioFile(audio_file) as source:\n",
@@ -130,25 +120,24 @@
    "    )\n",
    "    return response.choices[0].message.content.strip()\n",
    "\n",
-   "# If problem to find microphone, upload voice file\n",
-   "# To record a wav-file you can use Audacity:\n",
-   "# brew install --cask audacity\n",
-   "\n",
    "def process_audio(audio_file):\n",
    "    text = recognize_speech(audio_file)\n",
    "    if text:\n",
    "        response = get_chatgpt_response(text)\n",
+   "        talker(response)\n",
    "        return response\n",
    "    return \"Could not recognize speech.\"\n",
    "\n",
-   "# This is the microphone version:\n",
-   "# \n",
-   "# def process_audio():\n",
-   "#     text = recognize_speech()\n",
-   "#     if text:\n",
-   "#         response = get_chatgpt_response(text)\n",
-   "#         return response\n",
-   "#     return \"Could not recognize speech.\"\n"
+   "def talker(message):\n",
+   "    response = openai.audio.speech.create(\n",
+   "        model=\"tts-1\",\n",
+   "        voice=\"onyx\",  # Also, try replacing onyx with alloy\n",
+   "        input=message\n",
+   "    )\n",
+   "    \n",
+   "    audio_stream = BytesIO(response.content)\n",
+   "    audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n",
+   "    play(audio)"
   ]
  },
  {
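
The new talker() decodes the TTS response with pydub, which generally needs ffmpeg on the PATH to read MP3. As an alternative sketch (my assumption, not something this commit does), the same response bytes can simply be written to disk; the helper name and output path below are hypothetical.

# Alternative sketch: save the TTS audio instead of playing it via pydub/ffmpeg.
from pathlib import Path
import openai

def talker_to_file(message, out_path="translation.mp3"):  # hypothetical helper
    response = openai.audio.speech.create(
        model="tts-1",
        voice="onyx",
        input=message,
    )
    Path(out_path).write_bytes(response.content)  # same bytes the notebook feeds to pydub
    return out_path
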
@@ -159,15 +148,20 @@
   "outputs": [],
   "source": [
    "# Create Gradio interface\n",
-   "iface = gr.Interface(\n",
-   "    fn=process_audio,\n",
-   "    inputs=gr.Audio(type=\"filepath\"),\n",
-   "    outputs=\"text\",\n",
-   "    live=True, \n",
-   ")\n",
    "\n",
-   "if __name__ == \"__main__\":\n",
-   "    iface.launch()"
+   "# some image decoration to UI, just a static picture\n",
+   "image_path = \"week2-exercise-translator-berlin.webp\"\n",
+   "\n",
+   "with gr.Blocks() as ui:\n",
+   "    gr.Interface(\n",
+   "        fn=process_audio,\n",
+   "        inputs=gr.Audio(type=\"filepath\", label=\"Speak English. German translation in a moment:\"),\n",
+   "        outputs=\"text\",\n",
+   "        live=True, \n",
+   "    )\n",
+   "    gr.Image(value=image_path, label=\"Das ist Berlin\")\n",
+   "    \n",
+   "ui.launch(inbrowser=True)\n"
   ]
  },
  {
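
For reference, a self-contained sketch of the pipeline this commit wires into the Gradio UI: speech file to English text to German text. The model name, the system prompt wording, and the sample file name are assumptions of the sketch, not taken from the notebook; an OPENAI_API_KEY must be set in the environment.

# End-to-end sketch of the translator pipeline (outside Gradio).
import speech_recognition as sr
import openai

def recognize_speech(audio_file):
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_file) as source:
        audio = recognizer.record(source)       # read the whole WAV file
    return recognizer.recognize_google(audio)   # English speech -> text

def translate_to_german(text):
    response = openai.chat.completions.create(
        model="gpt-4o-mini",  # assumed model, not specified in this diff
        messages=[
            {"role": "system", "content": "Translate the user's English text into German."},  # assumed prompt
            {"role": "user", "content": text},
        ],
    )
    return response.choices[0].message.content.strip()

english = recognize_speech("hello.wav")         # "hello.wav" is a hypothetical sample
print(translate_to_german(english))
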
