Translator tested.

pull/166/head
Petri Alapiessa committed b976e28d9d, 3 months ago
72  week2/community-contributions/week2-exercise-translator.ipynb

@@ -15,7 +15,7 @@
"\n",
"This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
"\n",
"The assistant will transform your spoken English to text, then translate it German and speak it out."
"The assistant will transform your spoken English to text, then translate it German and speak it out. The image on the UI is just decoration. This exercise was created on MacOS, Python 3.13."
]
},
{
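For orientation, here is a minimal sketch of the translation step this cell describes, assuming the openai >= 1.x client and an OPENAI_API_KEY in the environment; the function name translate_to_german and the gpt-4o-mini model are illustrative, not taken from the notebook.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def translate_to_german(english_text):
    # The system prompt supplies the translator expertise the exercise asks for.
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative model choice
        messages=[
            {"role": "system", "content": "You are a precise English-to-German translator."},
            {"role": "user", "content": english_text},
        ],
    )
    return response.choices[0].message.content.strip()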
@@ -28,12 +28,13 @@
"# Install first PortAudio, in MacOS\n",
"# brew install portaudio\n",
"\n",
"\n",
"!pip install openai speechrecognition pyaudio\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "dcae50aa",
"metadata": {},
"outputs": [],
@@ -70,7 +71,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 3,
"id": "c5caad24",
"metadata": {},
"outputs": [],
@@ -89,24 +90,13 @@
"outputs": [],
"source": [
"import speech_recognition as sr\n",
"from pydub import AudioSegment\n",
"from pydub.playback import play\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image\n",
"\n",
"\n",
"def recognize_speech():\n",
" recognizer = sr.Recognizer()\n",
" with sr.Microphone() as source:\n",
" print(\"Say something...\")\n",
" audio = recognizer.listen(source)\n",
" try:\n",
" text = recognizer.recognize_google(audio)\n",
" print(f\"You said: {text}\")\n",
" return text\n",
" except sr.UnknownValueError:\n",
" print(\"Google Speech Recognition could not understand audio\")\n",
" return None\n",
" except sr.RequestError as e:\n",
" print(f\"Could not request results from Google Speech Recognition service; {e}\")\n",
" return None\n",
"\n",
"def recognize_speech(audio_file):\n",
" recognizer = sr.Recognizer()\n",
" with sr.AudioFile(audio_file) as source:\n",
@@ -130,25 +120,24 @@
" )\n",
" return response.choices[0].message.content.strip()\n",
"\n",
"# If problem to find microphone, upload voice file\n",
"# To record a wav-file you can use Audacity:\n",
"# brew install --cask audacity\n",
"\n",
"def process_audio(audio_file):\n",
" text = recognize_speech(audio_file)\n",
" if text:\n",
" response = get_chatgpt_response(text)\n",
" talker(response)\n",
" return response\n",
" return \"Could not recognize speech.\"\n",
"\n",
"# This is the microphone version:\n",
"# \n",
"# def process_audio():\n",
"# text = recognize_speech()\n",
"# if text:\n",
"# response = get_chatgpt_response(text)\n",
"# return response\n",
"# return \"Could not recognize speech.\"\n"
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\", # Also, try replacing onyx with alloy\n",
" input=message\n",
" )\n",
" \n",
" audio_stream = BytesIO(response.content)\n",
" audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n",
" play(audio)"
]
},
{
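As the comments in the cell above suggest, the pipeline can also be exercised without a microphone by pointing process_audio at a pre-recorded wav file; a short usage sketch, where the file name is a placeholder:

# Manual test of the functions defined above: transcribe a recorded wav file,
# translate it, and speak the German result.
sample_file = "test-phrase.wav"  # placeholder; record one with Audacity, for example
print(process_audio(sample_file))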
@@ -159,15 +148,20 @@
"outputs": [],
"source": [
"# Create Gradio interface\n",
"iface = gr.Interface(\n",
" fn=process_audio,\n",
" inputs=gr.Audio(type=\"filepath\"),\n",
" outputs=\"text\",\n",
" live=True, \n",
")\n",
"\n",
"if __name__ == \"__main__\":\n",
" iface.launch()"
"# some image decoration to UI, just a static picture\n",
"image_path =\"week2-exercise-translator-berlin.webp\"\n",
"\n",
"with gr.Blocks() as ui:\n",
" gr.Interface(\n",
" fn=process_audio,\n",
" inputs=gr.Audio(type=\"filepath\", label=\"Speak English. German translation in a moment:\"),\n",
" outputs=\"text\",\n",
" live=True, \n",
" )\n",
" gr.Image(value=image_path, label=\"Das ist Berlin\")\n",
" \n",
"ui.launch(inbrowser=True)\n"
]
},
{
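The exercise also asks for the ability to switch between models; one optional variation, not part of this commit, would be to swap the Google recognizer for OpenAI's hosted Whisper transcription while keeping the same signature as recognize_speech:

def recognize_speech_whisper(audio_file):
    # Alternative transcription path using the whisper-1 model instead of
    # recognize_google; returns plain text like recognize_speech does.
    with open(audio_file, "rb") as f:
        transcript = openai.audio.transcriptions.create(model="whisper-1", file=f)
    return transcript.text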
