diff --git a/week2/community-contributions/day5-flight-booking-exercise-psxom3.ipynb b/week2/community-contributions/day5-flight-booking-exercise-psxom3.ipynb
index 4476279..e6654dc 100644
--- a/week2/community-contributions/day5-flight-booking-exercise-psxom3.ipynb
+++ b/week2/community-contributions/day5-flight-booking-exercise-psxom3.ipynb
@@ -446,16 +446,16 @@
"metadata": {},
"outputs": [],
"source": [
- "# Audio-to-text\n",
+ "# # Audio-to-text\n",
"\n",
- "def transcribe_audio(audio):\n",
- " with tempfile.NamedTemporaryFile(delete=False, suffix=\".wav\") as temp_audio:\n",
- " temp_audio.write(audio)\n",
- " temp_audio_path = temp_audio.name\n",
+ "# def transcribe_audio(audio):\n",
+ "# with tempfile.NamedTemporaryFile(delete=False, suffix=\".wav\") as temp_audio:\n",
+ "# temp_audio.write(audio)\n",
+ "# temp_audio_path = temp_audio.name\n",
"\n",
- " with open(temp_audio_path, \"rb\") as f:\n",
- " transcript = openai.audio.transcriptions.create(model=\"whisper-1\", file=f)\n",
- " return transcript.text\n"
+ "# with open(temp_audio_path, \"rb\") as f:\n",
+ "# transcript = openai.audio.transcriptions.create(model=\"whisper-1\", file=f)\n",
+ "# return transcript.text\n"
]
},
{
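For reference, the helper this hunk comments out follows the standard OpenAI Whisper transcription pattern. A minimal, runnable sketch, assuming `OPENAI_API_KEY` is configured and the incoming `audio` argument is raw WAV bytes (with `gr.Audio(type="filepath")` the callback actually receives a file path, in which case the temp-file step is unnecessary):

```python
# Sketch of the audio-to-text helper disabled above (assumes OPENAI_API_KEY is set
# and `audio` is raw WAV bytes rather than a file path).
import tempfile

from openai import OpenAI

openai = OpenAI()

def transcribe_audio(audio: bytes) -> str:
    # Persist the recording so it can be handed to the Whisper endpoint as a file object.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
        temp_audio.write(audio)
        temp_audio_path = temp_audio.name

    # whisper-1 returns a transcription object whose .text field holds the transcript.
    with open(temp_audio_path, "rb") as f:
        transcript = openai.audio.transcriptions.create(model="whisper-1", file=f)
    return transcript.text
```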
@@ -493,7 +493,7 @@
},
{
"cell_type": "code",
- "execution_count": 40,
+ "execution_count": 42,
"id": "6e820e50-ba7f-471d-9ac4-1a492cb41fdc",
"metadata": {},
"outputs": [
@@ -501,7 +501,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "* Running on local URL: http://127.0.0.1:7862\n",
+ "* Running on local URL: http://127.0.0.1:7864\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
@@ -509,7 +509,7 @@
{
"data": {
"text/html": [
- "
"
+ ""
],
"text/plain": [
""
@@ -522,114 +522,23 @@
"data": {
"text/plain": []
},
- "execution_count": 40,
+ "execution_count": 42,
"metadata": {},
"output_type": "execute_result"
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " "
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " "
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " "
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " "
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Tool get_ticket_price called for Paris\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "\n",
- " \n",
- " "
- ],
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
}
],
"source": [
+ "import gradio as gr\n",
+ "\n",
"# Gradio UI\n",
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
" image_output = gr.Image(height=500)\n",
" translation_output = gr.Textbox(label=\"Translation\", lines=10)\n",
+ "\n",
" with gr.Row():\n",
" entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n",
- " with gr.Row():\n",
- " mic = gr.Audio(type=\"filepath\", label=\"Speak your query\")\n",
"\n",
" with gr.Row():\n",
" clear = gr.Button(\"Clear\")\n",
@@ -641,7 +550,7 @@
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
" chat, inputs=chatbot, outputs=[chatbot, image_output, translation_output]\n",
" )\n",
- " mic.change(fn=transcribe_audio, inputs=[mic], outputs=[entry])\n",
+ "\n",
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n",
"\n",
"ui.launch(inbrowser=True)\n"