From the uDemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
165 lines · 5.1 KiB
{ |
|
"cells": [ |
|
{ |
|
"cell_type": "markdown", |
|
"id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", |
|
"metadata": {}, |
|
"source": [ |
|
"# Additional End of week Exercise - week 2\n", |
|
"\n", |
|
"Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", |
|
"\n", |
|
"This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", |
|
"\n", |
|
"If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n", |
|
"\n", |
|
"I will publish a full solution here soon - unless someone beats me to it...\n", |
|
"\n", |
|
"There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "a07e7793-b8f5-44f4-aded-5562f633271a", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"import os\n", |
|
"from dotenv import load_dotenv\n", |
|
"\n", |
|
"import gradio as gr\n", |
|
"from IPython.display import Markdown, display, update_display\n", |
|
"from openai import OpenAI\n", |
|
"import base64\n", |
|
"from io import BytesIO\n", |
|
"from PIL import Image\n", |
|
"from IPython.display import Audio, display\n" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "5b9b086f-46bb-4801-a13d-d5574c2b8e97", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
# Model identifiers used by chat(): OpenAI's hosted gpt-4o-mini, and a
# llama3.2 model served locally by Ollama (OpenAI-compatible endpoint).
MODEL_GPT = 'gpt-4o-mini'
MODEL_LLAMA = 'llama3.2'
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "38584d6f-3bbd-4b52-9775-1650c9548884", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"# set up environment\n", |
|
"load_dotenv(override=True)\n", |
|
"api_key = os.getenv(\"OPENAI_API_KEY\")\n", |
|
"\n", |
|
"# set up clients\n", |
|
"openai = OpenAI()\n", |
|
"ollama = OpenAI(base_url=\"http://localhost:11434/v1\" , api_key=\"ollama\")\n", |
|
"\n", |
|
"# set up system prompt\n", |
|
"system_prompt = \"You are a coding tutor. If the user asks you a question, answer it to the point. If you are asked to create a code snippet, generate the code in Python and then explain it shortly.\"" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "61851048-d2d1-43ab-86d5-0a4c37181be8", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"\n", |
|
"def speak(message):\n", |
|
" response = openai.audio.speech.create(\n", |
|
" model=\"tts-1\",\n", |
|
" voice=\"alloy\",\n", |
|
" input=message)\n", |
|
"\n", |
|
" audio_stream = BytesIO(response.content)\n", |
|
" output_filename = \"output_audio.mp3\"\n", |
|
" with open(output_filename, \"wb\") as f:\n", |
|
" f.write(audio_stream.read())\n", |
|
"\n", |
|
" # Play the generated audio\n", |
|
" display(Audio(output_filename, autoplay=True))" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "4b6678d6-de73-4bfd-82c0-4d213f9aa7cd", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"def chat(message, history, model, audio=False):\n", |
|
" messages = [{\"role\": \"system\", \"content\": system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
|
" \n", |
|
" if model == \"gpt-4o-mini\":\n", |
|
" client = openai\n", |
|
" elif model == \"llama3.2\":\n", |
|
" client = ollama\n", |
|
"\n", |
|
" stream = client.chat.completions.create(\n", |
|
" model = model,\n", |
|
" messages = messages,\n", |
|
" stream = True\n", |
|
" )\n", |
|
"\n", |
|
" response = \"\"\n", |
|
" for chunk in stream:\n", |
|
" response += chunk.choices[0].delta.content or \"\"\n", |
|
" yield response\n", |
|
"\n", |
|
" if audio:\n", |
|
" speak(response)\n" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "73606604-5462-4699-98b3-d92b4a5b8276", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"with gr.Blocks() as ui:\n", |
|
" model_selection = gr.Dropdown([\"gpt-4o-mini\", \"llama3.2\"], label=\"Select model\", value=\"llama3.2\")\n", |
|
" audio_enabled = gr.Checkbox(label=\"Audio enabled\", info=\"Check the box if you want me to speak to you\")\n", |
|
" \n", |
|
" gr.ChatInterface(\n", |
|
" title=\"Coding Friend\",\n", |
|
" fn=chat,\n", |
|
" additional_inputs=[model_selection, audio_enabled],\n", |
|
" type=\"messages\"\n", |
|
" )\n", |
|
"ui.launch(inbrowser=True)" |
|
] |
|
} |
|
], |
|
"metadata": { |
|
"kernelspec": { |
|
"display_name": "Python 3 (ipykernel)", |
|
"language": "python", |
|
"name": "python3" |
|
}, |
|
"language_info": { |
|
"codemirror_mode": { |
|
"name": "ipython", |
|
"version": 3 |
|
}, |
|
"file_extension": ".py", |
|
"mimetype": "text/x-python", |
|
"name": "python", |
|
"nbconvert_exporter": "python", |
|
"pygments_lexer": "ipython3", |
|
"version": "3.11.11" |
|
} |
|
}, |
|
"nbformat": 4, |
|
"nbformat_minor": 5 |
|
}
|
|
|