From the Udemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
{
 "cells": [
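  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A small tutor notebook: type in a question, pick a model from the dropdown (Claude, LLaMA via a local Ollama server, or GPT), and the answer streams into the output as Markdown."
   ]
  },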
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import anthropic\n",
    "import openai\n",
    "import ipywidgets as widgets\n",
    "from IPython.display import display, Markdown, update_display\n",
    "from dotenv import load_dotenv\n",
    "import requests\n",
    "import json\n",
    "\n",
    "MODEL_CLAUDE = 'claude-3-5-sonnet-20241022'\n",
    "MODEL_LLAMA = 'llama3.2'\n",
    "MODEL_GPT = 'gpt-4o-mini'\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "# Define models\n",
    "models = [\n",
    "    ('Claude (Anthropic)', MODEL_CLAUDE),\n",
    "    ('LLaMA (Meta)', MODEL_LLAMA),\n",
    "    ('GPT (OpenAI)', MODEL_GPT)\n",
    "]\n",
    "\n",
    "model_dropdown = widgets.Dropdown(\n",
    "    options=[('', None)] + [(model[0], model[0]) for model in models],\n",
    "    value=None,\n",
    "    description='Model:',\n",
    "    style={'description_width': 'initial'}\n",
    ")\n",
    "\n",
    "selected_model = \"\"\n",
    "\n",
    "text = input(\"Hello, I am your personal tutor. Please ask me a question regarding your code: \")\n",
    "\n",
    "system_prompt = \"You are a helpful technical tutor who answers questions about programming, software engineering, data science and LLMs\"\n",
    "user_prompt = \"Please give a detailed explanation to the following question: \" + text\n",
    "\n",
    "messages = [\n",
    "    {\"role\": \"system\", \"content\": system_prompt},\n",
    "    {\"role\": \"user\", \"content\": user_prompt}\n",
    "]\n",
    "\n",
    "# Get gpt-4o-mini to answer, with streaming\n",
    "def get_gpt_response():\n",
    "    stream = openai.chat.completions.create(model=MODEL_GPT, messages=messages, stream=True)\n",
    "\n",
    "    response = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    for chunk in stream:\n",
    "        response += chunk.choices[0].delta.content or ''\n",
    "        # Crude cleanup: strip code fences and the literal word 'markdown' so the reply renders cleanly\n",
    "        response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
    "        update_display(Markdown(f\"**Question:** {text}\\n\\n**Answer:** {response}\"), display_id=display_handle.display_id)\n",
    "    return response\n",
    "\n",
    "# Get Llama 3.2 to answer, with streaming\n",
    "def get_llama_response():\n",
    "    api_url = \"http://localhost:11434/api/chat\"\n",
    "    payload = {\n",
    "        \"model\": MODEL_LLAMA,\n",
    "        \"messages\": messages,\n",
    "        \"stream\": True\n",
    "    }\n",
    "    response = requests.post(api_url, json=payload, stream=True)\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    result = \"\"\n",
    "\n",
    "    for line in response.iter_lines():\n",
    "        if line:\n",
    "            json_response = json.loads(line)\n",
    "            if \"message\" in json_response:\n",
    "                content = json_response[\"message\"].get(\"content\", \"\")\n",
    "                result += content\n",
    "                update_display(Markdown(f\"**Question:** {text}\\n\\n**Answer:** {result}\"), display_id=display_handle.display_id)\n",
    "            if json_response.get(\"done\", False):\n",
    "                break\n",
    "\n",
    "    return result\n",
    "\n",
    "# Get Claude 3.5 to answer, with streaming\n",
    "def get_claude_response():\n",
    "    client = anthropic.Anthropic()\n",
    "\n",
    "    response = client.messages.create(\n",
    "        model=MODEL_CLAUDE,\n",
    "        system=system_prompt,\n",
    "        messages=[\n",
    "            {\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_prompt\n",
    "            }\n",
    "        ],\n",
    "        stream=True,\n",
    "        max_tokens=8192,\n",
    "        temperature=1,\n",
    "    )\n",
    "    result = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "\n",
    "    for chunk in response:\n",
    "        # Check if the chunk is a ContentBlockDeltaEvent\n",
    "        if hasattr(chunk, 'delta') and hasattr(chunk.delta, 'text'):\n",
    "            result += chunk.delta.text\n",
    "            update_display(Markdown(f\"**Question:** {text}\\n\\n**Answer:** {result}\"), display_id=display_handle.display_id)\n",
    "    return result\n",
    "\n",
    "def on_text_submit():\n",
    "    try:\n",
    "        display(Markdown(f\"# **Selected model: {selected_model}**\"))\n",
    "        if 'Claude' in selected_model:\n",
    "            get_claude_response()\n",
    "        elif 'LLaMA' in selected_model:\n",
    "            get_llama_response()\n",
    "        elif 'GPT' in selected_model:\n",
    "            get_gpt_response()\n",
    "    except Exception as e:\n",
    "        display(Markdown(f\"**Error:** {str(e)}\"))\n",
    "\n",
    "def on_model_select(change):\n",
    "    global selected_model\n",
    "    if change['new'] is None:\n",
    "        return  # the empty placeholder entry was selected\n",
    "    selected_model = change['new'].split(' ')[0]\n",
    "    on_text_submit()\n",
    "\n",
    "# Register callbacks\n",
    "model_dropdown.observe(on_model_select, names='value')\n",
    "\n",
    "display(model_dropdown)"
   ]
  },
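  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Requirements: a .env file with OPENAI_API_KEY and ANTHROPIC_API_KEY, plus a local Ollama server on the default http://localhost:11434 with the llama3.2 model pulled (e.g. `ollama pull llama3.2`)."
   ]
  }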
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}