
finish last two sections

pull/17/head
Roger Gomez, 6 months ago
commit 3b77e82a13
week2/community-contributions/day1-azure-aws-ollama.ipynb (238 lines changed)

@@ -81,7 +81,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 40,
+"execution_count": null,
 "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
 "metadata": {},
 "outputs": [],
@@ -99,7 +99,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 41,
+"execution_count": null,
 "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
 "metadata": {},
 "outputs": [],
@@ -113,21 +113,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 42,
+"execution_count": null,
 "id": "c5c0df5e",
 "metadata": {},
-"outputs": [
- {
- "data": {
- "text/plain": [
- "True"
- ]
- },
- "execution_count": 42,
- "metadata": {},
- "output_type": "execute_result"
- }
-],
+"outputs": [],
 "source": [
 "# load the environment variables\n",
 "load_dotenv()"
@@ -135,18 +124,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 43,
+"execution_count": null,
 "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Hello! How can I assist you today?\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# Test that AZURE works\n",
 "AZURE_MODEL = \"gpt-4o\"\n",
@@ -167,18 +148,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 44,
+"execution_count": null,
 "id": "0d5fe363",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "I'm doing well, thanks for asking! I'm Claude, an AI assistant created by Anthropic.\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# Test that AWS works\n",
 "AWS_MODEL = \"anthropic.claude-3-sonnet-20240229-v1:0\"\n",
@@ -204,23 +177,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 45,
+"execution_count": null,
 "id": "a92f86d4",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- " pong\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# Test ollama using OpenAI API\n",
 "OLLAMA_MODEL='qwen2.5'\n",
+"print(os.getenv('OPENAI_BASE_URL'))\n",
 "client_ollama = OpenAI(\n",
-" base_url=os.getenv('OPENAI_BASE_URL')\n",
+" base_url=os.getenv('OPENAI_BASE_URL'),\n",
+" api_key='123'\n",
 " )\n",
 "response = client_ollama.chat.completions.create(model=OLLAMA_MODEL, messages=messages)\n",
 "print(response.choices[0].message.content)"
@@ -228,7 +195,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 46,
+"execution_count": null,
 "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
 "metadata": {},
 "outputs": [],
@@ -263,7 +230,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 47,
+"execution_count": null,
 "id": "378a0296-59a2-45c6-82eb-941344d3eeff",
 "metadata": {},
 "outputs": [],
@@ -274,7 +241,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 48,
+"execution_count": null,
 "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
 "metadata": {},
 "outputs": [],
@@ -287,20 +254,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 49,
+"execution_count": null,
 "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Why did the data scientist go broke?\n",
- "\n",
- "Because he couldn't find a good algorithm for saving!\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# GPT-4o\n",
 "def call_azure(model=AZURE_MODEL, temp=0.5):\n",
@@ -311,27 +268,15 @@
 " )\n",
 " completion = openai.chat.completions.create(model=model, messages=prompts, temperature=temp)\n",
 " return completion.choices[0].message.content\n",
-"print(call_azure('gpt-4o'))\n",
-"# completion = client_azure.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n",
-"# print(completion.choices[0].message.content)"
+"print(call_azure('gpt-4o'))"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 50,
+"execution_count": null,
 "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Why did the data scientist bring a ladder to work?\n",
- "\n",
- "Because they wanted to reach new heights in their analysis!\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# GPT-4o-mini\n",
 "# Temperature setting controls creativity\n",
@@ -341,20 +286,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 51,
+"execution_count": null,
 "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Why did the data scientist go broke?\n",
- "\n",
- "Because he couldn't find any value in his SQL statements!\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# GPT-4o\n",
 "\n",
@@ -363,20 +298,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 52,
+"execution_count": null,
 "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Here's a light-hearted joke for an audience of Data Scientists:\n",
- "\n",
- "Why did the data scientist bring a ladder to work? Because they needed to access the higher-level data!\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# AWS with Claude 3.5 Sonnet\n",
 "# API needs system message provided separately from user prompt\n",
@@ -407,21 +332,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 53,
+"execution_count": null,
 "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Here's a light-hearted joke for data scientists:\n",
- "\n",
- "Why did the data scientist get a puppy?\n",
- "Because he wanted to train a naive dog."
- ]
- }
-],
+"outputs": [],
 "source": [
 "# AWS with Claude 3.5 Sonnet\n",
 "# Now let's add in streaming back results\n",
@@ -453,25 +367,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 54,
+"execution_count": null,
 "id": "12374cd3",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Why did the data scientist name his algorithm \"gaussian\"?\n",
- "\n",
- "Because he was really normal!"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# Call Ollama\n",
 "def call_ollama_stream(model=OLLAMA_MODEL, temp=0.5):\n",
 " openai = OpenAI(\n",
-" base_url=os.getenv('OPENAI_BASE_URL')\n",
+" base_url=os.getenv('OPENAI_BASE_URL'),\n",
+" api_key='123'\n",
 " )\n",
 " stream = openai.chat.completions.create(model=model, messages=prompts, temperature=temp, stream=True)\n",
 " for chunk in stream:\n",
@@ -483,21 +388,10 @@
 },
 {
 "cell_type": "code",
-"execution_count": 55,
+"execution_count": null,
 "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
 "metadata": {},
-"outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Why was the data scientist sad? \n",
- "\n",
- "Because they didn't get any arrays. (A-rays!)\n",
- "\n"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# The API for Gemini has a slightly different structure\n",
 "\n",
@@ -511,7 +405,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 56,
+"execution_count": null,
 "id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
 "metadata": {},
 "outputs": [],
@@ -526,35 +420,25 @@
 },
 {
 "cell_type": "code",
-"execution_count": 57,
+"execution_count": null,
 "id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
 "metadata": {},
-"outputs": [
- {
- "ename": "NameError",
- "evalue": "name 'openai' is not defined",
- "output_type": "error",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[0;32mIn[57], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Have it stream back results in markdown\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m stream \u001b[38;5;241m=\u001b[39m \u001b[43mopenai\u001b[49m\u001b[38;5;241m.\u001b[39mchat\u001b[38;5;241m.\u001b[39mcompletions\u001b[38;5;241m.\u001b[39mcreate(\n\u001b[1;32m 4\u001b[0m model\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mgpt-4o\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 5\u001b[0m messages\u001b[38;5;241m=\u001b[39mprompts,\n\u001b[1;32m 6\u001b[0m temperature\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.7\u001b[39m,\n\u001b[1;32m 7\u001b[0m stream\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 8\u001b[0m )\n\u001b[1;32m 10\u001b[0m reply \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 11\u001b[0m display_handle \u001b[38;5;241m=\u001b[39m display(Markdown(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m), display_id\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n",
- "\u001b[0;31mNameError\u001b[0m: name 'openai' is not defined"
- ]
- }
-],
+"outputs": [],
 "source": [
 "# Have it stream back results in markdown\n",
 "\n",
-"stream = openai.chat.completions.create(\n",
-" model='gpt-4o',\n",
-" messages=prompts,\n",
-" temperature=0.7,\n",
-" stream=True\n",
+"def call_azure_stream(model=AZURE_MODEL, temp=0.5):\n",
+" openai = AzureOpenAI(\n",
+" api_key=os.getenv('AZURE_OPENAI_API_KEY'),\n",
+" azure_endpoint=os.getenv('AZURE_OPENAI_ENDPOINT'),\n",
+" api_version=\"2024-08-01-preview\",\n",
 " )\n",
-"\n",
+" return openai.chat.completions.create(model=model, messages=prompts, temperature=temp, stream=True)\n",
+"stream = call_azure_stream('gpt-4o-mini', temp=0.7)\n",
 "reply = \"\"\n",
 "display_handle = display(Markdown(\"\"), display_id=True)\n",
 "for chunk in stream:\n",
+" if chunk.choices:\n",
 " reply += chunk.choices[0].delta.content or ''\n",
 " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
 " update_display(Markdown(reply), display_id=display_handle.display_id)"
@@ -601,7 +485,7 @@
 "# We're using cheap versions of models so the costs will be minimal\n",
 "\n",
 "gpt_model = \"gpt-4o-mini\"\n",
-"claude_model = \"claude-3-haiku-20240307\"\n",
+"claude_model = \"anthropic.claude-3-sonnet-20240229-v1:0\"\n",
 "\n",
 "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
 "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
@@ -622,11 +506,16 @@
 "outputs": [],
 "source": [
 "def call_gpt():\n",
+" azure_client = AzureOpenAI(\n",
+" api_key=os.getenv('AZURE_OPENAI_API_KEY'),\n",
+" azure_endpoint=os.getenv('AZURE_OPENAI_ENDPOINT'),\n",
+" api_version=\"2024-08-01-preview\",\n",
+" )\n",
 " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
 " for gpt, claude in zip(gpt_messages, claude_messages):\n",
 " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
 " messages.append({\"role\": \"user\", \"content\": claude})\n",
-" completion = openai.chat.completions.create(\n",
+" completion = azure_client.chat.completions.create(\n",
 " model=gpt_model,\n",
 " messages=messages\n",
 " )\n",
@@ -651,18 +540,23 @@
 "outputs": [],
 "source": [
 "def call_claude():\n",
+" session = boto3.Session()\n",
+" bedrock = session.client(service_name='bedrock-runtime', region_name='us-east-1')\n",
 " messages = []\n",
 " for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
-" messages.append({\"role\": \"user\", \"content\": gpt})\n",
-" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
-" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
-" message = claude.messages.create(\n",
-" model=claude_model,\n",
-" system=claude_system,\n",
+" messages.append({\"role\": \"user\", \"content\": [{\"text\": gpt }]})\n",
+" messages.append({\"role\": \"assistant\", \"content\": [{\"text\": claude_message }]})\n",
+" messages.append({\"role\": \"user\", \"content\": [{\"text\": gpt_messages[-1] }]})\n",
+" response = bedrock.converse(\n",
+" modelId=claude_model,\n",
+" system=[{\"text\":claude_system}],\n",
 " messages=messages,\n",
-" max_tokens=500\n",
+" inferenceConfig={\n",
+" \"maxTokens\": 2000,\n",
+" \"temperature\": 0\n",
+" },\n",
 " )\n",
-" return message.content[0].text"
+" return response['output']['message']['content'][0]['text']"
 ]
 },
 {
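
This hunk swaps the Anthropic SDK call (claude.messages.create) for boto3's Bedrock Converse API, which wraps every content block in a typed dict. A standalone sketch, assuming AWS credentials are configured and the account has been granted access to this model in us-east-1:

import boto3

bedrock = boto3.Session().client('bedrock-runtime', region_name='us-east-1')

response = bedrock.converse(
    modelId='anthropic.claude-3-sonnet-20240229-v1:0',
    system=[{'text': 'You are a terse assistant.'}],
    messages=[
        # Converse content is a list of typed blocks, not a plain string
        {'role': 'user', 'content': [{'text': 'Say hello in five words.'}]},
    ],
    inferenceConfig={'maxTokens': 200, 'temperature': 0},
)
print(response['output']['message']['content'][0]['text'])
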
@@ -773,7 +667,7 @@
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 3 (ipykernel)",
+"display_name": ".venv",
 "language": "python",
 "name": "python3"
 },
@@ -787,7 +681,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.12.7"
+"version": "3.9.6"
 }
 },
 "nbformat": 4,
