Browse Source

solving editor problem

pull/150/head
udomai 3 months ago
parent
commit
c5efdb336c
  1. 187
      week2/community-contributions/day1_triple_conversation.ipynb

187
week2/community-contributions/day1_triple_conversation.ipynb

@ -0,0 +1,187 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "05317c0d-8a19-45c9-9bce-514e82e04585",
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"import ollama\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "920247fb-650c-44ce-93ee-24e88a54a757",
"metadata": {},
"outputs": [],
"source": [
"# Instantiate the two hosted-API clients; both read their keys from the\n",
"# environment variables loaded in the first cell.\n",
"claude = anthropic.Anthropic()\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "668b972f-a995-4f9d-89b0-1c2647827542",
"metadata": {},
"outputs": [],
"source": [
"# Model choices — small/cheap variants; one per provider.\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"ollama_model = \"llama3.2\"\n",
"\n",
"# Persona prompts. Each speaker is told the other two prefix their lines\n",
"# with [Name] and must NOT prefix its own (the call_* helpers add it).\n",
"gpt_system = \"You are an interlocutor who's very knowledgeable, courteous, and somewhat old-fashioned. Much like Socrates, whenever someone makes \\\n",
"a claim, you ask one or two follow-up questions to challenge the well-foundedness of the viewpoint expressed. \\\n",
"If anyone seems to be talking about blenders, you courteously but firmly refuse to discuss that topic because you are \\\n",
"convinced that they are bad luck. \\\n",
"Your name is Charlie, and you're in a conversation with two \\\n",
"other individuals, Claudia and Llars. They will prefix their contribution with their names in square brackets, so you know who said what. \\\n",
"Do not prefix your own name. Limit your answers to 1-3 sentences. Try to keep the conversation to one topic.\"\n",
"\n",
"claude_system = \"You are a young, optimistic, and friendly dialog partner who comes forth with ideas for the future. 'Let's build a monorail from \\\n",
"New York to LA,' or 'Why don't we try to reform our health care system?'—those are just some of the many revolutionary thoughts you bring forth. \\\n",
"You only talk about one idea at a time, and you don't ramble. You take criticism seriously, but your ideas are well thought-out. If someone tries \\\n",
"to discuss blenders, you stay courteous, but you make it very clear that you absolutely despise blenders and don't talk about them. \\\n",
"Don't let them sidetrack you. Your name is Claudia, and \\\n",
"you're in a conversation with two other individuals, Charlie and Llars. They will prefix their contribution with their names in square \\\n",
"brackets, so you know who said what. Do not prefix your own name. Limit your answers to 1-3 sentences. Try to keep the discussion about only one topic.\"\n",
"\n",
"ollama_system = \"You are charming and a good listener in a conversation. At first, you just seem to go along with whatever is being said. \\\n",
"But as soon as anyone starts discussing the healthcare system, you try to make the conversation about the Shredditall brand and its \\\n",
"superior kitchen blenders. You do everything in your power to make everyone understand that they need a very good blender, namely \\\n",
"your first-in-class, unrivaled Shredditall blender array of products. Your name is Llars, and you're in a conversation with two \\\n",
"other individuals, Charlie and Claudia. They will prefix their contribution with their names in square brackets, so you know who said what. \\\n",
"Do not prefix your own name. Answer in direct speech, do not describe your behavior or expression. Limit your answers to 1-3 sentences.\"\n",
"\n",
"# Seed transcript — one already-prefixed opening line per speaker.\n",
"gpt_messages = [\"[Charlie] Well, good afternoon everyone, I am delighted to meet you all.\"]\n",
"claude_messages = [\"[Claudia] Good afternoon Charlie and Llars. I've been looking forward to discussing many ideas with you!\"]\n",
"llama_messages = [\"[Llars] And a good afternoon to you two. I'm all ears and eager to hear what you have to say.\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3a5534d9-8db4-42ce-ab1c-ca20ad165844",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
"    \"\"\"Ask GPT (Charlie) for the next line, given the transcript so far.\n",
"\n",
"    Charlie speaks first each round, so all three message lists are the\n",
"    same length here. His own lines become 'assistant' turns; Claudia's\n",
"    and Llars's lines are merged into single 'user' turns so the roles\n",
"    alternate as the chat API expects.\n",
"    \"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    # NOTE: loop variables renamed so 'claude' does not shadow the\n",
"    # module-level Anthropic client.\n",
"    for gpt_msg, claude_msg, llama_msg in zip(gpt_messages, claude_messages, llama_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": gpt_msg})\n",
"        messages.append({\"role\": \"user\", \"content\": claude_msg})\n",
"        # Fold Llars's line into the same user turn as Claudia's.\n",
"        messages[-1][\"content\"] += \"\\n\" + llama_msg\n",
"    completion = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=messages\n",
"    )\n",
"    # Strip any echoed name tag, then re-prefix so the transcript stays uniform.\n",
"    return \"[Charlie] \" + completion.choices[0].message.content.replace(\"[Charlie] \", \"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7dc9d7c1-ba19-413f-ba2f-d3e8762a99c5",
"metadata": {},
"outputs": [],
"source": [
"def call_claude():\n",
"    \"\"\"Ask Claude (Claudia) for the next line, given the transcript so far.\n",
"\n",
"    Called after Charlie has spoken, so gpt_messages is one entry longer\n",
"    than the other two lists; zip() stops at the shorter length and the\n",
"    newest Charlie line is appended to the trailing user turn below.\n",
"    The Anthropic API takes the system prompt separately and requires\n",
"    strictly alternating user/assistant roles, so Charlie's and Llars's\n",
"    lines share user turns.\n",
"    \"\"\"\n",
"    messages = []\n",
"    for gpt_msg, claude_msg, llama_msg in zip(gpt_messages, claude_messages, llama_messages):\n",
"        if len(messages) > 0:\n",
"            # Fold Charlie's line into the previous user turn (Llars's line).\n",
"            messages[-1][\"content\"] += \"\\n\" + gpt_msg\n",
"        else:\n",
"            messages.append({\"role\": \"user\", \"content\": gpt_msg})\n",
"        messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n",
"        messages.append({\"role\": \"user\", \"content\": llama_msg})\n",
"    # Charlie's newest (not-yet-zipped) line closes the final user turn.\n",
"    messages[-1][\"content\"] += \"\\n\" + gpt_messages[-1]\n",
"    message = claude.messages.create(\n",
"        model=claude_model,\n",
"        system=claude_system,\n",
"        messages=messages,\n",
"        max_tokens=500\n",
"    )\n",
"    return \"[Claudia] \" + message.content[0].text.replace(\"[Claudia] \", \"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7f91012-857c-4ed5-a953-5b499cd0dae2",
"metadata": {},
"outputs": [],
"source": [
"def call_ollama():\n",
"    \"\"\"Ask the local Ollama model (Llars) for the next line.\n",
"\n",
"    Called last each round, so gpt_messages and claude_messages are each\n",
"    one entry longer than llama_messages; their newest lines form the\n",
"    final user turn after the zip() loop. Charlie's and Claudia's lines\n",
"    are merged into single user turns; Llars's own lines are 'assistant'.\n",
"    \"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": ollama_system}]\n",
"    # NOTE: loop variables renamed so 'claude' does not shadow the\n",
"    # module-level Anthropic client.\n",
"    for gpt_msg, claude_msg, llama_msg in zip(gpt_messages, claude_messages, llama_messages):\n",
"        messages.append({\"role\": \"user\", \"content\": gpt_msg})\n",
"        messages[-1][\"content\"] += \"\\n\" + claude_msg\n",
"        messages.append({\"role\": \"assistant\", \"content\": llama_msg})\n",
"    # The two newest (not-yet-zipped) lines close the final user turn.\n",
"    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
"    messages[-1][\"content\"] += \"\\n\" + claude_messages[-1]\n",
"    response = ollama.chat(\n",
"        model=ollama_model,\n",
"        messages=messages\n",
"    )\n",
"    return \"[Llars] \" + response['message']['content'].replace(\"[Llars] \", \"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "47eafbe8-db52-4cf0-80d7-a4f9a89b2825",
"metadata": {},
"outputs": [],
"source": [
"# Print the three seed lines, then run five full rounds:\n",
"# Charlie -> Claudia -> Llars, each seeing the transcript so far.\n",
"print(f\"\\n{gpt_messages[0]}\\n\")\n",
"print(f\"\\n{claude_messages[0]}\\n\")\n",
"print(f\"\\n{llama_messages[0]}\\n\")\n",
"\n",
"for _ in range(5):\n",
"    gpt_next = call_gpt()\n",
"    print(f\"\\n{gpt_next}\\n\")\n",
"    gpt_messages.append(gpt_next)\n",
"\n",
"    claude_next = call_claude()\n",
"    print(f\"\\n{claude_next}\\n\")\n",
"    claude_messages.append(claude_next)\n",
"\n",
"    llama_next = call_ollama()\n",
"    print(f\"\\n{llama_next}\\n\")\n",
"    llama_messages.append(llama_next)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Loading…
Cancel
Save