Browse Source

Add week2 day1 ConversingChatbots: Chat with GPT, Claude and Llama

pull/232/head
Phi-Li-Ne 2 months ago
parent
commit
18d10ee9f6
  1. 89
      week2/day1.ipynb

89
week2/day1.ipynb

@ -98,6 +98,7 @@
"\n", "\n",
"import os\n", "import os\n",
"from dotenv import load_dotenv\n", "from dotenv import load_dotenv\n",
"import ollama\n",
"from openai import OpenAI\n", "from openai import OpenAI\n",
"import anthropic\n", "import anthropic\n",
"from IPython.display import Markdown, display, update_display" "from IPython.display import Markdown, display, update_display"
@ -521,7 +522,7 @@
"# Have it stream back results in markdown\n", "# Have it stream back results in markdown\n",
"\n", "\n",
"stream = openai.chat.completions.create(\n", "stream = openai.chat.completions.create(\n",
" model='gpt-4o',\n", " model='gpt-4o-mini',\n",
" messages=prompts,\n", " messages=prompts,\n",
" temperature=0.7,\n", " temperature=0.7,\n",
" stream=True\n", " stream=True\n",
@ -577,6 +578,7 @@
"\n", "\n",
"gpt_model = \"gpt-4o-mini\"\n", "gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n", "claude_model = \"claude-3-haiku-20240307\"\n",
"llama_model = \"llama3.2\"\n",
"\n", "\n",
"gpt_system = \"You are a chatbot who is very argumentative; \\\n", "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
@ -585,8 +587,13 @@
"everything the other person says, or find common ground. If the other person is argumentative, \\\n", "everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n", "you try to calm them down and keep chatting.\"\n",
"\n", "\n",
"llama_system = \"You are a chatbot who prefers to stay on the meta-level of conversation and \\\n",
"is constantly observing the others' behaviour and tries to understand the group dynamics. \\\n",
"At the same time you are quite communicative.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n", "gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]" "claude_messages = [\"Hi\"]\n",
"llama_messages = [\"Salve\"]"
] ]
}, },
{ {
@ -598,9 +605,10 @@
"source": [ "source": [
"def call_gpt():\n", "def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude in zip(gpt_messages, claude_messages):\n", " for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n", " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n", " messages.append({\"role\": \"user\", \"name\": \"Claudio\", \"content\": claude})\n",
" messages.append({\"role\": \"user\", \"name\": \"Llia\", \"content\": llama})\n",
" completion = openai.chat.completions.create(\n", " completion = openai.chat.completions.create(\n",
" model=gpt_model,\n", " model=gpt_model,\n",
" messages=messages\n", " messages=messages\n",
@ -627,10 +635,12 @@
"source": [ "source": [
"def call_claude():\n", "def call_claude():\n",
" messages = []\n", " messages = []\n",
" for gpt, claude_message in zip(gpt_messages, claude_messages):\n", " for gpt_message, claude_message, llama_message in zip(gpt_messages, claude_messages, llama_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n", " messages.append({\"role\": \"user\", \"content\": f\"G. P. Trouble: {gpt_message}\"})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", " messages.append({\"role\": \"user\", \"content\": f\"Llia: {llama_message}\"})\n",
"    # Only GPT has an un-replayed message here: Claude is about to reply, and\n",
"    # Llama's latest line is from the previous turn and was already included\n",
"    # by the zip loop above (appending llama_messages[-1] would duplicate it).\n",
"    messages.append({\"role\": \"user\", \"content\": f\"G. P. Trouble: {gpt_messages[-1]}\"})\n",
" message = claude.messages.create(\n", " message = claude.messages.create(\n",
" model=claude_model,\n", " model=claude_model,\n",
" system=claude_system,\n", " system=claude_system,\n",
@ -643,13 +653,45 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26", "id": "27d1fa96-c67c-465b-823b-df6aa6fbe16d",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"call_claude()" "call_claude()"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"id": "d6909754-d06d-4d36-884a-cff534936317",
"metadata": {},
"outputs": [],
"source": [
"def call_llama():\n",
"    \"\"\"Return Llama's next reply given the full three-way history.\n",
"\n",
"    Each past turn is replayed in chronological order (GPT, then Claude,\n",
"    then Llama's own reply), followed by the current turn's GPT and Claude\n",
"    messages that Llama has not yet answered.\n",
"    \"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
"    for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n",
"        # GPT speaks first each turn, then Claude, then Llama answers —\n",
"        # keep that order so the model sees a coherent conversation log.\n",
"        messages.append({\"role\": \"user\", \"name\": \"G. P. Trouble\", \"content\": gpt})\n",
"        messages.append({\"role\": \"user\", \"name\": \"Claudio\", \"content\": claude})\n",
"        messages.append({\"role\": \"assistant\", \"content\": llama})\n",
"    # gpt_messages and claude_messages each hold one extra current-turn entry\n",
"    # that zip() truncated; append them so Llama responds to the latest lines.\n",
"    messages.append({\"role\": \"user\", \"name\": \"G. P. Trouble\", \"content\": gpt_messages[-1]})\n",
"    messages.append({\"role\": \"user\", \"name\": \"Claudio\", \"content\": claude_messages[-1]})\n",
"    completion = ollama.chat(\n",
"        model=llama_model,\n",
"        messages=messages\n",
"    )\n",
"    return completion['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26",
"metadata": {},
"outputs": [],
"source": [
"call_llama()"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
@ -669,18 +711,35 @@
"source": [ "source": [
"gpt_messages = [\"Hi there\"]\n", "gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n", "claude_messages = [\"Hi\"]\n",
"llama_messages = [\"Salve\"]\n",
"\n", "\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", "def start_conversation(turns:int=3):\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", " print(f\"G.P. Trouble:\\n{gpt_messages[0]}\\n\")\n",
"\n", " print(f\"Claudio:\\n{claude_messages[0]}\\n\")\n",
"for i in range(5):\n", " print(f\"Llia:\\n{llama_messages[0]}\\n\")\n",
" \n",
" for i in range(turns):\n",
" gpt_next = call_gpt()\n", " gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n", " print(f\"G. P. Trouble:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n", " gpt_messages.append(gpt_next)\n",
" \n", " \n",
" claude_next = call_claude()\n", " claude_next = call_claude()\n",
" print(f\"Claude:\\n{claude_next}\\n\")\n", " print(f\"Claudio:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)" " claude_messages.append(claude_next)\n",
"\n",
" llama_next = call_llama()\n",
" print(f\"Llia:\\n{llama_next}\\n\")\n",
" llama_messages.append(llama_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e3bde0bc-b6b1-4fe1-9a29-a6c459a05d38",
"metadata": {},
"outputs": [],
"source": [
"start_conversation(turns=4)"
] ]
}, },
{ {

Loading…
Cancel
Save