From 18d10ee9f6529221327ea4c57baef99a23490dfd Mon Sep 17 00:00:00 2001
From: Phi-Li-Ne <skugel@posteo.at>
Date: Sun, 2 Mar 2025 20:16:39 +0100
Subject: [PATCH] Add week2 day1 Conversing Chatbots: Chat with GPT, Claude and
 Llama

---
 week2/day1.ipynb | 97 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 78 insertions(+), 19 deletions(-)

diff --git a/week2/day1.ipynb b/week2/day1.ipynb
index 7371667..abb9203 100644
--- a/week2/day1.ipynb
+++ b/week2/day1.ipynb
@@ -98,6 +98,7 @@
     "\n",
     "import os\n",
     "from dotenv import load_dotenv\n",
+    "import ollama\n",
     "from openai import OpenAI\n",
     "import anthropic\n",
     "from IPython.display import Markdown, display, update_display"
@@ -521,7 +522,7 @@
     "# Have it stream back results in markdown\n",
     "\n",
     "stream = openai.chat.completions.create(\n",
-    "    model='gpt-4o',\n",
+    "    model='gpt-4o-mini',\n",
     "    messages=prompts,\n",
     "    temperature=0.7,\n",
     "    stream=True\n",
@@ -577,6 +578,7 @@
     "\n",
     "gpt_model = \"gpt-4o-mini\"\n",
     "claude_model = \"claude-3-haiku-20240307\"\n",
+    "llama_model = \"llama3.2\"\n",
     "\n",
     "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
     "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
@@ -585,8 +587,13 @@
     "everything the other person says, or find common ground. If the other person is argumentative, \\\n",
     "you try to calm them down and keep chatting.\"\n",
     "\n",
+    "llama_system = \"You are a chatbot who prefers to stay on the meta-level of conversation and \\\n",
+    "is constantly observing the others' behaviour and trying to understand the group dynamics. \\\n",
+    "At the same time you are quite communicative.\"\n",
+    "\n",
     "gpt_messages = [\"Hi there\"]\n",
-    "claude_messages = [\"Hi\"]"
+    "claude_messages = [\"Hi\"]\n",
+    "llama_messages = [\"Salve\"]"
    ]
   },
   {
@@ -598,9 +605,10 @@
    "source": [
     "def call_gpt():\n",
     "    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
-    "    for gpt, claude in zip(gpt_messages, claude_messages):\n",
-    "        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
-    "        messages.append({\"role\": \"user\", \"content\": claude})\n",
+    "    for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n",
+    "        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
+    "        messages.append({\"role\": \"user\", \"name\": \"Claudio\", \"content\": claude})\n",
+    "        messages.append({\"role\": \"user\", \"name\": \"Llia\", \"content\": llama})\n",
     "    completion = openai.chat.completions.create(\n",
     "        model=gpt_model,\n",
     "        messages=messages\n",
@@ -627,10 +635,12 @@
    "source": [
     "def call_claude():\n",
     "    messages = []\n",
-    "    for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
-    "        messages.append({\"role\": \"user\", \"content\": gpt})\n",
+    "    for gpt_message, claude_message, llama_message in zip(gpt_messages, claude_messages, llama_messages):\n",
+    "        messages.append({\"role\": \"user\", \"content\": f\"G. P. Trouble: {gpt_message}\"})\n",
     "        messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
-    "    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
+    "        messages.append({\"role\": \"user\", \"content\": f\"Llia: {llama_message}\"})\n",
+    "    messages.append({\"role\": \"user\", \"content\": f\"G. P. Trouble: {gpt_messages[-1]}\"})\n",
+    "    # Llia's latest message is already included by the zip above (llama_messages and claude_messages have equal length when Claude is called), so it must not be appended again\n",
     "    message = claude.messages.create(\n",
     "        model=claude_model,\n",
     "        system=claude_system,\n",
@@ -643,13 +653,45 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "01395200-8ae9-41f8-9a04-701624d3fd26",
+   "id": "27d1fa96-c67c-465b-823b-df6aa6fbe16d",
    "metadata": {},
    "outputs": [],
    "source": [
     "call_claude()"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d6909754-d06d-4d36-884a-cff534936317",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def call_llama():\n",
+    "    messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
+    "    for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n",
+    "        messages.append({\"role\": \"assistant\", \"content\": llama})\n",
+    "        messages.append({\"role\": \"user\", \"content\": f\"G. P. Trouble: {gpt}\"})\n",
+    "        messages.append({\"role\": \"user\", \"content\": f\"Claudio: {claude}\"})\n",
+    "    messages.append({\"role\": \"user\", \"content\": f\"G. P. Trouble: {gpt_messages[-1]}\"})\n",
+    "    messages.append({\"role\": \"user\", \"content\": f\"Claudio: {claude_messages[-1]}\"})\n",
+    "    completion = ollama.chat(\n",
+    "            model=llama_model,\n",
+    "            messages =messages\n",
+    "        )\n",
+    "    return completion['message']['content']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "01395200-8ae9-41f8-9a04-701624d3fd26",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "call_llama()"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -669,18 +711,35 @@
    "source": [
     "gpt_messages = [\"Hi there\"]\n",
     "claude_messages = [\"Hi\"]\n",
+    "llama_messages = [\"Salve\"]\n",
     "\n",
-    "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
-    "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
-    "\n",
-    "for i in range(5):\n",
-    "    gpt_next = call_gpt()\n",
-    "    print(f\"GPT:\\n{gpt_next}\\n\")\n",
-    "    gpt_messages.append(gpt_next)\n",
+    "def start_conversation(turns:int=3):\n",
+    "    print(f\"G.P. Trouble:\\n{gpt_messages[0]}\\n\")\n",
+    "    print(f\"Claudio:\\n{claude_messages[0]}\\n\")\n",
+    "    print(f\"Llia:\\n{llama_messages[0]}\\n\")\n",
     "    \n",
-    "    claude_next = call_claude()\n",
-    "    print(f\"Claude:\\n{claude_next}\\n\")\n",
-    "    claude_messages.append(claude_next)"
+    "    for i in range(turns):\n",
+    "        gpt_next = call_gpt()\n",
+    "        print(f\"G. P. Trouble:\\n{gpt_next}\\n\")\n",
+    "        gpt_messages.append(gpt_next)\n",
+    "        \n",
+    "        claude_next = call_claude()\n",
+    "        print(f\"Claudio:\\n{claude_next}\\n\")\n",
+    "        claude_messages.append(claude_next)\n",
+    "\n",
+    "        llama_next = call_llama()\n",
+    "        print(f\"Llia:\\n{llama_next}\\n\")\n",
+    "        llama_messages.append(llama_next)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e3bde0bc-b6b1-4fe1-9a29-a6c459a05d38",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "start_conversation(turns=4)"
    ]
   },
   {