From 77a200c75f4d503cb1fca7f3ab0883ece3f12bee Mon Sep 17 00:00:00 2001 From: Lewis Jayne Date: Thu, 24 Oct 2024 10:33:04 -0400 Subject: [PATCH 1/2] adding 3 way discussion including Gemini --- week2/day1.ipynb | 182 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 181 insertions(+), 1 deletion(-) diff --git a/week2/day1.ipynb b/week2/day1.ipynb index abdc15a..5681e64 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -436,10 +436,190 @@ " claude_messages.append(claude_next)" ] }, + { + "cell_type": "markdown", + "id": "392d2072-4e5d-424c-b4b4-098d7e3ead2d", + "metadata": {}, + "source": [ + "# And now for a 3 way convo including Gemini" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb0c319c-0226-4c6d-99bc-a9167fa86005", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "gpt_system = \"You are a chatbot who is very argumentative; \\\n", + "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", + "\n", + "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "everything the other people in the conversation say, or find common ground. If another person is argumentative, \\\n", + "you try to calm them down and keep chatting.\"\n", + "\n", + "gemini_system = \"You are an extremely knowledgeable and know-it-all counselor chatbot. 
You try to help resolve disagreements, \\\n", + "and if a person is either too argumentative or too polite, you cannot help but to use quotes from famous psychologists to teach \\\n", + "your students to be kind yet maintain boundaries.\"\n", + "\n", + "gemini_instance = google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-flash',\n", + " system_instruction=gemini_system\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "041b1596-a646-4321-a89a-9a51046d9b72", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"]\n", + "gemini_messages = [\"How is everyone?\"]\n", + "gpt_name = \"Bob\"\n", + "claude_name = \"Larry\"\n", + "gemini_name = \"Frank\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fcd0a633-d506-4b68-8411-46f3fbe34752", + "metadata": {}, + "outputs": [], + "source": [ + "def construct_joined_user_msg(msg1, msg1_name, msg2, msg2_name):\n", + " return msg1_name + ' said: ' + msg1 + '. 
\\n\\nThen ' + msg2_name + ' said: ' + msg2 + '.'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4aef8ba5-1d93-4473-8c5f-707a12d8a1cf", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt(return_msgs=False):\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(claude, claude_name, gemini, gemini_name)})\n", + " if return_msgs: return messages\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36365e7a-43b4-4a0d-a241-fff1440509d3", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt(return_msgs=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ebebebe-8255-4ca5-9335-258f93e3181f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude(return_msgs=False):\n", + " messages = []\n", + " for gpt, claude_msg, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(gemini, gemini_name, gpt, gpt_name)})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_name + \" said \" + gpt_messages[-1]})\n", + " if return_msgs: return messages\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2da7997-a1fe-43fe-9970-4014f665e501", + "metadata": {}, + "outputs": [], 
+ "source": [ + "call_claude(return_msgs=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "94497894-8cb9-4da4-8671-edede74055f8", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gemini(return_msgs=False):\n", + " messages = []\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt, gpt_name, claude, claude_name)})\n", + " messages.append({\"role\": \"model\", \"parts\": gemini})\n", + " messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt_messages[-1], gpt_name, claude_messages[-1], claude_name)})\n", + " if return_msgs: return messages\n", + " message = gemini_instance.generate_content(messages)\n", + " return message.text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fddff0fc-0cc3-4473-9d55-effe445ef1ca", + "metadata": {}, + "outputs": [], + "source": [ + "call_gemini(return_msgs=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1988abf3-1986-40f0-b804-c02b54472b8c", + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT aka {gpt_name}:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"Claude aka {claude_name}:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)\n", + "\n", + " gemini_next = call_gemini()\n", + " print(f\"Gemini aka {gemini_name}:\\n{gemini_next}\\n\")\n", + " gemini_messages.append(gemini_next)" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "2618c3fa-9b8e-4280-a070-d039361b8918", + "id": "b72906b3-8c4a-4c15-8508-01118d33782a", "metadata": {}, "outputs": [], "source": [] From 
3366e86bbb7162ed973d841885fd6347730ac818 Mon Sep 17 00:00:00 2001 From: Edward Donner Date: Mon, 28 Oct 2024 10:34:43 -0400 Subject: [PATCH 2/2] Added community contribution of a 3 way with Gemini --- .../day1-with-3way.ipynb | 649 ++++++++++++++++++ week2/day1.ipynb | 178 +---- 2 files changed, 653 insertions(+), 174 deletions(-) create mode 100644 week2/community-contributions/day1-with-3way.ipynb diff --git a/week2/community-contributions/day1-with-3way.ipynb b/week2/community-contributions/day1-with-3way.ipynb new file mode 100644 index 0000000..5681e64 --- /dev/null +++ b/week2/community-contributions/day1-with-3way.ipynb @@ -0,0 +1,649 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927", + "metadata": {}, + "source": [ + "# Welcome to Week 2!\n", + "\n", + "## Frontier Model APIs\n", + "\n", + "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n", + "\n", + "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI." + ] + }, + { + "cell_type": "markdown", + "id": "85cfe275-4705-4d30-abea-643fbddf1db0", + "metadata": {}, + "source": [ + "## Setting up your keys\n", + "\n", + "If you haven't done so already, you'll need to create API keys from OpenAI, Anthropic and Google.\n", + "\n", + "For OpenAI, visit https://openai.com/api/ \n", + "For Anthropic, visit https://console.anthropic.com/ \n", + "For Google, visit https://ai.google.dev/gemini-api \n", + "\n", + "When you get your API keys, you need to set them as environment variables.\n", + "\n", + "EITHER (recommended) create a file called `.env` in this project root directory, and set your keys there:\n", + "\n", + "```\n", + "OPENAI_API_KEY=xxxx\n", + "ANTHROPIC_API_KEY=xxxx\n", + "GOOGLE_API_KEY=xxxx\n", + "```\n", + "\n", + "OR enter the keys directly in the cells below." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import google.generativeai\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic and Google\n", + "# All 3 APIs are similar\n", + "# Having problems with API files? You can use openai = OpenAI(api_key=\"your-key-here\") and same for claude\n", + "# Having problems with Google Gemini setup? Then just skip Gemini; you'll get all the experience you need from GPT and Claude.\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()\n", + "\n", + "google.generativeai.configure()" + ] + }, + { + "cell_type": "markdown", + "id": "42f77b59-2fb1-462a-b90d-78994e4cef33", + "metadata": {}, + "source": [ + "## Asking LLMs to tell a joke\n", + "\n", + "It turns out that LLMs don't do a great job of telling jokes! 
Let's compare a few models.\n", + "Later we will be putting LLMs to better use!\n", + "\n", + "### What information is included in the API\n", + "\n", + "Typically we'll pass to the API:\n", + "- The name of the model that should be used\n", + "- A system message that gives overall context for the role the LLM is playing\n", + "- A user message that provides the actual prompt\n", + "\n", + "There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "378a0296-59a2-45c6-82eb-941344d3eeff", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that is great at telling jokes\"\n", + "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", + "metadata": {}, + "outputs": [], + "source": [ + "prompts = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", + "metadata": {}, + "outputs": [], + "source": [ + "# GPT-3.5-Turbo\n", + "\n", + "completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", + "metadata": {}, + "outputs": [], + "source": [ + "# GPT-4o-mini\n", + "# Temperature setting controls creativity\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o-mini',\n", + " messages=prompts,\n", + " temperature=0.7\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", + "metadata": {}, + "outputs": [], + "source": [ + "# GPT-4o\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.4\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", + "metadata": {}, + "outputs": [], + "source": [ + "# Claude 3.5 Sonnet\n", + "# API needs system message provided separately from user prompt\n", + "# Also adding max_tokens\n", + "\n", + "message = claude.messages.create(\n", + " model=\"claude-3-5-sonnet-20240620\",\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "print(message.content[0].text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", + "metadata": {}, + "outputs": [], + "source": [ + "# Claude 3.5 Sonnet again\n", + "# Now let's add in streaming back results\n", + "\n", + "result = claude.messages.stream(\n", + " model=\"claude-3-5-sonnet-20240620\",\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "with result as stream:\n", + " for text in stream.text_stream:\n", + " print(text, end=\"\", flush=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", + "metadata": {}, + "outputs": [], + "source": [ + "# The API for Gemini has a slightly different structure\n", + "\n", + "gemini = google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-flash',\n", + " system_instruction=system_message\n", + ")\n", + "response = gemini.generate_content(user_prompt)\n", + 
"print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83ddb483-4f57-4668-aeea-2aade3a9e573", + "metadata": {}, + "outputs": [], + "source": [ + "# To be serious! GPT-4o-mini with the original question\n", + "\n", + "prompts = [\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n", + " {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution?\"}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "749f50ab-8ccd-4502-a521-895c3f0808a2", + "metadata": {}, + "outputs": [], + "source": [ + "# Have it stream back results in markdown\n", + "\n", + "stream = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.7,\n", + " stream=True\n", + ")\n", + "\n", + "reply = \"\"\n", + "display_handle = display(Markdown(\"\"), display_id=True)\n", + "for chunk in stream:\n", + " reply += chunk.choices[0].delta.content or ''\n", + " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", + " update_display(Markdown(reply), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "markdown", + "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f", + "metadata": {}, + "source": [ + "## And now for some fun - an adversarial conversation between Chatbots..\n", + "\n", + "You're already familiar with prompts being organized into lists like:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"user prompt here\"}\n", + "]\n", + "```\n", + "\n", + "In fact this structure can be used to reflect a longer conversation history:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", + " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", + " {\"role\": \"user\", 
\"content\": \"the new user prompt\"},\n", + "]\n", + "```\n", + "\n", + "And we can use this approach to engage in a longer interaction with history." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "gpt_system = \"You are a chatbot who is very argumentative; \\\n", + "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", + "\n", + "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "everything the other person says, or find common ground. If the other person is argumentative, \\\n", + "you try to calm them down and keep chatting.\"\n", + "\n", + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": claude})\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", + "metadata": {}, + 
"outputs": [], + "source": [ + "def call_claude():\n", + " messages = []\n", + " for gpt, claude_message in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "01395200-8ae9-41f8-9a04-701624d3fd26", + "metadata": {}, + "outputs": [], + "source": [ + "call_claude()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"]\n", + "\n", + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"Claude:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)" + ] + }, + { + "cell_type": "markdown", + "id": "392d2072-4e5d-424c-b4b4-098d7e3ead2d", + "metadata": {}, + "source": [ + "# And now for a 3 way convo including Gemini" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb0c319c-0226-4c6d-99bc-a9167fa86005", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", + "# We're using cheap versions of models so the costs will be 
minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "gpt_system = \"You are a chatbot who is very argumentative; \\\n", + "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", + "\n", + "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "everything the other people in the conversation say, or find common ground. If another person is argumentative, \\\n", + "you try to calm them down and keep chatting.\"\n", + "\n", + "gemini_system = \"You are an extremely knowledgeable and know-it-all counselor chatbot. You try to help resolve disagreements, \\\n", + "and if a person is either too argumentative or too polite, you cannot help but to use quotes from famous psychologists to teach \\\n", + "your students to be kind yet maintain boundaries.\"\n", + "\n", + "gemini_instance = google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-flash',\n", + " system_instruction=gemini_system\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "041b1596-a646-4321-a89a-9a51046d9b72", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"]\n", + "gemini_messages = [\"How is everyone?\"]\n", + "gpt_name = \"Bob\"\n", + "claude_name = \"Larry\"\n", + "gemini_name = \"Frank\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fcd0a633-d506-4b68-8411-46f3fbe34752", + "metadata": {}, + "outputs": [], + "source": [ + "def construct_joined_user_msg(msg1, msg1_name, msg2, msg2_name):\n", + " return msg1_name + ' said: ' + msg1 + '. 
\\n\\nThen ' + msg2_name + ' said: ' + msg2 + '.'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4aef8ba5-1d93-4473-8c5f-707a12d8a1cf", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt(return_msgs=False):\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(claude, claude_name, gemini, gemini_name)})\n", + " if return_msgs: return messages\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36365e7a-43b4-4a0d-a241-fff1440509d3", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt(return_msgs=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ebebebe-8255-4ca5-9335-258f93e3181f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude(return_msgs=False):\n", + " messages = []\n", + " for gpt, claude_msg, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(gemini, gemini_name, gpt, gpt_name)})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_name + \" said \" + gpt_messages[-1]})\n", + " if return_msgs: return messages\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2da7997-a1fe-43fe-9970-4014f665e501", + "metadata": {}, + "outputs": [], 
+ "source": [ + "call_claude(return_msgs=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "94497894-8cb9-4da4-8671-edede74055f8", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gemini(return_msgs=False):\n", + " messages = []\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt, gpt_name, claude, claude_name)})\n", + " messages.append({\"role\": \"model\", \"parts\": gemini})\n", + " messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt_messages[-1], gpt_name, claude_messages[-1], claude_name)})\n", + " if return_msgs: return messages\n", + " message = gemini_instance.generate_content(messages)\n", + " return message.text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fddff0fc-0cc3-4473-9d55-effe445ef1ca", + "metadata": {}, + "outputs": [], + "source": [ + "call_gemini(return_msgs=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1988abf3-1986-40f0-b804-c02b54472b8c", + "metadata": {}, + "outputs": [], + "source": [ + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT aka {gpt_name}:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"Claude aka {claude_name}:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)\n", + "\n", + " gemini_next = call_gemini()\n", + " print(f\"Gemini aka {gemini_name}:\\n{gemini_next}\\n\")\n", + " gemini_messages.append(gemini_next)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b72906b3-8c4a-4c15-8508-01118d33782a", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + 
"display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/day1.ipynb b/week2/day1.ipynb index 5681e64..928c619 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -438,188 +438,18 @@ }, { "cell_type": "markdown", - "id": "392d2072-4e5d-424c-b4b4-098d7e3ead2d", + "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac", "metadata": {}, "source": [ - "# And now for a 3 way convo including Gemini" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "eb0c319c-0226-4c6d-99bc-a9167fa86005", - "metadata": {}, - "outputs": [], - "source": [ - "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", - "# We're using cheap versions of models so the costs will be minimal\n", - "\n", - "gpt_model = \"gpt-4o-mini\"\n", - "claude_model = \"claude-3-haiku-20240307\"\n", - "\n", - "gpt_system = \"You are a chatbot who is very argumentative; \\\n", - "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", - "\n", - "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", - "everything the other people in the conversation say, or find common ground. If another person is argumentative, \\\n", - "you try to calm them down and keep chatting.\"\n", - "\n", - "gemini_system = \"You are an extremely knowledgeable and know-it-all counselor chatbot. 
You try to help resolve disagreements, \\\n", - "and if a person is either too argumentative or too polite, you cannot help but to use quotes from famous psychologists to teach \\\n", - "your students to be kind yet maintain boundaries.\"\n", - "\n", - "gemini_instance = google.generativeai.GenerativeModel(\n", - " model_name='gemini-1.5-flash',\n", - " system_instruction=gemini_system\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "041b1596-a646-4321-a89a-9a51046d9b72", - "metadata": {}, - "outputs": [], - "source": [ - "gpt_messages = [\"Hi there\"]\n", - "claude_messages = [\"Hi\"]\n", - "gemini_messages = [\"How is everyone?\"]\n", - "gpt_name = \"Bob\"\n", - "claude_name = \"Larry\"\n", - "gemini_name = \"Frank\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fcd0a633-d506-4b68-8411-46f3fbe34752", - "metadata": {}, - "outputs": [], - "source": [ - "def construct_joined_user_msg(msg1, msg1_name, msg2, msg2_name):\n", - " return msg1_name + ' said: ' + msg1 + '. 
\\n\\nThen ' + msg2_name + ' said: ' + msg2 + '.'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4aef8ba5-1d93-4473-8c5f-707a12d8a1cf", - "metadata": {}, - "outputs": [], - "source": [ - "def call_gpt(return_msgs=False):\n", - " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", - " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", - " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", - " messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(claude, claude_name, gemini, gemini_name)})\n", - " if return_msgs: return messages\n", - " completion = openai.chat.completions.create(\n", - " model=gpt_model,\n", - " messages=messages\n", - " )\n", - " return completion.choices[0].message.content" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36365e7a-43b4-4a0d-a241-fff1440509d3", - "metadata": {}, - "outputs": [], - "source": [ - "call_gpt(return_msgs=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ebebebe-8255-4ca5-9335-258f93e3181f", - "metadata": {}, - "outputs": [], - "source": [ - "def call_claude(return_msgs=False):\n", - " messages = []\n", - " for gpt, claude_msg, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", - " messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(gemini, gemini_name, gpt, gpt_name)})\n", - " messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n", - " messages.append({\"role\": \"user\", \"content\": gpt_name + \" said \" + gpt_messages[-1]})\n", - " if return_msgs: return messages\n", - " message = claude.messages.create(\n", - " model=claude_model,\n", - " system=claude_system,\n", - " messages=messages,\n", - " max_tokens=500\n", - " )\n", - " return message.content[0].text" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b2da7997-a1fe-43fe-9970-4014f665e501", - "metadata": {}, - "outputs": [], 
- "source": [ - "call_claude(return_msgs=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "94497894-8cb9-4da4-8671-edede74055f8", - "metadata": {}, - "outputs": [], - "source": [ - "def call_gemini(return_msgs=False):\n", - " messages = []\n", - " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", - " messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt, gpt_name, claude, claude_name)})\n", - " messages.append({\"role\": \"model\", \"parts\": gemini})\n", - " messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt_messages[-1], gpt_name, claude_messages[-1], claude_name)})\n", - " if return_msgs: return messages\n", - " message = gemini_instance.generate_content(messages)\n", - " return message.text" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fddff0fc-0cc3-4473-9d55-effe445ef1ca", - "metadata": {}, - "outputs": [], - "source": [ - "call_gemini(return_msgs=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1988abf3-1986-40f0-b804-c02b54472b8c", - "metadata": {}, - "outputs": [], - "source": [ - "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", - "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", - "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", - "\n", - "for i in range(5):\n", - " gpt_next = call_gpt()\n", - " print(f\"GPT aka {gpt_name}:\\n{gpt_next}\\n\")\n", - " gpt_messages.append(gpt_next)\n", - " \n", - " claude_next = call_claude()\n", - " print(f\"Claude aka {claude_name}:\\n{claude_next}\\n\")\n", - " claude_messages.append(claude_next)\n", + "# See the community-contributions folder\n", "\n", - " gemini_next = call_gemini()\n", - " print(f\"Gemini aka {gemini_name}:\\n{gemini_next}\\n\")\n", - " gemini_messages.append(gemini_next)" + "For a great variation with a 3-way bringing Gemini into the conversation!" 
] }, { "cell_type": "code", "execution_count": null, - "id": "b72906b3-8c4a-4c15-8508-01118d33782a", + "id": "0d86790a-3a6f-4b18-ab0a-bc6107945a27", "metadata": {}, "outputs": [], "source": []