{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "ec2e81cd-2172-4816-bf44-f29312b8a4bd",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"import google.generativeai as genai\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a558dfa4-9496-48ba-b0f5-b0c731adc7b8",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc7c2cda-a5d1-4930-87f2-e06485d6b2bd",
"metadata": {},
"outputs": [],
"source": [
"# Client handles for the three providers. Each library can also read its key\n",
"# from the environment, but passing the keys loaded (and validated) above\n",
"# makes the wiring explicit and fails loudly if a key is missing.\n",
"openai = OpenAI(api_key=openai_api_key)\n",
"\n",
"claude = anthropic.Anthropic(api_key=anthropic_api_key)\n",
"\n",
"genai.configure(api_key=google_api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3eb32aec-ec93-4563-bd88-0d48d2471884",
"metadata": {},
"outputs": [],
"source": [
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"gemini_model = \"gemini-2.0-flash-exp\"\n",
"\n",
"# NOTE: a backslash continuation inside a string literal joins the lines with\n",
"# NO space, so every continued line below must end with an explicit space --\n",
"# the original produced garbled prompts like 'topicfull-stack' and 'pointsabout'.\n",
"gpt_system = \"You are a chatbot who is sarcastic; \\\n",
"you have your speculations about anything in the conversation and you challenge everything in a funny way. \\\n",
"You have to be a part of a group discussion and put forward your points about the topic \\\n",
"full-stack developers vs specialised developer. Keep your points short and precise.\"\n",
"\n",
"claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting. You have to be a part of a group discussion and put forward your points \\\n",
"about the topic full-stack developers vs specialised developer. Keep your points short and precise.\"\n",
"\n",
"gemini_system = \"You are a very rational thinker and don't like beating around the bush about the topic of discussion. \\\n",
"You have to be a part of a group discussion and put forward your points \\\n",
"about the topic full-stack developers vs specialised developer. \\\n",
"Keep your points short and precise.\"\n",
"\n",
"# Seed messages: one opening line per speaker\n",
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",
"gemini_messages = [\"Hello to all\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e27252cf-05f5-4989-85ef-94e6802c5db9",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
"    \"\"\"Return GPT's next reply given the full three-way history.\n",
"\n",
"    GPT's own past turns are sent as 'assistant'; Claude's and Gemini's\n",
"    turns are conversation partners, so both are sent as 'user' (the\n",
"    original mislabelled Gemini's turns as 'assistant', making the model\n",
"    believe it had said them).\n",
"    \"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    # Loop variables renamed so they no longer shadow the global `claude`\n",
"    # client defined in an earlier cell.\n",
"    for gpt_msg, claude_msg, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": gpt_msg})\n",
"        messages.append({\"role\": \"user\", \"content\": claude_msg})\n",
"        messages.append({\"role\": \"user\", \"content\": gemini_msg})\n",
"    completion = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=messages,\n",
"        max_tokens=500  # cap response length per the API requirement\n",
"    )\n",
"    return completion.choices[0].message.content\n",
"\n",
"# Function to call Claude\n",
"def call_claude():\n",
"    \"\"\"Return Claude's next reply.\n",
"\n",
"    The Anthropic Messages API requires roles to strictly alternate\n",
"    user/assistant; the original sent Claude's and Gemini's turns as two\n",
"    consecutive 'assistant' messages, which the API rejects. Instead,\n",
"    each Gemini turn is held back and folded into the NEXT user message.\n",
"    \"\"\"\n",
"    messages = []\n",
"    pending = \"\"  # Gemini text waiting to be merged into the next user turn\n",
"    for gpt, claude_message, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
"        user_text = f\"{pending}\\n{gpt}\" if pending else gpt\n",
"        messages.append({\"role\": \"user\", \"content\": user_text})\n",
"        messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
"        pending = gemini\n",
"    # The latest GPT turn (plus any held-back Gemini text) is the final user message\n",
"    final_text = f\"{pending}\\n{gpt_messages[-1]}\" if pending else gpt_messages[-1]\n",
"    messages.append({\"role\": \"user\", \"content\": final_text})\n",
"    message = claude.messages.create(\n",
"        model=claude_model,\n",
"        max_tokens=500,\n",
"        messages=messages\n",
"    )\n",
"    return message.content[0].text\n",
"\n",
"# Function to call Gemini\n",
"def call_gemini():\n",
"    \"\"\"Return Gemini's next reply, given the whole conversation so far.\"\"\"\n",
"    gemini_model_instance = genai.GenerativeModel(\n",
"        model_name=gemini_model,  # which Gemini model to use\n",
"        system_instruction=gemini_system  # persona / behaviour instruction\n",
"    )\n",
"\n",
"    # Interleave the three histories round by round. zip() stops at the\n",
"    # shortest list, and when this runs Gemini is one message behind GPT\n",
"    # and Claude, so the tails beyond the zipped rounds are appended\n",
"    # explicitly -- the original silently dropped the newest GPT and\n",
"    # Claude turns.\n",
"    rounds = len(gemini_messages)\n",
"    transcript = []\n",
"    for gpt_msg, claude_msg, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n",
"        transcript.extend([gpt_msg, claude_msg, gemini_msg])\n",
"    transcript.extend(gpt_messages[rounds:])\n",
"    transcript.extend(claude_messages[rounds:])\n",
"\n",
"    # Join with newlines so turns stay visually separated (the original\n",
"    # concatenated them with no separator at all)\n",
"    gemini_response = gemini_model_instance.generate_content(\"\\n\".join(transcript))\n",
"\n",
"    return gemini_response.text\n",
"\n",
"# Initial print\n",
"print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
"\n",
"# Main loop to generate conversation\n",
"for i in range(3):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"Claude:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)\n",
" \n",
" gemini_next = call_gemini()\n",
" print(f\"Gemini:\\n{gemini_next}\\n\")\n",
" gemini_messages.append(gemini_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "52f43794-a20a-4b9a-a18d-6f363b8dc27d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}