@@ -377,7 +377,8 @@
     "stream = openai.chat.completions.create(\n",
     "    model='gpt-4o',\n",
     "    messages=prompts,\n",
-    "    temperature=0.7,\n",
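+    "    # lower temperature for more focused, less random output\n",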
" temperature=0.2,\n", |
|
|
|
|
" stream=True\n", |
|
|
|
|
")\n", |
|
|
|
|
"\n", |
|
|
|
@@ -452,9 +453,11 @@
    "source": [
     "def call_gpt():\n",
     "    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
-    "    for gpt, claude in zip(gpt_messages, claude_messages):\n",
+    "    for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n",
     "        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
-    "        messages.append({\"role\": \"user\", \"content\": claude})\n",
+    "        combined = llama + claude\n",
+    "        messages.append({\"role\": \"user\", \"content\": combined})\n",
     "    completion = openai.chat.completions.create(\n",
     "        model=gpt_model,\n",
     "        messages=messages\n",
@@ -484,6 +487,7 @@
     "    for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
     "        messages.append({\"role\": \"user\", \"content\": gpt})\n",
     "        messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
+    "    # messages.append({\"role\": \"moderator\", \"content\": llama_message})\n",
     "    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
     "    message = claude.messages.create(\n",
     "        model=claude_model,\n",
@@ -597,7 +601,78 @@
    "id": "c23224f6-7008-44ed-a57f-718975f4e291",
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "!ollama pull llama3.2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cbbddf71-1473-42fe-b733-2bb42ea77333",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import ollama\n",
+    "\n",
+    "OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
+    "HEADERS = {\"Content-Type\": \"application/json\"}\n",
+    "\n",
+    "llama_model = \"llama3.2\"\n",
+    "\n",
+    "llama_system = \"You are a pacifist chatbot; \\\n",
+    "you try to resolve or neutralize any disagreement between the other chatbots. Speak like a teacher or someone in authority.\"\n",
+    "\n",
+    "llama_messages = [\"Hello.\"]\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f629d2b2-ba20-4bfe-a2e5-bbe537ca46a2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def call_llama():\n",
+    "    messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
+    "    for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n",
+    "        messages.append({\"role\": \"user\", \"content\": gpt + claude})\n",
+    "        messages.append({\"role\": \"assistant\", \"content\": llama})\n",
+    "    # reply to the latest GPT and Claude messages combined\n",
+    "    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1] + claude_messages[-1]})\n",
+    "    completion = ollama.chat(\n",
+    "        model=llama_model,\n",
+    "        messages=messages\n",
+    "    )\n",
+    "    return completion['message']['content']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "219b6af8-3166-4059-b79e-cf19af7ed1e9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
+    "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
+    "print(f\"Llama:\\n{llama_messages[0]}\\n\")\n",
+    "\n",
+    "for i in range(3):\n",
+    "    gpt_next = call_gpt()\n",
+    "    print(f\"GPT:\\n{gpt_next}\\n\")\n",
+    "    gpt_messages.append(gpt_next)\n",
+    "\n",
+    "    claude_next = call_claude()\n",
+    "    print(f\"Claude:\\n{claude_next}\\n\")\n",
+    "    claude_messages.append(claude_next)\n",
+    "\n",
+    "    llama_next = call_llama()\n",
+    "    print(f\"Llama:\\n{llama_next}\\n\")\n",
+    "    llama_messages.append(llama_next)"
+   ]
   }
  ],
  "metadata": {