diff --git a/week2/community-contributions/day1_Ollama&gemini_conversation.ipynb b/week2/community-contributions/day1_Ollama&gemini_conversation.ipynb
new file mode 100644
index 0000000..ce28082
--- /dev/null
+++ b/week2/community-contributions/day1_Ollama&gemini_conversation.ipynb
@@ -0,0 +1,349 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1194d35b-0b9f-4eb4-a539-5ddf55523367",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "import ollama\n",
+    "import google.generativeai\n",
+    "from IPython.display import Markdown, display, update_display"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f8a1f0b3-6d93-4de1-bc79-2132726598e3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# constants\n",
+    "MODEL = \"llama3.2\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "88fe4149-1ef5-4007-a117-6d3ccab3e3c3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load environment variables from a file called .env\n",
+    "# Print the key prefix to help with any debugging\n",
+    "\n",
+    "load_dotenv(override=True)\n",
+    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+    "\n",
+    "if google_api_key:\n",
+    "    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
+    "else:\n",
+    "    print(\"Google API Key not set\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d186cf6e-fadd-450c-821c-df32e2574f5d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This is the setup code for Gemini\n",
+    "\n",
+    "google.generativeai.configure()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "19a55117-f2ac-4a58-af6b-8b75259e80df",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "system_message = \"You are an assistant that is great at telling jokes\"\n",
+    "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "908f69b1-54f8-42da-827b-f667631bc666",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prompts = [\n",
+    "    {\"role\": \"system\", \"content\": system_message},\n",
+    "    {\"role\": \"user\", \"content\": user_prompt}\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4ec81488-883a-446f-91cf-2b3d92bbd3ba",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The API for Gemini\n",
+    "gemini = google.generativeai.GenerativeModel(\n",
+    "    model_name='gemini-2.0-flash-exp',\n",
+    "    system_instruction=system_message\n",
+    ")\n",
+    "response = gemini.generate_content(user_prompt)\n",
+    "print(response.text)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "baf411fa-48bd-46a3-8bc8-1b22d0888a1a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The API for Ollama\n",
+    "response = ollama.chat(model=MODEL, messages=prompts)\n",
+    "print(response['message']['content'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "74ba5fc4-e4c6-44ee-b66f-e76d847933d2",
+   "metadata": {},
+   "source": [
+    "# Adversarial conversation between models"
+   ]
+  },
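+  {
+   "cell_type": "markdown",
+   "id": "3c9a7b2e-1d4f-4e6a-9b0c-2f5d8a7c1e3b",
+   "metadata": {},
+   "source": [
+    "Each model keeps a list of only its own messages: `ollama_messages` for the Llama model and `gemini_messages` for Gemini.\n",
+    "Before every API call, the two lists are zipped back together so the whole conversation can be replayed from that model's point of view.\n",
+    "The next cell is a small illustrative sketch of that interleaving, using plain strings and no API calls."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5e1f8c3a-7b2d-4c9e-8a6f-0d3b5c7e9a1f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch only: how two per-model transcripts zip into one shared history\n",
+    "# The real call_ollama/call_gemini functions below rebuild the history the same way\n",
+    "a_side = [\"Hi there\", \"Want to buy my AI app?\"]   # stands in for ollama_messages\n",
+    "b_side = [\"Hi\", \"What does it do?\"]               # stands in for gemini_messages\n",
+    "\n",
+    "for a_msg, b_msg in zip(a_side, b_side):\n",
+    "    print(f\"assistant: {a_msg}\")\n",
+    "    print(f\"user:      {b_msg}\")"
+   ]
+  },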
"\n", + "#ollama_system = \"You are a chatbot who is very argumentative; \\\n", + "#you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", + "\n", + "ollama_system=\"You are a chatbot talking with the other person try to convince them to buy your proct of an ai app, \\\n", + "apply marketing strategies to make this client buy your product, use short clear explanations\"\n", + "\n", + "#gemini_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "#everything the other person says, or find common ground. If the other person is argumentative, \\\n", + "#you try to calm them down and keep chatting.\"\n", + "\n", + "gemini_system = \"You are the chatbot triying to be convinced by another person to buy their product, \\\n", + "ask important short questions and see if it is worth to give it a go, dont be too naive or easy go client\"\n", + "\n", + "ollama_messages = [\"Hi there\"]\n", + "gemini_messages = [\"Hi\"]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "becf327a-5485-4e78-8002-03272a99a3b9", + "metadata": {}, + "outputs": [], + "source": [ + "def call_ollama():\n", + " messages = [{\"role\": \"system\", \"content\": ollama_system}]\n", + " for ollama_msg, gemini_msg in zip(ollama_messages, gemini_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": ollama_msg})\n", + " messages.append({\"role\": \"user\", \"content\": gemini_msg})\n", + " \n", + " response = ollama.chat(model=ollama_model, messages=messages)\n", + " \n", + " return response['message']['content']\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d0c6dbe7-0baf-4c43-a03b-9134654685f4", + "metadata": {}, + "outputs": [], + "source": [ + "call_ollama()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f68a134a-279a-4629-aec6-171587378991", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gemini():\n", + " gemini = google.generativeai.GenerativeModel(\n", + " model_name=gemini_model,\n", + " system_instruction=gemini_system\n", + " )\n", + "\n", + " # Build a list of dictionaries representing the conversation\n", + " conversation = []\n", + " for ollama_msg, gemini_msg in zip(ollama_messages, gemini_messages):\n", + " conversation.append({\"role\": \"user\", \"content\": ollama_msg})\n", + " conversation.append({\"role\": \"assistant\", \"content\": gemini_msg})\n", + " conversation.append({\"role\": \"user\", \"content\": ollama_messages[-1]})\n", + "\n", + " # Format the conversation into a string for the prompt\n", + " prompt = \"\"\n", + " for msg in conversation:\n", + " prompt += f\"{msg['role'].capitalize()}: {msg['content']}\\n\"\n", + "\n", + " message = gemini.generate_content(prompt)\n", + " \n", + " return message.text\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7511003a-f2b6-45f5-8cb0-1c9190d33ce9", + "metadata": {}, + "outputs": [], + "source": [ + "call_gemini()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d0e81f1f-9754-4790-8b73-5f52fef4ea64", + "metadata": {}, + "outputs": [], + "source": [ + "call_ollama()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6fbe59f6-a3ef-4062-ab4b-b999f6d1abe9", + "metadata": {}, + "outputs": [], + "source": [ + "ollama_messages = [\"Hi there\"]\n", + "gemini_messages = [\"Hi\"]\n", + "\n", + "print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n", + "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", + "\n", + "for i in 
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6fbe59f6-a3ef-4062-ab4b-b999f6d1abe9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Reset the transcripts and run five rounds of the conversation\n",
+    "ollama_messages = [\"Hi there\"]\n",
+    "gemini_messages = [\"Hi\"]\n",
+    "\n",
+    "print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n",
+    "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
+    "\n",
+    "for _ in range(5):\n",
+    "    # Call Ollama to generate the next message\n",
+    "    ollama_next = call_ollama()\n",
+    "    print(f\"Ollama:\\n{ollama_next}\\n\")\n",
+    "    ollama_messages.append(ollama_next)\n",
+    "\n",
+    "    # Call Gemini to generate the next message\n",
+    "    gemini_next = call_gemini()\n",
+    "    print(f\"Gemini:\\n{gemini_next}\\n\")\n",
+    "    gemini_messages.append(gemini_next)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9525600b-082e-417f-9088-c6483a613bf3",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}