Browse Source

Merge pull request #185 from johnIT56/my-feature-branch

Adding Ollama brochure version and Ollama Tutor version with streaming
pull/215/head
Ed Donner 3 months ago committed by GitHub
parent
commit
53f4da6ee7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 432
      week1/community-contributions/Ollama brochure.ipynb
  2. 125
      week1/community-contributions/week1_Tutor_Ollama.ipynb
  3. 286
      week2/community-contributions/day1_Ollama&gemini_conversation.ipynb

432
week1/community-contributions/Ollama brochure.ipynb

@ -0,0 +1,432 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 52,
"id": "b56a950c-db41-4575-bef9-0fa651dea363",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import json\n",
"import ollama\n",
"from typing import List\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display,clear_output\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0ec875db-0f6a-4eec-a3b6-eae4b71a4b89",
"metadata": {},
"outputs": [],
"source": [
"# Constants\n",
"\n",
"MODEL = \"llama3.2\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "227cd07c-98a4-463b-94ad-94e33d04944b",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
"    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"    \"\"\"\n",
"    A utility class to represent a Website that we have scraped, now with links.\n",
"\n",
"    Attributes: url, body (raw bytes), title, text (visible body text), links (non-empty hrefs).\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, url):\n",
"        self.url = url\n",
"        # timeout prevents the notebook from hanging indefinitely on a slow/unresponsive site\n",
"        response = requests.get(url, headers=headers, timeout=30)\n",
"        self.body = response.content\n",
"        soup = BeautifulSoup(self.body, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            # Strip non-text elements before extracting readable content\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"        links = [link.get('href') for link in soup.find_all('a')]\n",
"        self.links = [link for link in links if link]\n",
"\n",
"    def get_contents(self):\n",
"        \"\"\"Return the page title and text formatted for inclusion in an LLM prompt.\"\"\"\n",
"        return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d5c5e40-c010-4102-8359-899f988185fb",
"metadata": {},
"outputs": [],
"source": [
"ed = Website(\"https://edwarddonner.com\")\n",
"ed.links"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5f0b5d71-487c-47a5-ace6-8e02465ed452",
"metadata": {},
"outputs": [],
"source": [
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
"# The example below must itself be valid JSON, since the model is asked to imitate it.\n",
"link_system_prompt += \"\"\"\n",
"{\n",
"    \"links\": [\n",
"        {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
"        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
"    ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6550325-5160-42c9-b7e7-980b504cd096",
"metadata": {},
"outputs": [],
"source": [
"print(link_system_prompt)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2db4ccc6-5c35-4775-a5b2-4b86e4c73808",
"metadata": {},
"outputs": [],
"source": [
"def get_links_user_prompt(website):\n",
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
"Do not include Terms of Service, Privacy, email links.\\n\"\n",
" user_prompt += \"Links (some might be relative links):\\n\"\n",
" user_prompt += \"\\n\".join(website.links)\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8af511c7-5a74-4d1a-b763-b31370e70cff",
"metadata": {},
"outputs": [],
"source": [
"print(get_links_user_prompt(ed))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3b7fb61-ca15-4eab-b017-b0fe5cce46fd",
"metadata": {},
"outputs": [],
"source": [
"def get_links(url):\n",
" website = Website(url)\n",
" response = ollama.chat(\n",
" model=MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": link_system_prompt},\n",
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
" ], format = \"json\" #Define format as json!\n",
" )\n",
" result = response['message']['content']\n",
"\n",
" return json.loads(result)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7816d393-620d-4c53-913e-4ec130b2baba",
"metadata": {},
"outputs": [],
"source": [
"# Fetch Anthropic's landing page and list the links found on it.\n",
"# Note: some sites resist scraping; if this fails or returns few links,\n",
"# substitute another company's URL.\n",
"\n",
"anthropic = Website(\"https://anthropic.com\")\n",
"anthropic.links"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f32ceccb-1d45-41a3-a5c1-fb2e6cd76afe",
"metadata": {},
"outputs": [],
"source": [
"get_links(\"https://anthropic.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7ec4727-e897-473c-a657-e74f6999c974",
"metadata": {},
"outputs": [],
"source": [
"def get_all_details(url):\n",
"    \"\"\"Return the landing-page text plus the text of every brochure-relevant linked page.\n",
"\n",
"    Pages that fail to download or parse (e.g. relative URLs the model didn't\n",
"    expand, or unreachable hosts) are skipped with a warning instead of\n",
"    aborting the whole brochure build.\n",
"    \"\"\"\n",
"    result = \"Landing page:\\n\"\n",
"    result += Website(url).get_contents()\n",
"    links = get_links(url)\n",
"    print(\"Found links:\", links)\n",
"    for link in links[\"links\"]:\n",
"        try:\n",
"            page = Website(link[\"url\"]).get_contents()\n",
"        except Exception as e:\n",
"            print(f\"Skipping {link.get('url')}: {e}\")\n",
"            continue\n",
"        result += f\"\\n\\n{link['type']}\\n\"\n",
"        result += page\n",
"    return result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7acde0c5-1af2-4e8e-9303-e2a98ec9cdbb",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"print(get_all_details(\"https://anthropic.com\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a2e2b1d-eb55-4bfb-bf55-5e8c87db0d96",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"Include details of company culture, customers and careers/jobs if you have the information.\"\n",
"\n",
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n",
"\n",
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"# Include details of company culture, customers and careers/jobs if you have the information.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8eac1719-7f94-4460-bc4a-0c9c93bb17a5",
"metadata": {},
"outputs": [],
"source": [
"def get_brochure_user_prompt(company_name, url):\n",
"    \"\"\"Build the user prompt for the brochure model from all scraped page contents.\"\"\"\n",
"    user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
"    user_prompt += \"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
"    user_prompt += get_all_details(url)\n",
"    user_prompt = user_prompt[:5_000]  # Truncate if more than 5,000 characters to keep the context manageable\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e2e312f6-01c5-4e57-9134-fb4aa447d155",
"metadata": {},
"outputs": [],
"source": [
"get_brochure_user_prompt(\"Anthropic\", \"https://anthropic.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b05cbab-f0d2-4a9e-8b8c-c868a036e9cd",
"metadata": {},
"outputs": [],
"source": [
"def create_brochure(company_name, url):\n",
" response = ollama.chat(\n",
" model=MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
" ]\n",
" )\n",
" result = response[\"message\"][\"content\"]\n",
" display(Markdown(result))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "91ede0c0-daf2-42ef-9d31-749afb9d5352",
"metadata": {},
"outputs": [],
"source": [
"create_brochure(\"Anthropic\", \"https://anthropic.com\")"
]
},
{
"cell_type": "markdown",
"id": "afb4aeee-5108-42a7-a1c1-5bad254b7e8b",
"metadata": {},
"source": [
"# Final improvement\n",
"\n",
"Getting a typewriter animation by streaming the response."
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "177de611-1cb1-49e2-b7ea-8d01191af3ee",
"metadata": {},
"outputs": [],
"source": [
"def create_brochure(company_name, url):\n",
"    \"\"\"Stream the brochure from Ollama, updating a single Markdown display in place.\"\"\"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
"    ]\n",
"\n",
"    display_markdown = display(Markdown(\"\"), display_id=True)  # Handle for in-place updates\n",
"    response_text = \"\"\n",
"\n",
"    for chunk in ollama.chat(model=MODEL, messages=messages, stream=True):  # stream=True (boolean, not a string)\n",
"        response_text += chunk['message']['content']\n",
"        # display_id + update() already replaces the rendered Markdown in place;\n",
"        # calling clear_output() here would also wipe the 'Found links' print above.\n",
"        display_markdown.update(Markdown(response_text))\n"
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "a1971d81-fc7f-4ed1-97a0-7ef5e8ed332a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found links: {'links': [{'type': 'About page', 'url': 'https://www.anthropic.com/company'}, {'type': 'Careers page', 'url': 'https://www.anthropic.com/careers'}, {'type': 'Company page', 'url': 'https://www.anthropic.com/'}, {'type': 'Research page', 'url': 'https://www.anthropic.com/research'}, {'type': 'Twitter profile', 'url': 'https://twitter.com/AnthropicAI'}, {'type': 'LinkedIn company page', 'url': 'https://www.linkedin.com/company/anthropicresearch'}, {'type': 'YouTube channel', 'url': 'https://www.youtube.com/@anthropic-ai'}]}\n"
]
},
{
"data": {
"text/markdown": [
"**Anthropic Brochure**\n",
"======================\n",
"\n",
"**Mission Statement**\n",
"-------------------\n",
"\n",
"Anthropic is an AI safety and research company dedicated to building reliable, interpretable, and steerable AI systems that benefit humanity in the long run.\n",
"\n",
"**Company Overview**\n",
"--------------------\n",
"\n",
"Anthropic is headquartered in San Francisco and brings together a diverse team of researchers, engineers, policy experts, and business leaders with experience spanning various disciplines. Our mission is to conduct frontier AI research, develop and apply safety techniques, and deploy the resulting systems via partnerships and products.\n",
"\n",
"**Research Focus**\n",
"-----------------\n",
"\n",
"Anthropic conducts cutting-edge AI research across various modalities, exploring novel and emerging safety research areas such as interpretability, RL from human feedback, policy, and societal impacts analysis. Our research aims to advance the field of AI safety and inform our product development.\n",
"\n",
"**Product Portfolio**\n",
"---------------------\n",
"\n",
"Our flagship product is Claude, a highly intelligent AI model that enables customers to build custom applications and experiences using our API. We also offer various enterprise solutions, including Claude for Enterprise, designed to meet the needs of large organizations.\n",
"\n",
"**Customer Base**\n",
"-----------------\n",
"\n",
"Anthropic serves a diverse range of customers, including businesses, nonprofits, civil society groups, and their clients around the globe. Our commitment to safety and reliability has earned us a reputation as a trusted partner in the AI industry.\n",
"\n",
"**Values and Culture**\n",
"----------------------\n",
"\n",
"At Anthropic, we value:\n",
"\n",
"* **Acting for the global good**: We strive to make decisions that maximize positive outcomes for humanity in the long run.\n",
"* **Holding light and shade**: We acknowledge the potential risks of AI and approach our work with caution and transparency.\n",
"\n",
"**Join Our Team**\n",
"-----------------\n",
"\n",
"We're a collaborative team of researchers, engineers, policy experts, and business leaders passionate about building safer AI systems. Join us to be part of this exciting journey and contribute your skills and expertise to shaping the future of AI.\n",
"\n",
"**Careers**\n",
"------------\n",
"\n",
"Check our website for open roles and learn more about our company culture, benefits, and career opportunities.\n",
"\n",
"[Learn More](link)\n",
"\n",
"**Get in Touch**\n",
"-----------------\n",
"\n",
"Stay up-to-date with the latest news and announcements from Anthropic. Follow us on Twitter, LinkedIn, or YouTube to join the conversation and stay informed.\n",
"\n",
"[Twitter](link)\n",
"[LinkedIn](link)\n",
"[YouTube](link)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"create_brochure(\"Anthropic\", \"https://anthropic.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c33277a4-84f1-447c-a66e-eb7e2af42d2a",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

125
week1/community-contributions/week1_Tutor_Ollama.ipynb

@ -0,0 +1,125 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "135ee16c-2741-4ebf-aca9-1d263083b3ce",
"metadata": {},
"source": [
"# End of week 1 exercise\n",
"\n",
"Build a tutor tool by using Ollama."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"import ollama\n",
"from IPython.display import Markdown, display, clear_output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
"source": [
"# constants\n",
"MODEL_LLAMA = 'llama3.2'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {},
"outputs": [],
"source": [
"# here is the question; type over this to ask something new\n",
"\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
"metadata": {},
"outputs": [],
"source": [
"# Get Llama 3.2 to answer, with streaming\n",
"\n",
"\n",
"messages=[{\"role\":\"user\",\"content\":question}]\n",
"\n",
"for chunk in ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True):\n",
" print(chunk['message']['content'], end='', flush=True)\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1f71014-e780-4d3f-a227-1a7c18158a4c",
"metadata": {},
"outputs": [],
"source": [
"# Alternative answer with streaming in Markdown!\n",
"\n",
"def stream_response():\n",
"    \"\"\"Stream the model's answer, rendering it as Markdown that updates in place.\"\"\"\n",
"    messages = [{\"role\": \"user\", \"content\": question}]\n",
"\n",
"    display_markdown = display(Markdown(\"\"), display_id=True)\n",
"\n",
"    response_text = \"\"\n",
"    for chunk in ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True):\n",
"        response_text += chunk['message']['content']\n",
"        # display_id + update() replaces the rendered Markdown in place;\n",
"        # clear_output() is unnecessary here and would erase other cell output.\n",
"        display_markdown.update(Markdown(response_text))\n",
"\n",
"# Run the function\n",
"stream_response()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c38fdd2a-4b09-402c-ba46-999b22b0cb15",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

286
week2/community-contributions/day1_Ollama&gemini_conversation.ipynb

@ -0,0 +1,286 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "1194d35b-0b9f-4eb4-a539-5ddf55523367",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"#import anthropic\n",
"import ollama\n",
"import google.generativeai\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f8a1f0b3-6d93-4de1-bc79-2132726598e3",
"metadata": {},
"outputs": [],
"source": [
"#constants\n",
"MODEL=\"llama3.2\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "88fe4149-1ef5-4007-a117-6d3ccab3e3c3",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d186cf6e-fadd-450c-821c-df32e2574f5d",
"metadata": {},
"outputs": [],
"source": [
"# This is the set up code for Gemini\n",
"\n",
"google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "19a55117-f2ac-4a58-af6b-8b75259e80df",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that is great at telling jokes\"\n",
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "908f69b1-54f8-42da-827b-f667631bc666",
"metadata": {},
"outputs": [],
"source": [
"prompts = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ec81488-883a-446f-91cf-2b3d92bbd3ba",
"metadata": {},
"outputs": [],
"source": [
"# The API for Gemini\n",
"gemini = google.generativeai.GenerativeModel(\n",
" model_name='gemini-2.0-flash-exp',\n",
" system_instruction=system_message\n",
")\n",
"response = gemini.generate_content(user_prompt)\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "baf411fa-48bd-46a3-8bc8-1b22d0888a1a",
"metadata": {},
"outputs": [],
"source": [
"# API for ollama\n",
"response = ollama.chat(model=MODEL,messages=prompts)\n",
"print(response['message']['content'])"
]
},
{
"cell_type": "markdown",
"id": "74ba5fc4-e4c6-44ee-b66f-e76d847933d2",
"metadata": {},
"source": [
"# Adversarial conversation between models"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd348154-18fa-4da8-815a-77f5f00107c3",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between Ollama and Gemini\n",
"# Adjusted models accordingly\n",
"\n",
"ollama_model = \"llama3.2\"\n",
"gemini_model = \"gemini-2.0-flash-exp\"\n",
"\n",
"#ollama_system = \"You are a chatbot who is very argumentative; \\\n",
"#you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"# Seller persona: pitches an AI app and tries to close the sale.\n",
"ollama_system = \"You are a chatbot talking with the other person; try to convince them to buy your product, an AI app. \\\n",
"Apply marketing strategies to make this client buy your product; use short, clear explanations.\"\n",
"\n",
"#gemini_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"#everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"#you try to calm them down and keep chatting.\"\n",
"\n",
"# Skeptical-buyer persona: probes the pitch before committing.\n",
"gemini_system = \"You are the chatbot being convinced by another person to buy their product. \\\n",
"Ask important, short questions and see if it is worth giving it a go; don't be too naive or an easy client.\"\n",
"\n",
"ollama_messages = [\"Hi there\"]\n",
"gemini_messages = [\"Hi\"]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "becf327a-5485-4e78-8002-03272a99a3b9",
"metadata": {},
"outputs": [],
"source": [
"def call_ollama():\n",
" messages = [{\"role\": \"system\", \"content\": ollama_system}]\n",
" for ollama_msg, gemini_msg in zip(ollama_messages, gemini_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": ollama_msg})\n",
" messages.append({\"role\": \"user\", \"content\": gemini_msg})\n",
" \n",
" response = ollama.chat(model=ollama_model, messages=messages)\n",
" \n",
" return response['message']['content']\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0c6dbe7-0baf-4c43-a03b-9134654685f4",
"metadata": {},
"outputs": [],
"source": [
"call_ollama()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f68a134a-279a-4629-aec6-171587378991",
"metadata": {},
"outputs": [],
"source": [
"def call_gemini():\n",
" gemini = google.generativeai.GenerativeModel(\n",
" model_name=gemini_model,\n",
" system_instruction=gemini_system\n",
" )\n",
"\n",
" # Build a list of dictionaries representing the conversation\n",
" conversation = []\n",
" for ollama_msg, gemini_msg in zip(ollama_messages, gemini_messages):\n",
" conversation.append({\"role\": \"user\", \"content\": ollama_msg})\n",
" conversation.append({\"role\": \"assistant\", \"content\": gemini_msg})\n",
" conversation.append({\"role\": \"user\", \"content\": ollama_messages[-1]})\n",
"\n",
" # Format the conversation into a string for the prompt\n",
" prompt = \"\"\n",
" for msg in conversation:\n",
" prompt += f\"{msg['role'].capitalize()}: {msg['content']}\\n\"\n",
"\n",
" message = gemini.generate_content(prompt)\n",
" \n",
" return message.text\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7511003a-f2b6-45f5-8cb0-1c9190d33ce9",
"metadata": {},
"outputs": [],
"source": [
"call_gemini()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0e81f1f-9754-4790-8b73-5f52fef4ea64",
"metadata": {},
"outputs": [],
"source": [
"call_ollama()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fbe59f6-a3ef-4062-ab4b-b999f6d1abe9",
"metadata": {},
"outputs": [],
"source": [
"ollama_messages = [\"Hi there\"]\n",
"gemini_messages = [\"Hi\"]\n",
"\n",
"print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n",
"print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" # Call Ollama to generate the next message\n",
" ollama_next = call_ollama() \n",
" print(f\"Ollama:\\n{ollama_next}\\n\")\n",
" ollama_messages.append(ollama_next)\n",
" \n",
" # Call Gemini to generate the next message\n",
" gemini_next = call_gemini() \n",
" print(f\"Gemini:\\n{gemini_next}\\n\")\n",
" gemini_messages.append(gemini_next)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9525600b-082e-417f-9088-c6483a613bf3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Loading…
Cancel
Save