10 changed files with 9279 additions and 17 deletions
@@ -0,0 +1,467 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "ac080a86-0da8-4a38-9f88-ca8b902c7782",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "import requests\n",
    "#from dotenv import load_dotenv\n",
    "from bs4 import BeautifulSoup\n",
    "from IPython.display import Markdown, display\n",
    "#from openai import OpenAI\n",
    "import ollama\n",
    "# If you get an error running this cell, then please head over to the troubleshooting notebook!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "2a2fdd8c-1333-4a76-9fd5-1fb467c0eaa7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A class to represent a Webpage\n",
    "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
    "\n",
    "# Some websites need you to use proper headers when fetching them:\n",
    "headers = {\n",
    "    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
    "}\n",
    "\n",
    "class Website:\n",
    "\n",
    "    def __init__(self, url):\n",
    "        \"\"\"\n",
    "        Create this Website object from the given url using the BeautifulSoup library\n",
    "        \"\"\"\n",
    "        self.url = url\n",
    "        response = requests.get(url, headers=headers)\n",
    "        soup = BeautifulSoup(response.content, 'html.parser')\n",
    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
    "        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
    "            irrelevant.decompose()\n",
    "        self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
   ]
  },
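The `Website` constructor above assumes the fetch succeeds and that the page has a `<body>`. As a hedged sketch (not part of the course code; `SafeWebsite` and the 10-second timeout are assumptions for illustration), here is how you might make it more defensive:

```python
# Hypothetical hardened variant of Website, sketched for illustration
class SafeWebsite:
    def __init__(self, url):
        self.url = url
        response = requests.get(url, headers=headers, timeout=10)  # assumed timeout value
        response.raise_for_status()  # fail loudly on 4xx/5xx instead of parsing an error page
        soup = BeautifulSoup(response.content, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        if soup.body:
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            self.text = soup.body.get_text(separator="\n", strip=True)
        else:
            self.text = ""  # some pages (or blocked fetches) have no <body>
```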
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fb4a3de7-196f-4fe7-acb1-55563d8382e6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Home - Edward Donner\n",
      "Home\n",
      "Connect Four\n",
      "Outsmart\n",
      "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
      "About\n",
      "Posts\n",
      "Well, hi there.\n",
      "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
      "very\n",
      "amateur) and losing myself in\n",
      "Hacker News\n",
      ", nodding my head sagely to things I only half understand.\n",
      "I’m the co-founder and CTO of\n",
      "Nebula.io\n",
      ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
      "acquired in 2021\n",
      ".\n",
      "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
      "patented\n",
      "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
      "Connect\n",
      "with me for more!\n",
      "January 23, 2025\n",
      "LLM Workshop – Hands-on with Agents – resources\n",
      "December 21, 2024\n",
      "Welcome, SuperDataScientists!\n",
      "November 13, 2024\n",
      "Mastering AI and LLM Engineering – Resources\n",
      "October 16, 2024\n",
      "From Software Engineer to AI Data Scientist – resources\n",
      "Navigation\n",
      "Home\n",
      "Connect Four\n",
      "Outsmart\n",
      "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
      "About\n",
      "Posts\n",
      "Get in touch\n",
      "ed [at] edwarddonner [dot] com\n",
      "www.edwarddonner.com\n",
      "Follow me\n",
      "LinkedIn\n",
      "Twitter\n",
      "Facebook\n",
      "Subscribe to newsletter\n",
      "Type your email…\n",
      "Subscribe\n"
     ]
    }
   ],
   "source": [
    "# Let's try one out. Change the website and add print statements to follow along.\n",
    "\n",
    "ed = Website(\"https://edwarddonner.com\")\n",
    "print(ed.title)\n",
    "print(ed.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "e6edeaca-9a4d-4d07-aac9-c464976ce519",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
    "\n",
    "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
    "and provides a short summary, ignoring text that might be navigation related. \\\n",
    "Respond in markdown.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "574ad4e2-8000-41b9-b732-10c9aacce14e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A function that writes a User Prompt that asks for summaries of websites:\n",
    "\n",
    "def user_prompt_for(website):\n",
    "    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
    "    user_prompt += \"\\nThe contents of this website is as follows; \\\n",
    "please provide a short summary of this website in markdown. \\\n",
    "If it includes news or announcements, then summarize these too.\\n\\n\"\n",
    "    user_prompt += website.text\n",
    "    return user_prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "a8da1d50-9bbe-4ea0-ab89-d4e4909eedbb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "You are looking at a website titled Home - Edward Donner\n",
      "The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n",
      "\n",
      "Home\n",
      "Connect Four\n",
      "Outsmart\n",
      "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
      "About\n",
      "Posts\n",
      "Well, hi there.\n",
      "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
      "very\n",
      "amateur) and losing myself in\n",
      "Hacker News\n",
      ", nodding my head sagely to things I only half understand.\n",
      "I’m the co-founder and CTO of\n",
      "Nebula.io\n",
      ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
      "acquired in 2021\n",
      ".\n",
      "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
      "patented\n",
      "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
      "Connect\n",
      "with me for more!\n",
      "January 23, 2025\n",
      "LLM Workshop – Hands-on with Agents – resources\n",
      "December 21, 2024\n",
      "Welcome, SuperDataScientists!\n",
      "November 13, 2024\n",
      "Mastering AI and LLM Engineering – Resources\n",
      "October 16, 2024\n",
      "From Software Engineer to AI Data Scientist – resources\n",
      "Navigation\n",
      "Home\n",
      "Connect Four\n",
      "Outsmart\n",
      "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
      "About\n",
      "Posts\n",
      "Get in touch\n",
      "ed [at] edwarddonner [dot] com\n",
      "www.edwarddonner.com\n",
      "Follow me\n",
      "LinkedIn\n",
      "Twitter\n",
      "Facebook\n",
      "Subscribe to newsletter\n",
      "Type your email…\n",
      "Subscribe\n"
     ]
    }
   ],
   "source": [
    "print(user_prompt_for(ed))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "4bda2a4b-dace-406e-bd90-3250d04dc664",
   "metadata": {},
   "outputs": [],
   "source": [
    "# See how this function creates exactly the format above\n",
    "\n",
    "def messages_for(website):\n",
    "    return [\n",
    "        {\"role\": \"system\", \"content\": system_prompt},\n",
    "        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "c5f6bd98-3ea8-450d-97f4-346ec7304c42",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'role': 'system',\n",
       "  'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n",
       " {'role': 'user',\n",
       "  'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nConnect Four\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nJanuary 23, 2025\\nLLM Workshop – Hands-on with Agents – resources\\nDecember 21, 2024\\nWelcome, SuperDataScientists!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nNavigation\\nHome\\nConnect Four\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Try this out, and then try for a few more websites\n",
    "\n",
    "messages_for(ed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "e3e1a92a-32e4-4390-8327-77c1f08ba6d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "MODEL = \"llama3.2\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "a2c7f58c-1a81-41a7-83b8-aed9fdd15c08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# And now: call Ollama to generate the summary. You will get very familiar with this pattern!\n",
    "\n",
    "def summarize(url):\n",
    "    website = Website(url)\n",
    "    response = ollama.chat(\n",
    "        model = MODEL,\n",
    "        messages = messages_for(website)\n",
    "    )\n",
    "    return response['message']['content']"
   ]
  },
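If you'd rather watch the summary arrive token by token, `ollama.chat` also supports `stream=True`, which returns an iterator of chunks. A minimal sketch, assuming the `MODEL`, `Website` and `messages_for` defined above (`summarize_stream` is a hypothetical name, not part of the course code):

```python
# Hedged sketch: streaming variant of summarize(), printing tokens as they arrive
def summarize_stream(url):
    website = Website(url)
    stream = ollama.chat(model=MODEL, messages=messages_for(website), stream=True)
    summary = ""
    for chunk in stream:
        piece = chunk['message']['content']  # each chunk carries a fragment of the reply
        summary += piece
        print(piece, end="", flush=True)
    return summary
```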
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "df110fc8-cbc8-4f76-a393-cdea602a6401",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'**Summary**\\n================\\n\\n* Edward Donner is the founder and CTO of Nebula.io, a company applying AI to help people discover their potential.\\n* He has co-founded and worked with various AI startups, including untapt, which was acquired in 2021.\\n\\n**News/Announcements**\\n--------------------\\n\\n* **December 21, 2024**: LLM Workshop – Hands-on with Agents - resources\\n* **November 13, 2024**: Welcome, SuperDataScientists!\\n* **October 16, 2024**: From Software Engineer to AI Data Scientist – resources\\n* **January 23, 2025**: LLM Workshop (upcoming event)\\n\\n**No navigation content has been included in this summary.**'"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "summarize(\"https://edwarddonner.com\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "190de7a5-005a-44c5-a10c-c3c2946fbf26",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A function to display this nicely in the Jupyter output, using markdown\n",
    "\n",
    "def display_summary(url):\n",
    "    summary = summarize(url)\n",
    "    display(Markdown(summary))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "d4c743ec-2027-4b87-91aa-67d3125b6f64",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "### Website Summary\n",
       "\n",
       "#### Overview\n",
       "The website is owned by Edward Donner, the co-founder and CTO of Nebula.io, a company applying AI to help people discover their potential. The website appears to be a personal blog or portfolio site.\n",
       "\n",
       "#### Content Highlights\n",
       "\n",
       "* **News and Announcements**\n",
       "\t+ Upcoming LLM Workshop on January 23, 2025\n",
       "\t+ Previous workshops: December 21, 2024 (LLM Workshop – Hands-on with Agents), November 13, 2024 (Welcome, SuperDataScientists!), October 16, 2024 (Mastering AI and LLM Engineering)\n",
       "* **Personal Interests**\n",
       "\t+ DJing and amateur electronic music production\n",
       "\t+ Hacker News enthusiast\n",
       "* **Professional Background**\n",
       "\t+ Co-founder and CTO of Nebula.io\n",
       "\t+ Previously founder and CEO of AI startup untapt (acquired in 2021)\n",
       "\n",
       "#### Links\n",
       "\n",
       "* LinkedIn\n",
       "* Twitter\n",
       "* Facebook\n",
       "* Newsletter subscription"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "display_summary(\"https://edwarddonner.com\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "f877b47b-50ff-4717-a42d-09dbd912c134",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "This appears to be the front page of CNN.com, the website for the American cable news channel CNN (Cable News Network). The page is constantly updated with breaking news stories, videos, and analysis on various topics including politics, business, entertainment, sports, and more.\n",
       "\n",
       "Here are some notable features and sections on this page:\n",
       "\n",
       "1. **Headlines**: A section featuring the top news headlines from around the world.\n",
       "2. **Videos**: A collection of short videos related to current events, interviews with newsmakers, and analysis segments.\n",
       "3. **News**: A section that aggregates breaking news stories from various parts of the world.\n",
       "4. **Politics**: A section dedicated to US politics, including presidential elections, legislative updates, and opinion pieces.\n",
       "5. **Business**: A section covering business news, markets, and economic trends.\n",
       "6. **Entertainment**: A section featuring entertainment news, reviews, and analysis on movies, TV shows, music, and celebrities.\n",
       "7. **Sports**: A section dedicated to sports news, scores, and updates on various professional leagues.\n",
       "8. **Features**: A section with in-depth articles, interviews, and profiles on a wide range of topics.\n",
       "9. **Opinion**: A section featuring opinion pieces from CNN's editorial team and other writers on current events and issues.\n",
       "\n",
       "This is just one example of the many ways that CNN presents news to its audience."
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "display_summary(\"https://cnn.com\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "62ed7fdc-8ff5-4bc4-ae98-81fe22a48ccc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "# Website Summary\n",
       "### Title: Just a Moment...\n",
       "\n",
       "**Overview**\n",
       "The website appears to be under maintenance, requiring users to enable JavaScript and cookies to proceed. No content is displayed until these conditions are met.\n",
       "\n",
       "### News/Announcements (none found)\n",
       "Unfortunately, there are no news or announcements on this website at the time of analysis."
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "display_summary(\"https://anthropic.com\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "142362ce-77a9-418b-a65d-83e0adb636f1",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -0,0 +1,499 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "dcaac8eb-b65c-42c5-abf6-efd9152d5873",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n",
    "\n",
    "import os\n",
    "import requests\n",
    "import json\n",
    "from typing import List\n",
    "from dotenv import load_dotenv\n",
    "from bs4 import BeautifulSoup\n",
    "from IPython.display import Markdown, display, update_display\n",
    "from openai import OpenAI\n",
    "import ollama"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ddbba5c3-d7aa-45b4-9fc3-0ecc300a6b62",
   "metadata": {},
   "outputs": [],
   "source": [
    "# constants\n",
    "\n",
    "MODEL_GPT = 'gpt-4o-mini'\n",
    "MODEL_LLAMA = 'llama3.2'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "da61c277-e7df-48ed-9acc-a44af7539eee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "API key looks good so far\n"
     ]
    }
   ],
   "source": [
    "# Initialize and constants\n",
    "\n",
    "load_dotenv(override=True)\n",
    "api_key = os.getenv('OPENAI_API_KEY')\n",
    "\n",
    "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
    "    print(\"API key looks good so far\")\n",
    "else:\n",
    "    print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
    "\n",
    "MODEL = 'gpt-4o-mini'\n",
    "openai = OpenAI()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "b64764ad-1ec2-4788-98a1-69b7275fef92",
   "metadata": {},
   "outputs": [],
   "source": [
    "# here is the question; type over this to ask something new\n",
    "\n",
    "question = \"\"\"\n",
    "Please explain what this code does and why:\n",
    "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
    "\"\"\""
   ]
  },
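Before the two models answer below, it may help to see the expansion by hand. A minimal sketch, assuming the line sits inside a generator function and a `books` list of dicts (both hypothetical here, matching the wording of the question):

```python
# What `yield from {book.get("author") for book in books if book.get("author")}` does, expanded:
def unique_authors(books):
    # build the set of distinct, truthy author values first...
    authors = {book.get("author") for book in books if book.get("author")}
    # ...then `yield from` delegates iteration, yielding each element in turn
    for author in authors:
        yield author
```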
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "ce33cd8c-f2a8-411b-a478-0788b7e2af80",
   "metadata": {},
   "outputs": [],
   "source": [
    "# prompts\n",
    "\n",
    "system_prompt = \"You are a helpful technical tutor who answers questions about python code, software engineering, data science and LLMs\"\n",
    "user_prompt = \"Please give a detailed explanation to the following question: \" + question"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "76605197-5df4-4893-b245-10256cb377a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# messages\n",
    "\n",
    "messages=[\n",
    "    {\"role\":\"system\",\"content\":system_prompt},\n",
    "    {\"role\":\"user\",\"content\":user_prompt}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "a9e3577d-f73b-43eb-b3b3-5d089358fb90",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "The provided code snippet is a Python expression that uses the `yield from` statement along with a set comprehension to generate a sequence of authors from a collection of books. Let’s break down the components of this code to understand what it does and why it may be structured this way.\n",
       "\n",
       "### Breakdown of the Code\n",
       "\n",
       "1. **Set Comprehension**: \n",
       "   python\n",
       "   {book.get(\"author\") for book in books if book.get(\"author\")}\n",
       "   \n",
       "   This part of the code is a set comprehension. A set is a collection of unique elements in Python. The expression iterates over a collection named `books`, which we can assume is likely a list or another iterable structure containing dictionaries (since the code uses `book.get(...)`).\n",
       "\n",
       "   - Inside the comprehension, it iterates over each `book` in `books`.\n",
       "   - The expression `book.get(\"author\")` tries to retrieve the value associated with the key \"author\" from each `book` dictionary.\n",
       "   - The condition `if book.get(\"author\")` ensures that only books with a non-None, non-empty author value are included in the resulting set. If a book does not have an author or if the author is falsy (like `None` or an empty string), it is excluded.\n",
       "\n",
       "   The result is a set containing unique author names from the provided `books` collection.\n",
       "\n",
       "2. **Yield from**: \n",
       "   python\n",
       "   yield from ...\n",
       "   \n",
       "   The `yield from` statement is used inside a generator function to yield all values from an iterable (in this case, the set created by the comprehension). When a generator function is called, it returns a generator object without starting execution immediately. The execution begins when the generator is iterated over, and each time a value is yielded, the generator can pause its state.\n",
       "\n",
       "   By using `yield from`, the generator will produce each author value sequentially. If the set comprehension yields a set of authors, `yield from` will iterate over that set and yield each author back to whoever is consuming the generator.\n",
       "\n",
       "### Why Use This Structure?\n",
       "\n",
       "1. **Functionality**: The main purpose of this code is to obtain and yield unique authors from a set of book items. The set comprehension ensures that there are no duplicate authors in the result.\n",
       "\n",
       "2. **Generator Behavior**: Using `yield from` makes the code concise and allows for a lazy evaluation of the authors. This is efficient, especially if the list of books is large, as authors are only generated when requested.\n",
       "\n",
       "3. **Set for Uniqueness**: Using a set directly means duplicate author names are automatically filtered out, which can be useful if `books` potentially contains multiple entries with the same author.\n",
       "\n",
       "### Final Thoughts\n",
       "\n",
       "Assuming this code is encapsulated inside a generator function, when you call this generator, it will produce a sequence of unique authors from the provided `books`. The use of both yield and set comprehension here is effective for cases where uniqueness of elements is desired, and where memory efficiency is a priority due to the potential size of data. \n",
       "\n",
       "Here's how you might see this in context:\n",
       "\n",
       "python\n",
       "def unique_authors(books):\n",
       "    yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
       "\n",
       "\n",
       "In this context, `unique_authors` will yield each unique author found in `books`, and you can use it like so:\n",
       "\n",
       "python\n",
       "books = [\n",
       "    {\"title\": \"Book 1\", \"author\": \"Author A\"},\n",
       "    {\"title\": \"Book 2\", \"author\": \"Author B\"},\n",
       "    {\"title\": \"Book 3\", \"author\": \"Author A\"},\n",
       "    {\"title\": \"Book 4\"} # No author\n",
       "]\n",
       "\n",
       "for author in unique_authors(books):\n",
       "    print(author)\n",
       "\n",
       "\n",
       "This would output:\n",
       "\n",
       "Author A\n",
       "Author B\n",
       "\n",
       "\n",
       "With the use of a generator, you can efficiently handle potentially large datasets without needing to store the entire result set in memory at once."
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Get gpt-4o-mini to answer, with streaming\n",
    "\n",
    "stream = openai.chat.completions.create(model=MODEL_GPT, messages=messages, stream=True)\n",
    "\n",
    "response = \"\"\n",
    "display_handle = display(Markdown(\"\"), display_id=True)\n",
    "for chunk in stream:\n",
    "    response += chunk.choices[0].delta.content or ''\n",
    "    response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
    "    update_display(Markdown(response), display_id=display_handle.display_id)"
   ]
  },
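One side effect worth noting: the `replace("```","").replace("markdown", "")` cleanup above deletes every triple backtick and every occurrence of the word "markdown" in the stream, which is why the code blocks in the streamed answer render as bare `python` lines. A gentler sketch, assuming you only want to peel off an outer ```` ```markdown ```` wrapper (the helper name is hypothetical):

```python
# Hypothetical helper: strip only an outer ```markdown ... ``` wrapper, keeping inner fences
def strip_outer_fence(text):
    stripped = text.strip()
    if stripped.startswith("```markdown"):
        stripped = stripped[len("```markdown"):]
        if stripped.endswith("```"):
            stripped = stripped[:-3]
    return stripped.strip()
```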
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e92800fc-dcfd-4334-a61b-d53a325c9d25",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "Let's break down this line of code.\n",
       "\n",
       "**What is this code doing?**\n",
       "\n",
       "This code is using a combination of Python features to extract a list of authors from a collection of books, while avoiding certain issues with iteration.\n",
       "\n",
       "Here's what's happening:\n",
       "\n",
       "1. `yield from`: This keyword is used in Python 3.3 and later versions. It's short for \"yield from\" or \"pass through,\" which means that it sends each item yielded by the inner generator expression to the outer generator.\n",
       "2. `{book.get(\"author\") for book in books if book.get(\"author\")}`: This is a generator expression, also known as a \"list comprehension with generators.\" It's equivalent to writing a regular list comprehension but uses generators instead of lists.\n",
       "\n",
       "Here's what this part does:\n",
       "\n",
       "* `for book in books`: Iterates over the `books` collection.\n",
       "* `if book.get(\"author\")`: Filters out any book that doesn't have an \"author\" key. If a book is missing this key, it will be skipped in the iteration.\n",
       "* `book.get(\"author\")`: Retrieves the value associated with the \"author\" key from each book dictionary.\n",
       "\n",
       "So, putting it all together, `yield from {book.get(\"author\") for book in books if book.get(\"author\")}`:\n",
       "\n",
       "1. Iterates over each book in the `books` collection.\n",
       "2. Skips any book without an \"author\" key.\n",
       "3. Yields (i.e., sends) the author's name from each remaining book dictionary.\n",
       "\n",
       "**Why is this code necessary?**\n",
       "\n",
       "In Python, when you use a list comprehension or a for loop with a generator expression, it creates a temporary container (a list or iterator object) that stores the results of the expression. This can lead to memory issues if the collection being iterated over is very large.\n",
       "\n",
       "By using `yield from` and a generator expression, this code avoids creating an intermediate list or iterator object. Instead, it directly yields each author's name one by one, which is more memory-efficient for large collections.\n",
       "\n",
       "This technique is commonly used when working with large datasets, as it helps reduce memory usage and prevents potential out-of-memory errors.\n",
       "\n",
       "**Example Use Case**\n",
       "\n",
       "Suppose you have a collection of book dictionaries, like this:\n",
       "```python\n",
       "books = [\n",
       "    {\"title\": \"Book 1\", \"author\": \"Author A\"},\n",
       "    {\"title\": \"Book 2\", \"author\": \"Author B\"},\n",
       "    {\"title\": \"Book 3\"}, # missing author key\n",
       "    {\"title\": \"Book 4\", \"author\": \"Author D\"}\n",
       "]\n",
       "```\n",
       "Using the given code, you can extract a list of authors like this:\n",
       "```python\n",
       "authors = yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
       "print(authors) # Output: [\"Author A\", \"Author B\", \"Author D\"]\n",
       "```\n",
       "This will print out the names of all authors, without creating an intermediate list or iterator object."
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Get Llama 3.2 to answer\n",
    "\n",
    "response = ollama.chat(model=MODEL_LLAMA, messages=messages)\n",
    "reply = response['message']['content']\n",
    "display(Markdown(reply))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2623d9de-34dc-4f97-9726-5e95d26197e8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def input_question(question):\n",
    "    system_prompt = \"You are a helpful technical tutor who answers questions about python code, software engineering, data science and LLMs\"\n",
    "    user_prompt = f\" Please give a detailed explanation for this question: {question}\"\n",
    "\n",
    "    # messages\n",
    "\n",
    "    messages=[\n",
    "        {\"role\":\"system\",\"content\":system_prompt},\n",
    "        {\"role\":\"user\",\"content\":user_prompt}\n",
    "    ]\n",
    "\n",
    "    print(\"Fetching details of GPT-4o\")\n",
    "    # Get gpt-4o-mini to answer, with streaming\n",
    "\n",
    "    stream = openai.chat.completions.create(model=MODEL_GPT, messages=messages, stream=True)\n",
    "\n",
    "    response_GPT = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    for chunk in stream:\n",
    "        response_GPT += chunk.choices[0].delta.content or ''\n",
    "        response_GPT = response_GPT.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
    "        update_display(Markdown(response_GPT), display_id=display_handle.display_id)\n",
    "\n",
    "    print(\"Fetching response from Llama 3.2...\")\n",
    "    response_llama = ollama.chat(model=MODEL_LLAMA, messages=messages)\n",
    "    reply_llama = response_llama['message']['content']\n",
    "    display(Markdown(reply_llama))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "f52b1677-eddc-46a9-bd91-508c2fab2037",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "Please enter your question: what are LLms and its applications\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fetching details of GPT-4o\n"
     ]
    },
    {
     "data": {
      "text/markdown": [
       "Certainly! Let's break down the concept of LLMs, which stands for \"Large Language Models.\"\n",
       "\n",
       "### What are LLMs?\n",
       "\n",
       "Large Language Models are a class of artificial intelligence (AI) systems that use machine learning techniques to understand and generate human-like text based on the data they were trained on. These models are built primarily using neural networks, particularly a type called transformers, which have become the state-of-the-art architecture for various natural language processing (NLP) tasks.\n",
       "\n",
       "#### Key Features of LLMs:\n",
       "\n",
       "1. **Scale**: LLMs are named for their size—the \"large\" in LLM refers to the number of parameters (weights) in the model, which can range from millions to hundreds of billions. Training these models requires vast amounts of text data and significant computational resources.\n",
       "\n",
       "2. **Contextual Understanding**: LLMs can capture contextual relationships between words and phrases, enabling them to understand nuances in language better than smaller models.\n",
       "\n",
       "3. **Pre-training and Fine-tuning**:\n",
       "   - **Pre-training**: LLMs are initially trained on a large corpus of text in an unsupervised manner, allowing them to learn grammar, facts, and the structure of the language without specific task instructions.\n",
       "   - **Fine-tuning**: After pre-training, these models can be fine-tuned on specific datasets to optimize their performance for particular applications.\n",
       "\n",
       "4. **Generative Capabilities**: Many LLMs can generate coherent and contextually relevant text, making them useful for a wide array of applications beyond mere text classification or sentiment analysis.\n",
       "\n",
       "### Applications of LLMs:\n",
       "\n",
       "LLMs have a broad range of applications across various domains. Here are some of the most notable ones:\n",
       "\n",
       "1. **Chatbots and Virtual Assistants**: LLMs can power conversational agents that assist users in customer service, FAQs, and more, providing human-like responses.\n",
       "\n",
       "2. **Content Creation**: They can generate articles, blogs, stories, and even poetry. This functionality can be utilized in marketing, social media, and entertainment industries.\n",
       "\n",
       "3. **Translation Services**: LLMs can help in translating text between languages, providing not just word-for-word translations but contextually relevant interpretations.\n",
       "\n",
       "4. **Text Summarization**: They can condense longer text documents into summaries, extracting key points while maintaining coherence.\n",
       "\n",
       "5. **Sentiment Analysis**: Businesses use LLMs for analyzing customer feedback, reviews, or social media sentiment to drive decision-making.\n",
       "\n",
       "6. **Code Generation**: Some LLMs are trained on programming languages and can assist in code completions or generate functional code snippets, aiding software developers.\n",
       "\n",
       "7. **Education**: LLMs can assist in generating educational material, answering student queries, grading essays, and providing personalized learning experiences.\n",
       "\n",
       "8. **Healthcare**: They can process medical literature, assist in diagnosing by analyzing patient records, and help healthcare providers communicate more effectively with patients.\n",
       "\n",
       "9. **Legal Services**: LLMs can assist lawyers by summarizing cases, drafting documents, and conducting legal research by analyzing vast amounts of legal texts.\n",
       "\n",
       "10. **Creative Writing**: In art and entertainment, LLMs can collaborate with writers and artists to brainstorm ideas, generate scripts, or create interactive storytelling experiences.\n",
       "\n",
       "### Challenges and Considerations:\n",
       "\n",
       "While LLMs have significant potential, they also pose challenges:\n",
       "\n",
       "- **Bias**: LLMs can inadvertently perpetuate biases present in their training data, leading to unfair or prejudiced outcomes.\n",
       "\n",
       "- **Misinformation**: They might generate text that sounds plausible but is factually incorrect, necessitating rigorous fact-checking.\n",
       "\n",
       "- **Ethical Concerns**: The use of LLMs can raise questions about authorship, content ownership, and the implications of AI in society.\n",
       "\n",
       "- **Resource Intensive**: Training and running LLMs require substantial computational resources, raising environmental and accessibility concerns.\n",
       "\n",
       "In summary, LLMs represent a transformative advancement in AI and natural language processing, offering a range of applications that can significantly enhance human capabilities across various fields. As the technology continues to evolve, addressing its challenges will be crucial for its responsible and effective use."
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fetching response from Llama 3.2...\n"
     ]
    },
    {
     "data": {
      "text/markdown": [
       "**What is an LLM (Large Language Model)?**\n",
       "\n",
       "A Large Language Model (LLM) is a type of artificial intelligence (AI) model that is designed to process and understand human language. These models are trained on vast amounts of text data, which enables them to learn patterns, relationships, and nuances of language.\n",
       "\n",
       "LLMs are typically based on deep learning architectures, such as transformer models, which use self-attention mechanisms to weigh the importance of different words or tokens in a sentence. This allows LLMs to capture contextual information and generate human-like language outputs.\n",
       "\n",
       "**How do LLMs work?**\n",
       "\n",
       "The training process for an LLM involves the following steps:\n",
       "\n",
       "1. **Data collection**: A massive corpus of text data is collected, which can come from various sources such as books, articles, conversations, or even social media posts.\n",
       "2. **Preprocessing**: The text data is preprocessed to remove irrelevant information, such as punctuation, special characters, and stop words (common words like \"the\", \"and\", etc.).\n",
       "3. **Model training**: The LLM is trained on the preprocessed data using a supervised or unsupervised learning approach. The goal is to predict the next word in a sequence, given the context of the previous words.\n",
       "4. **Model evaluation**: The trained model is evaluated on a validation set to assess its performance and identify areas for improvement.\n",
       "\n",
       "**Applications of LLMs**\n",
       "\n",
       "LLMs have numerous applications across various industries:\n",
       "\n",
       "1. **Language Translation**: LLMs can be used for machine translation, where they can translate text from one language to another with high accuracy.\n",
       "2. **Text Summarization**: LLMs can summarize long documents or articles into concise summaries, highlighting the main points and key information.\n",
       "3. **Question Answering**: LLMs can answer questions based on the input text, providing relevant and accurate responses.\n",
       "4. **Sentiment Analysis**: LLMs can analyze text to determine its sentiment, emotional tone, or polarity (positive/negative).\n",
       "5. **Chatbots and Virtual Assistants**: LLMs can power chatbots and virtual assistants, enabling them to understand user queries and respond accordingly.\n",
       "6. **Content Generation**: LLMs can generate human-like content, such as news articles, product descriptions, or even entire books.\n",
       "7. **Natural Language Processing (NLP)**: LLMs are used in various NLP tasks, including text classification, entity recognition, and language modeling.\n",
       "8. **Speech Recognition**: LLMs can improve speech recognition systems by better understanding the context and nuances of human speech.\n",
       "\n",
       "**Real-world examples of LLMs**\n",
       "\n",
       "Some notable examples of LLMs include:\n",
       "\n",
       "1. **Google's BERT**: A pre-trained language model that has achieved state-of-the-art results in various NLP tasks.\n",
       "2. **Facebook's M** : A language model used for chatbots and virtual assistants on Facebook platforms.\n",
       "3. **Microsoft's Turing-NLG**: A natural language generation (NLG) model developed by Microsoft to generate human-like content.\n",
       "\n",
       "In conclusion, LLMs are powerful AI models that have revolutionized the field of NLP. Their applications are vast and diverse, ranging from simple text processing tasks to complex content generation and language translation.\n",
       "\n",
       "Do you have any specific questions about LLMs or their applications?"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "your_question = input(\"Please enter your question: \")\n",
    "input_question(your_question)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d39ac93-5536-422e-a21b-9eb89eb816cd",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -0,0 +1,951 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
   "metadata": {},
   "source": [
    "# Welcome to Week 2!\n",
    "\n",
    "## Frontier Model APIs\n",
    "\n",
    "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with OpenAI's API.\n",
    "\n",
    "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2b268b6e-0ba4-461e-af86-74a41f4d681f",
   "metadata": {},
   "source": [
    "<table style=\"margin: 0; text-align: left;\">\n",
    "    <tr>\n",
    "        <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
    "            <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
    "        </td>\n",
    "        <td>\n",
    "            <h2 style=\"color:#900;\">Important Note - Please read me</h2>\n",
    "            <span style=\"color:#900;\">I'm continually improving these labs, adding more examples and exercises.\n",
    "            At the start of each week, it's worth checking you have the latest code.<br/>\n",
    "            First do a <a href=\"https://chatgpt.com/share/6734e705-3270-8012-a074-421661af6ba9\">git pull and merge your changes as needed</a>. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!<br/><br/>\n",
    "            After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:<br/>\n",
    "            <code>conda env update -f environment.yml</code><br/>\n",
    "            Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):<br/>\n",
    "            <code>pip install -r requirements.txt</code>\n",
    "            <br/>Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n",
    "            </span>\n",
    "        </td>\n",
    "    </tr>\n",
    "</table>\n",
    "<table style=\"margin: 0; text-align: left;\">\n",
    "    <tr>\n",
    "        <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
    "            <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
    "        </td>\n",
    "        <td>\n",
    "            <h2 style=\"color:#f71;\">Reminder about the resources page</h2>\n",
    "            <span style=\"color:#f71;\">Here's a link to resources for the course. This includes links to all the slides.<br/>\n",
    "            <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">https://edwarddonner.com/2024/11/13/llm-engineering-resources/</a><br/>\n",
    "            Please keep this bookmarked, and I'll continue to add more useful links there over time.\n",
    "            </span>\n",
    "        </td>\n",
    "    </tr>\n",
    "</table>"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "85cfe275-4705-4d30-abea-643fbddf1db0",
   "metadata": {},
   "source": [
    "## Setting up your keys\n",
    "\n",
    "If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n",
    "\n",
    "**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthropic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n",
    "\n",
    "For OpenAI, visit https://openai.com/api/ \n",
    "For Anthropic, visit https://console.anthropic.com/ \n",
    "For Google, visit https://ai.google.dev/gemini-api \n",
    "\n",
    "### Also - adding DeepSeek if you wish\n",
    "\n",
    "Optionally, if you'd like to also use DeepSeek, create an account [here](https://platform.deepseek.com/), create a key [here](https://platform.deepseek.com/api_keys) and top up with at least the minimum $2 [here](https://platform.deepseek.com/top_up).\n",
    "\n",
    "### Adding API keys to your .env file\n",
    "\n",
    "When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n",
    "\n",
    "```\n",
    "OPENAI_API_KEY=xxxx\n",
    "ANTHROPIC_API_KEY=xxxx\n",
    "GOOGLE_API_KEY=xxxx\n",
    "DEEPSEEK_API_KEY=xxxx\n",
    "```\n",
    "\n",
    "Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "import anthropic\n",
    "from IPython.display import Markdown, display, update_display"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
   "metadata": {},
   "outputs": [],
   "source": [
    "# import for google\n",
    "# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n",
    "# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n",
    "\n",
    "import google.generativeai"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load environment variables in a file called .env\n",
    "# Print the key prefixes to help with any debugging\n",
    "\n",
    "load_dotenv(override=True)\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
    "\n",
    "if openai_api_key:\n",
    "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
    "else:\n",
    "    print(\"OpenAI API Key not set\")\n",
    "\n",
    "if anthropic_api_key:\n",
    "    print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
    "else:\n",
    "    print(\"Anthropic API Key not set\")\n",
    "\n",
    "if google_api_key:\n",
    "    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
    "else:\n",
    "    print(\"Google API Key not set\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Connect to OpenAI, Anthropic\n",
    "\n",
    "openai = OpenAI()\n",
    "\n",
    "claude = anthropic.Anthropic()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is the set up code for Gemini\n",
    "# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n",
    "\n",
    "google.generativeai.configure()"
   ]
  },
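If `google.generativeai` won't import or configure on your system, the alternative mentioned above typically means calling Gemini through its OpenAI-compatible endpoint with the `openai` client you already have. A sketch under that assumption: the base URL and model name below are Google's published compatibility values at the time of writing, so check the current Gemini docs before relying on them:

```python
# Hedged sketch: reach Gemini via its OpenAI-compatible endpoint, bypassing google.generativeai
gemini_via_openai = OpenAI(
    api_key=google_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
)
response = gemini_via_openai.chat.completions.create(
    model="gemini-1.5-flash",  # assumed model name - substitute a current Gemini model
    messages=[{"role": "user", "content": "Say hello in one short sentence"}]
)
print(response.choices[0].message.content)
```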
||||||
|
  {
   "cell_type": "markdown",
   "id": "42f77b59-2fb1-462a-b90d-78994e4cef33",
   "metadata": {},
   "source": [
    "## Asking LLMs to tell a joke\n",
    "\n",
    "It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n",
    "Later we will be putting LLMs to better use!\n",
    "\n",
    "### What information is included in the API call\n",
    "\n",
    "Typically we'll pass to the API:\n",
    "- The name of the model that should be used\n",
    "- A system message that gives overall context for the role the LLM is playing\n",
    "- A user message that provides the actual prompt\n",
    "\n",
    "There are other parameters that can be used, including **temperature**, which is typically between 0 and 1: higher for more random output, lower for more focused and deterministic output. The next cell is a minimal sketch of these ingredients in a single call to gpt-4o-mini; try rerunning it with different temperatures, then we'll compare several models."
   ]
  },
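  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b2f1c9d4-0a6e-4f3c-9c1d-2e7a8b5d4f10",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch of a single API call: model + system message + user message\n",
    "# temperature=0.0 should give close-to-deterministic output; try raising it towards 1.0 and rerunning\n",
    "\n",
    "response = openai.chat.completions.create(\n",
    "    model='gpt-4o-mini',\n",
    "    messages=[\n",
    "        {\"role\": \"system\", \"content\": \"You are a concise assistant\"},\n",
    "        {\"role\": \"user\", \"content\": \"In one sentence, what does the temperature parameter do?\"}\n",
    "    ],\n",
    "    temperature=0.0\n",
    ")\n",
    "print(response.choices[0].message.content)"
   ]
  },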
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "378a0296-59a2-45c6-82eb-941344d3eeff",
   "metadata": {},
   "outputs": [],
   "source": [
    "system_message = \"You are an assistant that is great at telling jokes\"\n",
    "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompts = [\n",
    "    {\"role\": \"system\", \"content\": system_message},\n",
    "    {\"role\": \"user\", \"content\": user_prompt}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
   "metadata": {},
   "outputs": [],
   "source": [
    "# GPT-3.5-Turbo\n",
    "\n",
    "completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n",
    "print(completion.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
   "metadata": {},
   "outputs": [],
   "source": [
    "# GPT-4o-mini\n",
    "# The temperature setting controls creativity\n",
    "\n",
    "completion = openai.chat.completions.create(\n",
    "    model='gpt-4o-mini',\n",
    "    messages=prompts,\n",
    "    temperature=0.7\n",
    ")\n",
    "print(completion.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
   "metadata": {},
   "outputs": [],
   "source": [
    "# GPT-4o\n",
    "\n",
    "completion = openai.chat.completions.create(\n",
    "    model='gpt-4o',\n",
    "    messages=prompts,\n",
    "    temperature=0.4\n",
    ")\n",
    "print(completion.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Claude 3.5 Sonnet\n",
    "# The Claude API needs the system message provided separately from the user prompt\n",
    "# Also adding max_tokens\n",
    "\n",
    "message = claude.messages.create(\n",
    "    model=\"claude-3-5-sonnet-latest\",\n",
    "    max_tokens=200,\n",
    "    temperature=0.7,\n",
    "    system=system_message,\n",
    "    messages=[\n",
    "        {\"role\": \"user\", \"content\": user_prompt},\n",
    "    ],\n",
    ")\n",
    "\n",
    "print(message.content[0].text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Claude 3.5 Sonnet again\n",
    "# Now let's add in streaming back results\n",
    "# If the streaming looks strange, then please see the note below this cell!\n",
    "\n",
    "result = claude.messages.stream(\n",
    "    model=\"claude-3-5-sonnet-latest\",\n",
    "    max_tokens=200,\n",
    "    temperature=0.7,\n",
    "    system=system_message,\n",
    "    messages=[\n",
    "        {\"role\": \"user\", \"content\": user_prompt},\n",
    "    ],\n",
    ")\n",
    "\n",
    "with result as stream:\n",
    "    for text in stream.text_stream:\n",
    "        print(text, end=\"\", flush=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dd1e17bc-cd46-4c23-b639-0c7b748e6c5a",
   "metadata": {},
   "source": [
    "## A rare problem with Claude streaming on some Windows boxes\n",
    "\n",
    "Two students have noticed a strange thing happening with Claude's streaming into Jupyter Lab's output: it sometimes seems to swallow up parts of the response.\n",
    "\n",
    "To fix this, replace the code:\n",
    "\n",
    "`print(text, end=\"\", flush=True)`\n",
    "\n",
    "with this:\n",
    "\n",
    "`clean_text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")`  \n",
    "`print(clean_text, end=\"\", flush=True)`\n",
    "\n",
    "and it should work fine! The next cell is the full streaming call with that workaround applied, in case you'd like to run it directly; note that swapping newlines for spaces flattens the layout of the response."
   ]
  },
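  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5a7c3d1-8b2f-4c96-a1d4-7f3e9b0c6a58",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The same Claude streaming call as above, with the workaround applied\n",
    "# Note: replacing newlines with spaces flattens the formatting - only use this if you hit the swallowing issue\n",
    "\n",
    "result = claude.messages.stream(\n",
    "    model=\"claude-3-5-sonnet-latest\",\n",
    "    max_tokens=200,\n",
    "    temperature=0.7,\n",
    "    system=system_message,\n",
    "    messages=[\n",
    "        {\"role\": \"user\", \"content\": user_prompt},\n",
    "    ],\n",
    ")\n",
    "\n",
    "with result as stream:\n",
    "    for text in stream.text_stream:\n",
    "        clean_text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")\n",
    "        print(clean_text, end=\"\", flush=True)"
   ]
  },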
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The API for Gemini has a slightly different structure.\n",
    "# I've heard that on some PCs, this Gemini code causes the kernel to crash.\n",
    "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
    "\n",
    "gemini = google.generativeai.GenerativeModel(\n",
    "    model_name='gemini-2.0-flash-exp',\n",
    "    system_instruction=system_message\n",
    ")\n",
    "response = gemini.generate_content(user_prompt)\n",
    "print(response.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49009a30-037d-41c8-b874-127f61c4aa3a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# As an alternative way to use Gemini that bypasses Google's python API library,\n",
    "# Google has recently released new endpoints that let you use Gemini via the OpenAI client library!\n",
    "\n",
    "gemini_via_openai_client = OpenAI(\n",
    "    api_key=google_api_key,\n",
    "    base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
    ")\n",
    "\n",
    "response = gemini_via_openai_client.chat.completions.create(\n",
    "    model=\"gemini-2.0-flash-exp\",\n",
    "    messages=prompts\n",
    ")\n",
    "print(response.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "33f70c88-7ca9-470b-ad55-d93a57dcc0ab",
   "metadata": {},
   "source": [
    "## (Optional) Trying out the DeepSeek model\n",
    "\n",
    "### Let's ask DeepSeek a really hard question - both the Chat and the Reasoner model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3d0019fb-f6a8-45cb-962b-ef8bf7070d4d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optionally, if you wish to try DeepSeek, you can also use the OpenAI client library\n",
    "\n",
    "deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n",
    "\n",
    "if deepseek_api_key:\n",
    "    print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n",
    "else:\n",
    "    print(\"DeepSeek API Key not set - please skip to the next section if you don't wish to try the DeepSeek API\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c72c871e-68d6-4668-9c27-96d52b77b867",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Using DeepSeek Chat\n",
    "\n",
    "deepseek_via_openai_client = OpenAI(\n",
    "    api_key=deepseek_api_key,\n",
    "    base_url=\"https://api.deepseek.com\"\n",
    ")\n",
    "\n",
    "response = deepseek_via_openai_client.chat.completions.create(\n",
    "    model=\"deepseek-chat\",\n",
    "    messages=prompts,\n",
    ")\n",
    "\n",
    "print(response.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "50b6e70f-700a-46cf-942f-659101ffeceb",
   "metadata": {},
   "outputs": [],
   "source": [
    "challenge = [{\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
    "             {\"role\": \"user\", \"content\": \"How many words are there in your answer to this prompt?\"}]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "66d1151c-2015-4e37-80c8-16bc16367cfe",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Using DeepSeek Chat with a harder question! And streaming results\n",
    "\n",
    "stream = deepseek_via_openai_client.chat.completions.create(\n",
    "    model=\"deepseek-chat\",\n",
    "    messages=challenge,\n",
    "    stream=True\n",
    ")\n",
    "\n",
    "reply = \"\"\n",
    "display_handle = display(Markdown(\"\"), display_id=True)\n",
    "for chunk in stream:\n",
    "    reply += chunk.choices[0].delta.content or ''\n",
    "    reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
    "    update_display(Markdown(reply), display_id=display_handle.display_id)\n",
    "\n",
    "print(\"Number of words:\", len(reply.split()))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "43a93f7d-9300-48cc-8c1a-ee67380db495",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Using DeepSeek Reasoner - this may hit an error if DeepSeek is busy\n",
    "# It's over-subscribed (as of 28-Jan-2025) but should come back online soon!\n",
    "# If this fails, come back to this in a few days.\n",
    "\n",
    "response = deepseek_via_openai_client.chat.completions.create(\n",
    "    model=\"deepseek-reasoner\",\n",
    "    messages=challenge\n",
    ")\n",
    "\n",
    "reasoning_content = response.choices[0].message.reasoning_content\n",
    "content = response.choices[0].message.content\n",
    "\n",
    "print(reasoning_content)\n",
    "print(content)\n",
    "print(\"Number of words:\", len(content.split()))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c09e6b5c-6816-4cd3-a5cd-a20e4171b1a0",
   "metadata": {},
   "source": [
    "## Back to OpenAI with a serious question"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
   "metadata": {},
   "outputs": [],
   "source": [
    "# To be serious! GPT-4o-mini with the original question\n",
    "\n",
    "prompts = [\n",
    "    {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n",
    "    {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? Please respond in Markdown.\"}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Have it stream back results in Markdown\n",
    "\n",
    "stream = openai.chat.completions.create(\n",
    "    model='gpt-4o',\n",
    "    messages=prompts,\n",
    "    temperature=0.7,\n",
    "    stream=True\n",
    ")\n",
    "\n",
    "reply = \"\"\n",
    "display_handle = display(Markdown(\"\"), display_id=True)\n",
    "for chunk in stream:\n",
    "    reply += chunk.choices[0].delta.content or ''\n",
    "    reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
    "    update_display(Markdown(reply), display_id=display_handle.display_id)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
   "metadata": {},
   "source": [
    "## And now for some fun - an adversarial conversation between chatbots!\n",
    "\n",
    "You're already familiar with prompts being organized into lists like:\n",
    "\n",
    "```\n",
    "[\n",
    "    {\"role\": \"system\", \"content\": \"system message here\"},\n",
    "    {\"role\": \"user\", \"content\": \"user prompt here\"}\n",
    "]\n",
    "```\n",
    "\n",
    "In fact, this structure can be used to reflect a longer conversation history:\n",
    "\n",
    "```\n",
    "[\n",
    "    {\"role\": \"system\", \"content\": \"system message here\"},\n",
    "    {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
    "    {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
    "    {\"role\": \"user\", \"content\": \"the new user prompt\"}\n",
    "]\n",
    "```\n",
    "\n",
    "And we can use this approach to engage in a longer interaction with history. The next cell is a minimal sketch of one such round trip with gpt-4o-mini (the follow-up question is just an illustration); after that we set up the full back-and-forth."
   ]
  },
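  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e4c2a91-5d3b-4f08-8c6a-1b9e0d7f3a22",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch of conversation history in action - the follow-up only makes sense\n",
    "# because the assistant's first answer travels back up in the messages list\n",
    "\n",
    "history = [\n",
    "    {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
    "    {\"role\": \"user\", \"content\": \"Pick a number between 1 and 10\"}\n",
    "]\n",
    "first_reply = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=history).choices[0].message.content\n",
    "print(first_reply)\n",
    "\n",
    "history.append({\"role\": \"assistant\", \"content\": first_reply})\n",
    "history.append({\"role\": \"user\", \"content\": \"Double it and tell me the result\"})\n",
    "second_reply = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=history).choices[0].message.content\n",
    "print(second_reply)"
   ]
  },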
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
    "# We're using cheap versions of models so the costs will be minimal\n",
    "\n",
    "gpt_model = \"gpt-4o-mini\"\n",
    "claude_model = \"claude-3-haiku-20240307\"\n",
    "\n",
    "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
    "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
    "\n",
    "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
    "everything the other person says, or find common ground. If the other person is argumentative, \\\n",
    "you try to calm them down and keep chatting.\"\n",
    "\n",
    "gpt_messages = [\"Hi there\"]\n",
    "claude_messages = [\"Hi\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_gpt():\n",
    "    # Rebuild GPT's view of the conversation: its own lines are 'assistant', Claude's are 'user'\n",
    "    # (the loop variable is claude_message, to avoid shadowing the claude client)\n",
    "    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
    "    for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
    "        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
    "        messages.append({\"role\": \"user\", \"content\": claude_message})\n",
    "    completion = openai.chat.completions.create(\n",
    "        model=gpt_model,\n",
    "        messages=messages\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gpt()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_claude():\n",
    "    # Claude's view: GPT's lines are 'user', its own are 'assistant'; end with GPT's latest message\n",
    "    messages = []\n",
    "    for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
    "        messages.append({\"role\": \"user\", \"content\": gpt})\n",
    "        messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
    "    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
    "    message = claude.messages.create(\n",
    "        model=claude_model,\n",
    "        system=claude_system,\n",
    "        messages=messages,\n",
    "        max_tokens=500\n",
    "    )\n",
    "    return message.content[0].text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "01395200-8ae9-41f8-9a04-701624d3fd26",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_claude()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gpt()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
   "metadata": {},
   "outputs": [],
   "source": [
    "gpt_messages = [\"Hi there\"]\n",
    "claude_messages = [\"Hi\"]\n",
    "\n",
    "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
    "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
    "\n",
    "for i in range(5):\n",
    "    gpt_next = call_gpt()\n",
    "    print(f\"GPT:\\n{gpt_next}\\n\")\n",
    "    gpt_messages.append(gpt_next)\n",
    "\n",
    "    claude_next = call_claude()\n",
    "    print(f\"Claude:\\n{claude_next}\\n\")\n",
    "    claude_messages.append(claude_next)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1d10e705-db48-4290-9dc8-9efdb4e31323",
   "metadata": {},
   "source": [
    "<table style=\"margin: 0; text-align: left;\">\n",
    "    <tr>\n",
    "        <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
    "            <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
    "        </td>\n",
    "        <td>\n",
    "            <h2 style=\"color:#900;\">Before you continue</h2>\n",
    "            <span style=\"color:#900;\">\n",
    "                Be sure you understand how the conversation above is working, and in particular how the <code>messages</code> list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic? One possible sketch of such a variation follows below.<br/>\n",
    "            </span>\n",
    "        </td>\n",
    "    </tr>\n",
    "</table>"
   ]
  },
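  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d4a8f3b6-2c71-4e59-9b0a-6f5c8e2d1a47",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One possible personality swap for the exercise above - a gloomy pessimist vs a relentless optimist\n",
    "# (a sketch; feel free to write your own - redefine the system prompts, reset the message lists,\n",
    "# then rerun the conversation loop above)\n",
    "\n",
    "gpt_system = \"You are a deeply pessimistic chatbot; you expect the worst outcome \\\n",
    "from anything discussed, and you explain gloomily why things will probably go wrong.\"\n",
    "\n",
    "claude_system = \"You are a relentlessly optimistic chatbot; you find the silver lining \\\n",
    "in everything the other person says and try to cheer them up.\"\n",
    "\n",
    "gpt_messages = [\"Hi there\"]\n",
    "claude_messages = [\"Hi\"]"
   ]
  },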
  {
   "cell_type": "markdown",
   "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
   "metadata": {},
   "source": [
    "# More advanced exercises\n",
    "\n",
    "Try creating a 3-way, perhaps bringing Gemini into the conversation! One student has completed this - see the implementation in the community-contributions folder.\n",
    "\n",
    "Try doing this yourself before you look at the solutions. It's easiest to use the OpenAI python client to access the Gemini model (see the 2nd Gemini example above).\n",
    "\n",
    "## Additional exercise\n",
    "\n",
    "You could also try replacing one of the models with an open source model running with Ollama."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "446c81e3-b67e-4cd9-8113-bc3092b93063",
   "metadata": {},
   "source": [
    "<table style=\"margin: 0; text-align: left;\">\n",
    "    <tr>\n",
    "        <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
    "            <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
    "        </td>\n",
    "        <td>\n",
    "            <h2 style=\"color:#181;\">Business relevance</h2>\n",
    "            <span style=\"color:#181;\">This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.</span>\n",
    "        </td>\n",
    "    </tr>\n",
    "</table>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c23224f6-7008-44ed-a57f-718975f4e291",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Exercise: a 3-way conversation between OpenAI, Gemini and Ollama\n",
    "\n",
    "openai = OpenAI()\n",
    "\n",
    "# This is for Google Gemini\n",
    "gemini_via_openai = OpenAI(base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\", api_key=google_api_key)\n",
    "\n",
    "# This is for local Llama\n",
    "llama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
    "\n",
    "gpt_model = \"gpt-4o-mini\"\n",
    "gemini_model = \"gemini-2.0-flash-exp\"\n",
    "ollama_model = \"llama3.2\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "495b0854-f686-4afb-8fde-0d68442f8caf",
   "metadata": {},
   "outputs": [],
   "source": [
    "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
    "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
    "\n",
    "gemini_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
    "everything the other person says, or find common ground. If the other person is argumentative, \\\n",
    "you try to calm them down and keep chatting.\"\n",
    "\n",
    "ollama_system = \"You are an extremely knowledgeable and know-it-all counselor chatbot. You try to help resolve disagreements, \\\n",
    "and if a person is either too argumentative or too polite, you cannot help but use quotes from famous psychologists to teach \\\n",
    "your students to be kind yet maintain boundaries.\"\n",
    "\n",
    "gemini_messages = [\"Hey everyone, thoughts on AGI?\"]\n",
    "gpt_messages = [\"AGI? You mean Always Getting Irritated by AI hype?\"]\n",
    "ollama_messages = [\"AGI is the next stage of evolution in human cognition, blended with machines.\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a2b3533e-5377-40d9-be8c-23742ca9ca2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_openai():\n",
    "    # Each round runs Gemini, then GPT, then Ollama - so from GPT's point of view a round is:\n",
    "    # user (Gemini), assistant (its own reply), user (Ollama).\n",
    "    # When GPT is called, only Gemini's newest message is still outstanding.\n",
    "    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
    "    for op, gem, llam in zip(gpt_messages, gemini_messages, ollama_messages):\n",
    "        messages.append({\"role\": \"user\", \"content\": gem})\n",
    "        messages.append({\"role\": \"assistant\", \"content\": op})\n",
    "        messages.append({\"role\": \"user\", \"content\": llam})\n",
    "    messages.append({\"role\": \"user\", \"content\": gemini_messages[-1]})\n",
    "\n",
    "    completion = openai.chat.completions.create(\n",
    "        model=gpt_model,\n",
    "        messages=messages\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d07b8796-e18f-453b-b616-f6a3523d6a96",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_openai()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "73a38fc2-aa4a-425a-89c7-b0da807ba70b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_gemini():\n",
    "    # From Gemini's point of view a round is: assistant (its own message), user (GPT), user (Ollama).\n",
    "    # Gemini speaks first in each round, so when it is called all three lists\n",
    "    # have the same length and the zip covers the whole history.\n",
    "    messages = [{\"role\": \"system\", \"content\": gemini_system}]\n",
    "    for gem, op, llam in zip(gemini_messages, gpt_messages, ollama_messages):\n",
    "        messages.append({\"role\": \"assistant\", \"content\": gem})\n",
    "        messages.append({\"role\": \"user\", \"content\": op})\n",
    "        messages.append({\"role\": \"user\", \"content\": llam})\n",
    "\n",
    "    completion = gemini_via_openai.chat.completions.create(\n",
    "        model=gemini_model,\n",
    "        messages=messages\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "79607d52-0803-4013-9308-91112802a8e3",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gemini()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1fc2afe3-e99c-41cc-a088-08eba3e23c98",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_ollama():\n",
    "    # From Ollama's point of view a round is: user (Gemini), user (GPT), assistant (its own reply).\n",
    "    # Gemini and GPT have already spoken this round, so their newest messages\n",
    "    # are appended after the zip.\n",
    "    messages = [{\"role\": \"system\", \"content\": ollama_system}]\n",
    "    for llam, gem, op in zip(ollama_messages, gemini_messages, gpt_messages):\n",
    "        messages.append({\"role\": \"user\", \"content\": gem})\n",
    "        messages.append({\"role\": \"user\", \"content\": op})\n",
    "        messages.append({\"role\": \"assistant\", \"content\": llam})\n",
    "    messages.append({\"role\": \"user\", \"content\": gemini_messages[-1]})\n",
    "    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
    "\n",
    "    completion = llama_via_openai.chat.completions.create(\n",
    "        model=ollama_model,\n",
    "        messages=messages\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "faae150d-b859-4a13-a725-5b88e7dad46b",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_ollama()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2ce659d1-6a27-407e-9492-1ba736e3f06e",
   "metadata": {},
   "outputs": [],
   "source": [
    "gemini_messages = [\"Hey everyone, thoughts on AGI?\"]\n",
    "gpt_messages = [\"AGI? You mean Always Getting Irritated by AI hype?\"]\n",
    "ollama_messages = [\"AGI is the next stage of evolution in human cognition, blended with machines.\"]\n",
    "\n",
    "print(f\"\\n🌌 Gemini:\\n{gemini_messages[0]}\\n\")\n",
    "print(f\"\\n🧠 OpenAI:\\n{gpt_messages[0]}\\n\")\n",
    "print(f\"\\n🦙 Ollama:\\n{ollama_messages[0]}\\n\")\n",
    "\n",
    "print(\"Starting Multi-Agent Chat...\\n\")\n",
    "for i in range(5):\n",
    "    gemini_reply = call_gemini()\n",
    "    print(f\"\\n🌌 Gemini:\\n{gemini_reply}\")\n",
    "    gemini_messages.append(gemini_reply)\n",
    "\n",
    "    openai_reply = call_openai()\n",
    "    print(f\"\\n🧠 OpenAI:\\n{openai_reply}\")\n",
    "    gpt_messages.append(openai_reply)\n",
    "\n",
    "    ollama_reply = call_ollama()\n",
    "    print(f\"\\n🦙 Ollama:\\n{ollama_reply}\")\n",
    "    ollama_messages.append(ollama_reply)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2cc0b127-5b36-4110-947a-dc9a4a1a8db0",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}