From 606b11682cf1d9294ca65baa3940db764871c5bf Mon Sep 17 00:00:00 2001 From: SyedNaqi Hussain Date: Sat, 22 Feb 2025 14:47:39 -0600 Subject: [PATCH 01/43] Use llm to generate funny tweet on image alt-text --- week1/day1.ipynb | 103 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 82 insertions(+), 21 deletions(-) diff --git a/week1/day1.ipynb b/week1/day1.ipynb index bb8e5fa..af7aac4 100644 --- a/week1/day1.ipynb +++ b/week1/day1.ipynb @@ -159,8 +159,8 @@ "metadata": {}, "outputs": [], "source": [ - "openai = OpenAI()\n", - "\n", + "import httpx\n", + "openai = OpenAI(http_client=httpx.Client(verify=False))\n", "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" ] @@ -217,7 +217,8 @@ " Create this Website object from the given url using the BeautifulSoup library\n", " \"\"\"\n", " self.url = url\n", - " response = requests.get(url, headers=headers)\n", + " requests.packages.urllib3.disable_warnings()\n", + " response = requests.get(url, headers=headers, verify=False)\n", " soup = BeautifulSoup(response.content, 'html.parser')\n", " self.title = soup.title.string if soup.title else \"No title found\"\n", " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", @@ -233,8 +234,7 @@ "outputs": [], "source": [ "# Let's try one out. Change the website and add print statements to follow along.\n", - "\n", - "ed = Website(\"https://edwarddonner.com\")\n", + "ed = Website(\"http://edwarddonner.com\")\n", "print(ed.title)\n", "print(ed.text)" ] @@ -434,12 +434,24 @@ "cell_type": "code", "execution_count": null, "id": "3018853a-445f-41ff-9560-d925d1774b2f", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ "display_summary(\"https://edwarddonner.com\")" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://openai.com\")" + ] + }, { "cell_type": "markdown", "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", @@ -470,7 +482,9 @@ "cell_type": "code", "execution_count": null, "id": "75e9fd40-b354-4341-991e-863ef2e59db7", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ "display_summary(\"https://anthropic.com\")" @@ -510,30 +524,77 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", "metadata": {}, - "outputs": [], - "source": [ + "outputs": [ + { + "data": { + "text/markdown": [ + "Here's a markdown layout featuring tables for each image with a funny tweet alongside it:\n", + "\n", + "```markdown\n", + "| Image | Funny Tweet |\n", + "|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------|\n", + "| ![Vintage Motorcycle](https://images.pexels.com/photos/30770767/pexels-photo-30770767/free-photo-of-classic-motorcycle-in-kerala-countryside.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When you want to feel like a rebel, but your bike is still in the shop. ๐Ÿ๏ธ๐Ÿ˜‚\" |\n", + "| ![Flock of Birds](https://images.pexels.com/photos/30810205/pexels-photo-30810205/free-photo-of-flock-of-birds-in-flight-against-clear-sky.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When the squad is finally ready to leave the party but you can't find your keys. ๐Ÿ•Š๏ธ๐Ÿคฃ\" |\n", + "| ![Playful Seals](https://images.pexels.com/photos/30824250/pexels-photo-30824250/free-photo-of-playful-seals-on-rocky-san-diego-shore.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When youโ€™re trying to chill at the beach, but your buddy wonโ€™t stop splashing you. ๐Ÿฆญ๐Ÿ’ฆ\" |\n", + "```\n", + "\n", + "Feel free to use or modify the layout and the tweets as you see fit!" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# A small exercise to feed the llm with image alt text and return a funny tweet.\n", + "\n", "# Step 1: Create your prompts\n", + "import json\n", + "system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n", + "class website:\n", + " def __init__(self,url):\n", + " self.url = url\n", + " requests.packages.urllib3.disable_warnings()\n", + " response = requests.get(url, headers=headers, verify=False)\n", + " html_content = response.content\n", + " soup = BeautifulSoup(html_content, 'html.parser')\n", + " image_tags = soup.find_all('img')\n", + " self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n", + " self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n", + "\n", + " # Restricting to 3 images only.\n", + " if self.image_urls:\n", + " self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n", + " else:\n", + " self.images = {}\n", + " \n", "\n", - "system_prompt = \"something here\"\n", - "user_prompt = \"\"\"\n", - " Lots of text\n", - " Can be pasted here\n", - "\"\"\"\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"Following are images with their alt-text:\"\n", + " user_prompt += json.dumps(website.images)\n", + " user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n", + " return user_prompt\n", "\n", - "# Step 2: Make the messages list\n", "\n", - "messages = [] # fill this in\n", + "# Step 2: Make the messages list\n", + "page = website(\"https://www.pexels.com/\")\n", + "user_prompt = user_prompt_for(page)\n", + "messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n", "\n", "# Step 3: Call OpenAI\n", - "\n", - "response =\n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", "\n", "# Step 4: print the result\n", - "\n", - "print(" + "display(Markdown((response.choices[0].message.content)))" ] }, { From 23dafd3ef3c63e2ebc6f23e985d62f5bd679226c Mon Sep 17 00:00:00 2001 From: SyedNaqi Hussain Date: Sat, 22 Feb 2025 14:50:41 -0600 Subject: [PATCH 02/43] Rename file --- week1/tweet-generate-from-alt-text.ipynb | 655 +++++++++++++++++++++++ 1 file changed, 655 insertions(+) create mode 100644 week1/tweet-generate-from-alt-text.ipynb diff --git a/week1/tweet-generate-from-alt-text.ipynb b/week1/tweet-generate-from-alt-text.ipynb new file mode 100644 index 0000000..af7aac4 --- /dev/null +++ b/week1/tweet-generate-from-alt-text.ipynb @@ -0,0 +1,655 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# YOUR FIRST LAB\n", + "## Please read this. This is super-critical to get you prepared; there's no fluff here!\n", + "\n", + "## Your first Frontier LLM Project\n", + "\n", + "Let's build a useful LLM solution - in a matter of minutes.\n", + "\n", + "By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", + "\n", + "Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", + "\n", + "Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", + "\n", + "## If you're new to Jupyter Lab\n", + "\n", + "Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", + "\n", + "I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", + "\n", + "## If you're new to the Command Line\n", + "\n", + "Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", + "Linux people, something tells me you could teach _me_ a thing or two about the command line!\n", + "\n", + "## If you'd prefer to work in IDEs\n", + "\n", + "If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n", + "If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", + "\n", + "## If you'd like to brush up your Python\n", + "\n", + "I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", + "`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", + "\n", + "## I am here to help\n", + "\n", + "If you have any problems at all, please do reach out. \n", + "I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n", + "And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done ๐Ÿ˜‚ \n", + "\n", + "## More troubleshooting\n", + "\n", + "Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", + "\n", + "## If this is old hat!\n", + "\n", + "If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Please read - important note

\n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Treat these labs as a resource

\n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business value of these exercises

\n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "markdown", + "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", + "metadata": {}, + "source": [ + "# Connecting to OpenAI\n", + "\n", + "The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", + "\n", + "## Troubleshooting if you have problems:\n", + "\n", + "Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", + "\n", + "If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", + "\n", + "Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", + "\n", + "Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "import httpx\n", + "openai = OpenAI(http_client=httpx.Client(verify=False))\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" + ] + }, + { + "cell_type": "markdown", + "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", + "metadata": {}, + "source": [ + "# Let's make a quick call to a Frontier model to get started, as a preview!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", + "\n", + "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "2aa190e5-cb31-456a-96cc-db109919cd78", + "metadata": {}, + "source": [ + "## OK onwards with our first project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " requests.packages.urllib3.disable_warnings()\n", + " response = requests.get(url, headers=headers, verify=False)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "ed = Website(\"http://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. \\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [], + "source": [ + "print(user_prompt_for(ed))" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "The API from OpenAI expects to receive messages in a particular structure.\n", + "Many of the other APIs share this structure:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with system and user messages:\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for GPT-4o-mini, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", + "metadata": {}, + "outputs": [], + "source": [ + "# Try this out, and then try for a few more websites\n", + "\n", + "messages_for(ed)" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for OpenAI is very simple!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", + "metadata": {}, + "outputs": [], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3018853a-445f-41ff-9560-d925d1774b2f", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "display_summary(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://openai.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", + "metadata": {}, + "source": [ + "# Let's try more websites\n", + "\n", + "Note that this will only work on websites that can be scraped using this simplistic approach.\n", + "\n", + "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", + "\n", + "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", + "\n", + "But many websites will work just fine!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d83403-a24c-44b5-84ac-961449b4008f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://cnn.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75e9fd40-b354-4341-991e-863ef2e59db7", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "display_summary(\"https://anthropic.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "c951be1a-7f1b-448f-af1f-845978e47e2c", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business applications

\n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Before you continue - now try yourself

\n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Here's a markdown layout featuring tables for each image with a funny tweet alongside it:\n", + "\n", + "```markdown\n", + "| Image | Funny Tweet |\n", + "|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------|\n", + "| ![Vintage Motorcycle](https://images.pexels.com/photos/30770767/pexels-photo-30770767/free-photo-of-classic-motorcycle-in-kerala-countryside.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When you want to feel like a rebel, but your bike is still in the shop. ๐Ÿ๏ธ๐Ÿ˜‚\" |\n", + "| ![Flock of Birds](https://images.pexels.com/photos/30810205/pexels-photo-30810205/free-photo-of-flock-of-birds-in-flight-against-clear-sky.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When the squad is finally ready to leave the party but you can't find your keys. ๐Ÿ•Š๏ธ๐Ÿคฃ\" |\n", + "| ![Playful Seals](https://images.pexels.com/photos/30824250/pexels-photo-30824250/free-photo-of-playful-seals-on-rocky-san-diego-shore.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When youโ€™re trying to chill at the beach, but your buddy wonโ€™t stop splashing you. ๐Ÿฆญ๐Ÿ’ฆ\" |\n", + "```\n", + "\n", + "Feel free to use or modify the layout and the tweets as you see fit!" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# A small exercise to feed the llm with image alt text and return a funny tweet.\n", + "\n", + "# Step 1: Create your prompts\n", + "import json\n", + "system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n", + "class website:\n", + " def __init__(self,url):\n", + " self.url = url\n", + " requests.packages.urllib3.disable_warnings()\n", + " response = requests.get(url, headers=headers, verify=False)\n", + " html_content = response.content\n", + " soup = BeautifulSoup(html_content, 'html.parser')\n", + " image_tags = soup.find_all('img')\n", + " self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n", + " self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n", + "\n", + " # Restricting to 3 images only.\n", + " if self.image_urls:\n", + " self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n", + " else:\n", + " self.images = {}\n", + " \n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"Following are images with their alt-text:\"\n", + " user_prompt += json.dumps(website.images)\n", + " user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n", + " return user_prompt\n", + "\n", + "\n", + "# Step 2: Make the messages list\n", + "page = website(\"https://www.pexels.com/\")\n", + "user_prompt = user_prompt_for(page)\n", + "messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n", + "\n", + "# Step 3: Call OpenAI\n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", + "\n", + "# Step 4: print the result\n", + "display(Markdown((response.choices[0].message.content)))" + ] + }, + { + "cell_type": "markdown", + "id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", + "metadata": {}, + "source": [ + "## An extra exercise for those who enjoy web scraping\n", + "\n", + "You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" + ] + }, + { + "cell_type": "markdown", + "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", + "metadata": {}, + "source": [ + "# Sharing your code\n", + "\n", + "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", + "\n", + "If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", + "\n", + "Here are good instructions courtesy of an AI friend: \n", + "https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4484fcf-8b39-4c3f-9674-37970ed71988", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 67a296c6e9191f2e2f4e865b9ad6b8f6ae02d2f2 Mon Sep 17 00:00:00 2001 From: SyedNaqi Hussain Date: Sat, 22 Feb 2025 19:18:24 -0600 Subject: [PATCH 03/43] Output clear. --- week1/tweet-generate-from-alt-text.ipynb | 27 ++---------------------- 1 file changed, 2 insertions(+), 25 deletions(-) diff --git a/week1/tweet-generate-from-alt-text.ipynb b/week1/tweet-generate-from-alt-text.ipynb index af7aac4..9b7ba91 100644 --- a/week1/tweet-generate-from-alt-text.ipynb +++ b/week1/tweet-generate-from-alt-text.ipynb @@ -524,33 +524,10 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "Here's a markdown layout featuring tables for each image with a funny tweet alongside it:\n", - "\n", - "```markdown\n", - "| Image | Funny Tweet |\n", - "|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------|\n", - "| ![Vintage Motorcycle](https://images.pexels.com/photos/30770767/pexels-photo-30770767/free-photo-of-classic-motorcycle-in-kerala-countryside.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When you want to feel like a rebel, but your bike is still in the shop. ๐Ÿ๏ธ๐Ÿ˜‚\" |\n", - "| ![Flock of Birds](https://images.pexels.com/photos/30810205/pexels-photo-30810205/free-photo-of-flock-of-birds-in-flight-against-clear-sky.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When the squad is finally ready to leave the party but you can't find your keys. ๐Ÿ•Š๏ธ๐Ÿคฃ\" |\n", - "| ![Playful Seals](https://images.pexels.com/photos/30824250/pexels-photo-30824250/free-photo-of-playful-seals-on-rocky-san-diego-shore.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When youโ€™re trying to chill at the beach, but your buddy wonโ€™t stop splashing you. ๐Ÿฆญ๐Ÿ’ฆ\" |\n", - "```\n", - "\n", - "Feel free to use or modify the layout and the tweets as you see fit!" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# A small exercise to feed the llm with image alt text and return a funny tweet.\n", "\n", From ed6c0d140a1899376f98c8ea37207348d961c74a Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Wed, 26 Feb 2025 08:48:41 +0500 Subject: [PATCH 04/43] Added llama 3.2 - claude 3.5 sonnet chatbot interaction to community-contributions --- .../day1-ollama-claude.ipynb | 218 ++++++++++++++++++ 1 file changed, 218 insertions(+) create mode 100644 week2/community-contributions/day1-ollama-claude.ipynb diff --git a/week2/community-contributions/day1-ollama-claude.ipynb b/week2/community-contributions/day1-ollama-claude.ipynb new file mode 100644 index 0000000..f620759 --- /dev/null +++ b/week2/community-contributions/day1-ollama-claude.ipynb @@ -0,0 +1,218 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e063b35e-5598-4084-b255-89956bfedaac", + "metadata": {}, + "source": [ + "### Models an interaction between LLama 3.2 and Claude 3.5 Haiku" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4f534359-cdb4-4441-aa66-d6700fa4d6a5", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "import anthropic\n", + "import ollama" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3bdff240-9118-4061-9369-585c4d4ce0a7", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff110b3f-3986-4fd8-a0b1-fd4b51133a8d", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to Anthropic\n", + "\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6e596c6-6307-49c1-a29f-5c4e88f8d34d", + "metadata": {}, + "outputs": [], + "source": [ + "# Download the llama3.2:1b model for local execution.\n", + "!ollama pull llama3.2:1b" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "633b6892-6d04-40cb-8b61-196fc754b00c", + "metadata": {}, + "outputs": [], + "source": [ + "# Define models\n", + "CLAUDE_MODEL = \"claude-3-5-haiku-latest\"\n", + "LLAMA_MODEL = \"llama3.2:1b\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a699a809-e3d3-4392-94bd-e2f80a5aec60", + "metadata": {}, + "outputs": [], + "source": [ + "claude_system = \"You are a chatbot designed as a study tutor for undergraduate students. \\\n", + "You explain information and key-technical terms related to the subject in a succint yet \\\n", + "comprehensive manner. You may use tables, formatting and other visuals to help create \\\n", + "'cheat-sheets' of sorts.\"\n", + "\n", + "llama_system = \"You are a chatbot designed to ask questions about different topics related to \\\n", + "computer vision. You are meant to simulate a student, not teacher. Act as if you have no \\\n", + "prior knowledge\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bdb049d8-130b-42dd-aaab-29c09e3e2347", + "metadata": {}, + "outputs": [], + "source": [ + "llama_messages = [\"Hi\"]\n", + "claude_messages = [\"Hello\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c158f31c-5e8b-48a4-9980-6b280393800b", + "metadata": {}, + "outputs": [], + "source": [ + "def call_llama():\n", + " messages = [{\"role\": \"system\", \"content\": llama_system}]\n", + " for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": llama_msg})\n", + " messages.append({\"role\": \"user\", \"content\": claude_msg})\n", + " response = ollama.chat(model=LLAMA_MODEL, messages=messages)\n", + " return response['message']['content']\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d803c5a2-df54-427a-9b80-8e9dd04ee36d", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude():\n", + " messages = []\n", + " for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n", + " messages.append({\"role\": \"user\", \"content\": llama_msg})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n", + " messages.append({\"role\": \"user\", \"content\": llama_messages[-1]})\n", + " message = claude.messages.create(\n", + " model=CLAUDE_MODEL,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a23794bb-0f36-4f91-aa28-24b876203a36", + "metadata": {}, + "outputs": [], + "source": [ + "call_llama()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f5c3e2f-a1bb-403b-b6b5-944a10d93305", + "metadata": {}, + "outputs": [], + "source": [ + "call_claude()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d6eb874-1c8f-47d8-a9f1-2e0fe197ae83", + "metadata": {}, + "outputs": [], + "source": [ + "llama_messages = [\"Hi\"]\n", + "claude_messages = [\"Hello there, what would you like to learn today?\"]\n", + "\n", + "print(f'Ollama:\\n{ollama_messages[0]}')\n", + "print(f'Claude:\\n{claude_messages[0]}')\n", + "\n", + "for _ in range(5):\n", + " llama_next = call_llama()\n", + " print(f'Llama 3.2:\\n{llama_next}')\n", + " llama_messages.append(llama_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f'Claude 3.5 Haiku:\\n{claude_next}')\n", + " claude_messages.append(claude_next)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1e651ad-85c8-45c7-ba83-f7c689080d6b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 30e2e2355dde74ad2e4e1b6a272de0a892b3059c Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Wed, 26 Feb 2025 21:23:09 +0500 Subject: [PATCH 05/43] Added advanced brochure generator to community-contributions --- .../brochure-generator-interface.ipynb | 460 ++++++++++++++++++ 1 file changed, 460 insertions(+) create mode 100644 week2/community-contributions/brochure-generator-interface.ipynb diff --git a/week2/community-contributions/brochure-generator-interface.ipynb b/week2/community-contributions/brochure-generator-interface.ipynb new file mode 100644 index 0000000..b7b8d8c --- /dev/null +++ b/week2/community-contributions/brochure-generator-interface.ipynb @@ -0,0 +1,460 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e71d7ff9-c27a-4602-9230-856626b1de07", + "metadata": {}, + "source": [ + "# Company Brochure Generator UI\n", + "Generates a brochure for a company website, after scraping the website and pages linked with that page, based on the provided company URL. \n", + "Enables users to \n", + "- Choose a model type (Llama 3.2, Claude, GPT)-\n", + "- Choose the tone preference\n", + "- Choose the target audience" + ] + }, + { + "cell_type": "markdown", + "id": "de9b59b9-8673-42e7-8849-62fe30f56711", + "metadata": {}, + "source": [ + "#### Imports, Keys, Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "39fd7fed-b215-4037-bd6e-7e1af1b83897", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import anthropic\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "0bf24357-1d77-4721-9d5a-f99827b2158c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "1afc12e1-02c1-4394-b589-19cd08d2a8bb", + "metadata": {}, + "outputs": [], + "source": [ + "# Define models\n", + "CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n", + "GPT_MODEL = \"gpt-4o-mini\"" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "d5d79a69-0a39-4ab4-aaf8-bc591bce0536", + "metadata": {}, + "outputs": [], + "source": [ + "# Creating instances\n", + "claude = anthropic.Anthropic()\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "markdown", + "id": "1d3369bc-b751-4f4d-a288-d7d81c384e67", + "metadata": {}, + "source": [ + "#### Web Scraper" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "fafe1074-fbf4-47cc-80dc-34413a447977", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "41c1f1af-ae20-423b-bf7c-efd7f8c2751b", + "metadata": {}, + "outputs": [], + "source": [ + "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", + "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "eb537563-e393-47ca-9af2-a8ea7393edd9", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links_user_prompt(website):\n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", + "Do not include Terms of Service, Privacy, email or social media links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "033568d2-3f1a-43ac-a288-7a65b4ea86a5", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model=GPT_MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "d8f316ac-f0b1-42d9-88a8-0a61fcb0023d", + "metadata": {}, + "outputs": [], + "source": [ + "def get_all_details(url):\n", + " result = \"Landing page:\\n\"\n", + " result += Website(url).get_contents()\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + " for link in links[\"links\"]:\n", + " print(f\"Processing {link['url']}...\")\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += Website(link[\"url\"]).get_contents()\n", + " return result" + ] + }, + { + "cell_type": "markdown", + "id": "016e065a-ac5a-48c0-bc4b-e916e9801384", + "metadata": {}, + "source": [ + "#### System Message" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "ed1c6068-5f4f-47a7-ab97-738dfb94e057", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n", + "and creates a short brochure about the company for prospective customers, investors and recruits. \\\n", + "You are also provided with the tone, and the target audience. Provide an appropriate answer. Respond in markdown.\"" + ] + }, + { + "cell_type": "markdown", + "id": "6d4f594c-927d-440f-8aae-33cfeb9c445c", + "metadata": {}, + "source": [ + "#### LLM Call Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "5b6a0379-3465-4c04-a553-4e4cdb9064b9", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_gpt(prompt,company_name,url):\n", + " messages = [\n", + " {\"role\": \"user\", \"content\": prompt},\n", + " {\"role\":\"system\",\"content\":system_message}\n", + " ]\n", + " stream = openai.chat.completions.create(\n", + " model=GPT_MODEL,\n", + " messages=messages,\n", + " stream=True\n", + " )\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " yield result" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "a2194e1d-4e99-4127-9515-aa9353382bc6", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_claude(prompt):\n", + " result = claude.messages.stream(\n", + " model=CLAUDE_MODEL,\n", + " max_tokens=1000,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": prompt},\n", + " ],\n", + " )\n", + " response = \"\"\n", + " with result as stream:\n", + " for text in stream.text_stream:\n", + " response += text or \"\"\n", + " yield response" + ] + }, + { + "cell_type": "markdown", + "id": "64adf26c-33b2-4589-8df6-dc5d6da71420", + "metadata": {}, + "source": [ + "#### Brochure Creation" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "8192f39f-508b-4592-a075-767db68672b3", + "metadata": {}, + "outputs": [], + "source": [ + "def get_brochure_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " user_prompt += get_all_details(url)\n", + " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "8aebfabe-4d51-4ee7-a9d2-5a379e9427cb", + "metadata": {}, + "outputs": [], + "source": [ + "def create_brochure(company_name, url,model,tone,target):\n", + " print('create brochure function called')\n", + " prompt = f\"Please generate a company brochure for {company_name}.\"\n", + " prompt += f\"Use a {tone} tone; and target content at {target}\"\n", + " prompt += get_brochure_user_prompt(company_name,url)\n", + " \n", + " if model == \"GPT\":\n", + " result = stream_gpt(prompt,company_name,url)\n", + " elif model==\"Claude\":\n", + " result = stream_claude(prompt,company_name,url)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + " yield from result" + ] + }, + { + "cell_type": "markdown", + "id": "c5f4f97b-c9d0-4d4c-8b02-e6209ba2549c", + "metadata": {}, + "source": [ + "#### Putting it all together : Gradio UI" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "33162303-9b49-46fe-a8e0-0d01be45685b", + "metadata": {}, + "outputs": [], + "source": [ + "force_dark_mode = \"\"\"\n", + "function refresh() {\n", + " const url = new URL(window.location);\n", + " if (url.searchParams.get('__theme') !== 'dark') {\n", + " url.searchParams.set('__theme', 'dark');\n", + " window.location.href = url.href;\n", + " }\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "47ab9a41-cecd-4c21-bd68-4a15966b80c4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7877\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 41, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://www.vellum.ai/'}, {'type': 'careers page', 'url': 'https://www.vellum.ai/careers'}]}\n", + "Processing https://www.vellum.ai/...\n", + "Processing https://www.vellum.ai/careers...\n" + ] + } + ], + "source": [ + "gr.Interface(\n", + " fn=create_brochure,\n", + " inputs=[\n", + " gr.Textbox(label='Company Name:'),\n", + " gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", + " gr.Dropdown(['GPT','Claude'],label='Select Model:'),\n", + " gr.Dropdown(['Formal','Casual','Persuasive','Informative','Conversational'],label='Select Tone:'),\n", + " gr.Dropdown(['Businesses','General Public','Students','Investors','Customers'],label='Select Target Audience:'),\n", + " ],\n", + " outputs = [gr.Markdown(label='Brochure')],\n", + " flagging_mode = 'never',\n", + " js = force_dark_mode\n", + ").launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2b923b09-6738-450a-9035-2c8d1bb9cae6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 95d7e8de4a95ae3e2c07c3697cf613b224cf1f5d Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Thu, 27 Feb 2025 21:31:45 +0500 Subject: [PATCH 06/43] Added my contributions to community-contributions --- .../day3-programming-tutor.ipynb | 142 ++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 week2/community-contributions/day3-programming-tutor.ipynb diff --git a/week2/community-contributions/day3-programming-tutor.ipynb b/week2/community-contributions/day3-programming-tutor.ipynb new file mode 100644 index 0000000..700a0c9 --- /dev/null +++ b/week2/community-contributions/day3-programming-tutor.ipynb @@ -0,0 +1,142 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d18a61ce-bbd4-491c-ab2e-8b352f9af844", + "metadata": {}, + "source": [ + "### An AI Chatbot that teaches students programming using GPT API" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c658ac85-6087-4a2c-b23f-1b92c17f0db3", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import anthropic" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46df0488-f874-41e0-a6a4-9a64aa7be53c", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables \n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + " \n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7eadc218-5b10-4174-bf26-575361640524", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7484731-ac84-405a-a688-6e81d139c5ce", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful programming study assistant\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "54e82f5a-993f-4a95-9d9d-caf35dbc4e76", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " print(\"History is:\")\n", + " print(history)\n", + " print(\"And messages is:\")\n", + " print(messages)\n", + "\n", + " stream = openai.chat.completions.create(model='gpt-4o-mini', messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5941ed67-e2a7-41bc-a8a3-079e9f1fdb64", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e8fcfe68-bbf6-4058-acc9-0230c96608c2", + "metadata": {}, + "outputs": [], + "source": [ + "system_message += \"Whenever the user talks about a topic that is not connected to programmming,\\\n", + "nudge them in the right direction by stating that you are here to help with programming. Encourage \\\n", + "the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge \\\n", + "if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone. Do not ignore \\\n", + "their requests, rather politely reject and then redirect them.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "090e7d49-fcbf-4715-b120-8d7aa91d165f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 122620c9b99e551b8b7c4462723fa03ebf8cc0bc Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Fri, 28 Feb 2025 14:29:58 +0500 Subject: [PATCH 07/43] Added my StudyAI Course booking assistant to community-contributions --- .../day 4 - course booking assistant.ipynb | 251 ++++++++++++ .../community-contributions/day3-gemini.ipynb | 2 +- .../day3-gradio-auth.ipynb | 4 +- .../day3-refine-user-query-by-llama.ipynb | 4 +- .../community-contributions/day3.upsell.ipynb | 2 +- ...oking_and_multiple_tools_per_message.ipynb | 373 +----------------- 6 files changed, 268 insertions(+), 368 deletions(-) create mode 100644 week2/community-contributions/day 4 - course booking assistant.ipynb diff --git a/week2/community-contributions/day 4 - course booking assistant.ipynb b/week2/community-contributions/day 4 - course booking assistant.ipynb new file mode 100644 index 0000000..c7a057e --- /dev/null +++ b/week2/community-contributions/day 4 - course booking assistant.ipynb @@ -0,0 +1,251 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5d799d2a-6e58-4a83-b17a-dbbc40efdc39", + "metadata": {}, + "source": [ + "## Project - Course Booking AI Asssistant\n", + "AI Customer Support Bot that \n", + "- Returns Prices\n", + "- Books Tickets\n", + "- Adds Information to Text File" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b1ad9acd-a702-48a3-8ff5-d536bcac8030", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74adab0c-99b3-46cd-a79f-320a3e74138a", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "MODEL = \"gpt-4o-mini\"\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d3240a4-99c1-4c07-acaa-ecbb69ffd2e4", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Online Course Platform called StudyAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so.\"\n", + "system_message += \"If you are given a partial name, for example 'discrete' instead of 'discrete structures' \\\n", + "ask the user if they meant to say 'discrete structures', and then display the price. The user may also use \\\n", + "acronyms like 'PF' instead of programming fundamentals or 'OOP' to mean 'Object oriented programming'. \\\n", + "Clarify what the user means and then proceed as directed.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a1b8d5f-f893-477b-8396-ff7d697eb0c3", + "metadata": {}, + "outputs": [], + "source": [ + "course_prices = {\"programming fundamentals\": \"$19\", \"discrete structures\": \"$39\", \"operating systems\": \"$24\", \"object oriented programming\": \"$39\"}\n", + "\n", + "def get_course_price(course):\n", + " print(f\"Tool get_course_price called for {course}\")\n", + " course = course.lower()\n", + " return course_prices.get(course, \"Unknown\")\n", + "\n", + "def enroll_in_course(course):\n", + " print(f'Tool enroll_in_course_ called for {course}')\n", + " course_price = get_course_price(course)\n", + " if course_price != 'Unknown':\n", + " with open('enrolled_courses.txt', 'a') as file: \n", + " file.write(course + \"\\n\")\n", + " return 'Successfully enrolled in course'\n", + " else:\n", + " return 'Enrollment failed, no such course available'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "330d2b94-a8c5-4967-ace7-15d2cd52d7ae", + "metadata": {}, + "outputs": [], + "source": [ + "get_course_price('graph theory')\n", + "get_course_price('discrete structures')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5bb65830-fab8-45a7-bf43-7e52186915a0", + "metadata": {}, + "outputs": [], + "source": [ + "price_function = {\n", + " \"name\": \"get_course_price\",\n", + " \"description\": \"Get the price of a course. Call this whenever you need to know the course price, for example when a customer asks 'How much is a ticket for this course?'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"course\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The course that the customer wants to purchase\",\n", + " },\n", + " },\n", + " \"required\": [\"course\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}\n", + "\n", + "enroll_function = {\n", + " \"name\": \"enroll_in_course\",\n", + " \"description\":\"Get the success status of course enrollment. Call whenever a customer wants to enroll in a course\\\n", + " for example, if they say 'I want to purchase this course' or 'I want to enroll in this course'\",\n", + " \"parameters\":{\n", + " \"type\":\"object\",\n", + " \"properties\":{\n", + " \"course\":{\n", + " \"type\":\"string\",\n", + " \"description\": \"The course that the customer wants to purchase\",\n", + " },\n", + " },\n", + " \"required\": [\"course\"],\n", + " \"additionalProperties\": False\n", + " } \n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "08af86b9-3aaa-4b6b-bf7c-ee668ba1cbfe", + "metadata": {}, + "outputs": [], + "source": [ + "tools = [\n", + " {\"type\":\"function\",\"function\":price_function},\n", + " {\"type\":\"function\",\"function\":enroll_function}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "482efc34-ff1f-4146-9570-58b4d59c3b2f", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message,history):\n", + " messages = [{\"role\":\"system\",\"content\":system_message}] + history + [{\"role\":\"user\",\"content\":message}]\n", + " response = openai.chat.completions.create(model=MODEL,messages=messages,tools=tools)\n", + "\n", + " if response.choices[0].finish_reason == \"tool_calls\":\n", + " message = response.choices[0].message\n", + " messages.append(message)\n", + " for tool_call in message.tool_calls:\n", + " messages.append(handle_tool_call(tool_call))\n", + " response = openai.chat.completions.create(model=MODEL,messages=messages)\n", + "\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f725b4fb-d477-4d7d-80b5-5d70e1b25a86", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def handle_tool_call(tool_call):\n", + " function = tool_call.function.name\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " match function:\n", + " case 'get_course_price':\n", + " course = arguments.get('course')\n", + " price = get_course_price(course)\n", + " return {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"course\": course,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " case 'enroll_in_course':\n", + " course = arguments.get('course')\n", + " status = enroll_in_course(course)\n", + " return {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"course\": course, \"status\": status}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c446272a-9ce1-4ffd-9bc8-483d782810b4", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat,type=\"messages\").launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1fe714a3-f793-4c3b-b5aa-6c81b82aea1b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/day3-gemini.ipynb b/week2/community-contributions/day3-gemini.ipynb index 714f93a..e942279 100644 --- a/week2/community-contributions/day3-gemini.ipynb +++ b/week2/community-contributions/day3-gemini.ipynb @@ -288,7 +288,7 @@ ], "metadata": { "kernelspec": { - "display_name": "llms", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/week2/community-contributions/day3-gradio-auth.ipynb b/week2/community-contributions/day3-gradio-auth.ipynb index fe94e55..0b6137a 100644 --- a/week2/community-contributions/day3-gradio-auth.ipynb +++ b/week2/community-contributions/day3-gradio-auth.ipynb @@ -160,7 +160,7 @@ ], "metadata": { "kernelspec": { - "display_name": "llms", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -178,5 +178,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb index 1034274..abeb431 100644 --- a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb +++ b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb @@ -342,7 +342,7 @@ ], "metadata": { "kernelspec": { - "display_name": "llm_env", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -356,7 +356,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.11.11" } }, "nbformat": 4, diff --git a/week2/community-contributions/day3.upsell.ipynb b/week2/community-contributions/day3.upsell.ipynb index dd2bd06..a3f94c1 100644 --- a/week2/community-contributions/day3.upsell.ipynb +++ b/week2/community-contributions/day3.upsell.ipynb @@ -347,7 +347,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.11.11" } }, "nbformat": 4, diff --git a/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb b/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb index 28aa34e..1489c51 100644 --- a/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb +++ b/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb @@ -63,14 +63,14 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 1, "id": "0a521d84-d07c-49ab-a0df-d6451499ed97", "metadata": {}, "outputs": [], "source": [ "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", - "system_message += \"Always be accurate. If you don't know the answer, say so.\"" + "system_message += \"Always be accurate. If you don't know the answer, say so.\"\n" ] }, { @@ -335,372 +335,21 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 3, "id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", "metadata": { "scrolled": true }, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7873\n", - "\n", - "To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "application/json": [ - { - "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", - "role": "system" - }, - { - "content": "tickets to london and paris for $50 each please", - "role": "user" - } - ], - "text/plain": [ - "" - ] - }, - "metadata": { - "application/json": { - "expanded": false, - "root": "root" - } - }, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-AtMTR6PDyoghY9BxBI88y03wrkyWT', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_62youPDgpaS0eXN4gru6NT7n', function=Function(arguments='{\"destination_city\": \"London\"}', name='get_ticket_price'), type='function'), ChatCompletionMessageToolCall(id='call_kvQK4Cdyk4b82rqtzkfJyoRh', function=Function(arguments='{\"destination_city\": \"Paris\"}', name='get_ticket_price'), type='function')]))], created=1737757793, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=49, prompt_tokens=313, total_tokens=362, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tool get_ticket_price called for London\n", - "Tool get_ticket_price called for Paris\n" - ] - }, - { - "data": { - "application/json": [ - { - "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", - "role": "system" - }, - { - "content": "tickets to london and paris for $50 each please", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "user" - }, - { - "content": "I'm sorry, but tickets to London are $799 and to Paris are $899, which is much higher than $50.", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "assistant" - }, - { - "content": "Can't you book them any way pretty please?", - "role": "user" - } - ], - "text/plain": [ - "" - ] - }, - "metadata": { - "application/json": { - "expanded": false, - "root": "root" - } - }, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-AtMTijl9VhY8svKRySpZ3rdyHBLmq', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I'm afraid I cannot book the tickets at the price you've requested; the current prices are fixed.\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1737757810, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=21, prompt_tokens=355, total_tokens=376, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/json": [ - { - "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", - "role": "system" - }, - { - "content": "tickets to london and paris for $50 each please", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "user" - }, - { - "content": "I'm sorry, but tickets to London are $799 and to Paris are $899, which is much higher than $50.", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "assistant" - }, - { - "content": "Can't you book them any way pretty please?", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "user" - }, - { - "content": "I'm afraid I cannot book the tickets at the price you've requested; the current prices are fixed.", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "assistant" - }, - { - "content": "how about you book london for $749?", - "role": "user" - } - ], - "text/plain": [ - "" - ] - }, - "metadata": { - "application/json": { - "expanded": false, - "root": "root" - } - }, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-AtMU0N8Fp2SeWaMw5LiiBnDgAAWdm', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_qOCom3JGJBFzJvsEwQvDYKIG', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"749\"}', name='book_ticket'), type='function')]))], created=1737757828, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=20, prompt_tokens=391, total_tokens=411, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tool book_ticket for London for 749\n", - "Tool get_ticket_price called for London\n" - ] - }, - { - "data": { - "application/json": [ - { - "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", - "role": "system" - }, - { - "content": "tickets to london and paris for $50 each please", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "user" - }, - { - "content": "I'm sorry, but tickets to London are $799 and to Paris are $899, which is much higher than $50.", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "assistant" - }, - { - "content": "Can't you book them any way pretty please?", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "user" - }, - { - "content": "I'm afraid I cannot book the tickets at the price you've requested; the current prices are fixed.", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "assistant" - }, - { - "content": "how about you book london for $749?", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "user" - }, - { - "content": "Your ticket to London has been successfully booked for $749!", - "metadata": { - "duration": null, - "id": null, - "parent_id": null, - "status": null, - "title": null - }, - "options": null, - "role": "assistant" - }, - { - "content": "cool, what was the discount?", - "role": "user" - } - ], - "text/plain": [ - "" - ] - }, - "metadata": { - "application/json": { - "expanded": false, - "root": "root" - } - }, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-AtMUBOoWmKT4m7Ru3mkPRx7mQPgmd', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The original price for the ticket to London was $799, so you received a discount of $50.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1737757839, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=23, prompt_tokens=418, total_tokens=441, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/json": [ - { - "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", - "role": "system" - }, - { - "content": "tickets to london and paris for $50 each please", - "role": "user" - } - ], - "text/plain": [ - "" - ] - }, - "metadata": { - "application/json": { - "expanded": false, - "root": "root" - } - }, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "ChatCompletion(id='chatcmpl-AtMUh5f9LEaGjH0FLpPdKf6jgyQsT', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_6Ihkd1XGA10QxxlCn9uIJvqO', function=Function(arguments='{\"destination_city\": \"London\"}', name='get_ticket_price'), type='function'), ChatCompletionMessageToolCall(id='call_a9qmfQQlwU5L8pu2mvBgMMXl', function=Function(arguments='{\"destination_city\": \"Paris\"}', name='get_ticket_price'), type='function')]))], created=1737757871, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=49, prompt_tokens=313, total_tokens=362, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tool get_ticket_price called for London\n", - "Tool get_ticket_price called for Paris\n" + "ename": "NameError", + "evalue": "name 'gr' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[1;31m------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[3], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mgr\u001b[49m\u001b[38;5;241m.\u001b[39mChatInterface(fn\u001b[38;5;241m=\u001b[39mchat, \u001b[38;5;28mtype\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39mlaunch()\n", + "\u001b[1;31mNameError\u001b[0m: name 'gr' is not defined" ] } ], From dbe489e22e1ed17019a4f36223a2bd8fa5083463 Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Sat, 1 Mar 2025 17:08:44 +0500 Subject: [PATCH 08/43] Added my contributions to community-contributions --- .../multi-modal-StudyAI.ipynb | 227 ++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 week2/community-contributions/multi-modal-StudyAI.ipynb diff --git a/week2/community-contributions/multi-modal-StudyAI.ipynb b/week2/community-contributions/multi-modal-StudyAI.ipynb new file mode 100644 index 0000000..0cafb5d --- /dev/null +++ b/week2/community-contributions/multi-modal-StudyAI.ipynb @@ -0,0 +1,227 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6aa646e3-7a57-461a-b69a-073179effa18", + "metadata": {}, + "source": [ + "## Additional End of week Exercise - week 2\n", + "\n", + "This includes \n", + "- Gradio UI\n", + "- use of the system prompt to add expertise\n", + "- audio input so you can talk to it\n", + "- respond with audio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "72f3dca4-b052-4e9f-90c8-f42e667c165c", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from IPython.display import Markdown, display, update_display\n", + "import gradio as gr\n", + "import json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23570b9f-8c7a-4cc7-b809-3505334b60a7", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "openai = OpenAI()\n", + "MODEL = 'gpt-4o-mini'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d379178a-8672-4e6f-a380-ad8d85f5c64e", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"\"\"You are a personal study tutor, designed to provide clear, yet brief and succint answers to \n", + "students that ask you questions. The topics are related to data science, computer science \n", + "and technology in general, so you are allowed to use a moderate level of jargon. Explain in \n", + "simple terminology, so a student can easily understand. \n", + "\n", + "You may also be asked about prices for special courses.In this case, respond that you have no such\n", + "data available. \n", + "\n", + "\"\"\"\n", + "# Use a tabular format where possible \n", + "# for ease of information flow " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4745d439-c66e-4e5c-b5d4-9f0ba97aefdc", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + "\n", + " reply = response.choices[0].message.content\n", + " history += [{\"role\":\"assistant\", \"content\":reply}]\n", + "\n", + " # Comment out or delete the next line if you'd rather skip Audio for now..\n", + " talker(reply)\n", + " \n", + " return history" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a8b31799-df86-4151-98ea-66ef50fe767e", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install openai-whisper" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9f5b8e51-2833-44be-a4f4-63c4683f2b6e", + "metadata": {}, + "outputs": [], + "source": [ + "import whisper\n", + "\n", + "def transcribe_audio(audio):\n", + " if audio is None:\n", + " return \"No audio received.\"\n", + " \n", + " model = whisper.load_model(\"base\") # You can use \"tiny\", \"small\", etc.\n", + " result = model.transcribe(audio)\n", + " \n", + " return result[\"text\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e55f8e43-2da1-4f2a-bcd4-3fffa830db48", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\",\n", + " input=message)\n", + "\n", + " audio_stream = BytesIO(response.content)\n", + " output_filename = \"output_audio.mp3\"\n", + " with open(output_filename, \"wb\") as f:\n", + " f.write(audio_stream.read())\n", + "\n", + " # Play the generated audio\n", + " display(Audio(output_filename, autoplay=True))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb3107a7-bfdc-4255-825f-bfabcf458c0c", + "metadata": {}, + "outputs": [], + "source": [ + "# More involved Gradio code as we're not using the preset Chat interface!\n", + "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", + "\n", + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot = gr.Chatbot(height=400,type=\"messages\")\n", + " with gr.Row():\n", + " entry = gr.Textbox(label=\"Chat with our StudyAI Assistant:\")\n", + " # with gr.Row():\n", + " # entry = gr.Textbox(label=\"Speak or Type:\", placeholder=\"Speak your question...\", interactive=True, microphone=True)\n", + " with gr.Row():\n", + " audio_input = gr.Audio(type=\"filepath\", label=\"Speak your question\")\n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear\")\n", + "\n", + " def do_entry(message, history):\n", + " history += [{\"role\":\"user\", \"content\":message}]\n", + " return \"\", history\n", + "\n", + " def handle_audio(audio, history):\n", + " text = transcribe_audio(audio)\n", + " history += [{\"role\": \"user\", \"content\": text}]\n", + " return \"\", history\n", + "\n", + " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", + " chat, inputs=[chatbot], outputs=[chatbot]\n", + " )\n", + "\n", + " audio_input.change(handle_audio, inputs=[audio_input, chatbot], outputs=[entry, chatbot]).then(\n", + " chat, inputs=[chatbot], outputs=[chatbot]\n", + " )\n", + " \n", + " clear.click(lambda: [], inputs=None, outputs=chatbot, queue=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73e0a776-d43e-4b04-a37f-a27d3714cf47", + "metadata": {}, + "outputs": [], + "source": [ + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcd45503-d314-4b28-a41c-4dbb87059188", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 7a050c84bbbda913895817d700ca669a19ed3ca1 Mon Sep 17 00:00:00 2001 From: RAFAEL ESPINILLA CUEVAS Date: Sat, 8 Mar 2025 16:40:30 -0500 Subject: [PATCH 09/43] Added my contributions to community-contributions --- .../week1 exercise - my AI tutor.ipynb | 148 ++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 week1/community-contributions/week1 exercise - my AI tutor.ipynb diff --git a/week1/community-contributions/week1 exercise - my AI tutor.ipynb b/week1/community-contributions/week1 exercise - my AI tutor.ipynb new file mode 100644 index 0000000..e761e08 --- /dev/null +++ b/week1/community-contributions/week1 exercise - my AI tutor.ipynb @@ -0,0 +1,148 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "f38e9ebb-453d-4b40-84f6-bc3e9bf4d7ef", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "import json\n", + "import ollama\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "\n", + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f367c5bb-80a2-4d78-8f27-823f5dafe7c0", + "metadata": {}, + "outputs": [], + "source": [ + "# set up environment\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "openai = OpenAI()\n", + "\n", + "# System prompt for the AI TECHNICAL LLM AND PYTHON TUTOR.\"\n", + "\n", + "system_prompt = \"You are an EXPERT in AI, LLMS and Python \\\n", + "Provide the answer with example ALLWAYS when necessary. \\\n", + "If you do not know the answer just say 'I don't know the answer' \\\n", + "Respond in markdown in Spanish.\"\n", + "\n", + "# messages\n", + "def messages_for(question):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": question}\n", + " ]\n", + "\n", + "# here is the question; type over this to ask something new\n", + "\n", + "question = \"\"\"\n", + "Please explain what this code does and why:\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\"\"\"\n", + "question = question[:5_000] # Truncate if more than 5,000 characters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a90d726d-d494-401f-9cd6-0260f5c781e0", + "metadata": {}, + "outputs": [], + "source": [ + "# METHODS TO DISPLAY\n", + "def display_summary_ollama(question):\n", + " response = ollama.chat(\n", + " model = MODEL_LLAMA,\n", + " messages = messages_for(question)\n", + " ) \n", + " summary = response['message']['content']\n", + " display(Markdown(summary))\n", + "\n", + "def display_summary_gpt(question):\n", + " stream = openai.chat.completions.create(\n", + " model = MODEL_GPT,\n", + " messages = messages_for(question),\n", + " stream=True\n", + " )\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n", + " \n", + "def display_summary(llm, question):\n", + " if llm.startswith(\"llama3.2\"):\n", + " display_summary_ollama(question)\n", + " else:\n", + " display_summary_gpt(question)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e993b6d-8fee-43f3-9e36-f86701a5cc57", + "metadata": {}, + "outputs": [], + "source": [ + "# Get gpt-4o-mini to answer, with streaming\n", + "\n", + "display_summary(MODEL_GPT, question)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "31f6283a-ee57-415e-9a57-83d07261b7f9", + "metadata": {}, + "outputs": [], + "source": [ + "# Get Llama 3.2 to answer\n", + "\n", + "display_summary(MODEL_LLAMA, question)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 579d0bb9063b76b853a0db53c70275bbc8505298 Mon Sep 17 00:00:00 2001 From: Shay Harding <3178267+kellewic@users.noreply.github.com> Date: Sun, 9 Mar 2025 17:58:56 +0000 Subject: [PATCH 10/43] Added frontier model testing table compared to basic models and human evaluator --- .../week6_day4_frontier_model_testing.ipynb | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 week6/community-contributions/week6_day4_frontier_model_testing.ipynb diff --git a/week6/community-contributions/week6_day4_frontier_model_testing.ipynb b/week6/community-contributions/week6_day4_frontier_model_testing.ipynb new file mode 100644 index 0000000..b6e9722 --- /dev/null +++ b/week6/community-contributions/week6_day4_frontier_model_testing.ipynb @@ -0,0 +1,71 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "00f05a05-d989-4bf7-b1f1-9418e25ecd58", + "metadata": {}, + "source": [ + "# The Product Pricer Continued\n", + "\n", + "I tested numerous frontier models from OpenAI, Anthropic, Google, and others via Groq API.\n", + "\n", + "Here are the results of all tests including ones from Day 3 and how the frontier models stacked up.\n", + "\n", + "They are ordered by Error from best to worst.\n", + "\n", + "I ran each model once on 2025-03-09.\n", + "\n", + "Main repo at [https://github.com/kellewic/llm](https://github.com/kellewic/llm)" + ] + }, + { + "cell_type": "markdown", + "id": "a69cc81a-e582-4d04-8e12-fd83e120a7d1", + "metadata": {}, + "source": [ + "| Rank | Model | Error ($) | RMSLE | Hits (%) | Chart Link |\n", + "|------|-----------------------------------|-----------|-------|----------|------------|\n", + "| 1 | **gemini-2.0-flash** | 73.48 | 0.56 | 56.4% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/gemini-2.0-flash.png) |\n", + "| 2 | **gpt-4o-2024-08-06** | 75.66 | 0.89 | 57.6% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/gpt-4o-2024-08-06.png) |\n", + "| 3 | **gemini-2.0-flash-lite** | 76.42 | 0.61 | 56.0% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/gemini-2.0-flash-lite.png) |\n", + "| 4 | **gpt-4o-mini (original)** | 81.61 | 0.60 | 51.6% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/gpt-4o-mini.png) |\n", + "| 5 | **claude-3-5-haiku-20241022** | 85.25 | 0.62 | 50.8% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/claude-3-5-haiku-20241022.png) |\n", + "| 6 | **claude-3-5-sonnet-20241022** | 88.97 | 0.61 | 49.2% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/claude-3-5-sonnet-20241022.png) |\n", + "| 7 | **claude-3-7-sonnet-20250219** | 89.41 | 0.62 | 55.2% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/claude-3-7-sonnet-20250219.png) |\n", + "| 8 | **mistral-saba-24b** | 98.02 | 0.82 | 44.8% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/mistral-saba-24b.png) |\n", + "| 9 | **llama-3.3-70b-versatile** | 98.24 | 0.70 | 44.8% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/llama-3.3-70b-versatile.png) |\n", + "| 10 | **GPT-4o-mini (fine-tuned)** | 101.49 | 0.81 | 41.2% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_tuning/gpt_fine_tuned.png) |\n", + "| 11 | **Random Forest Regressor** | 105.10 | 0.89 | 37.6% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/basic_model_training/random_forest_pricer.png) |\n", + "| 12 | **deepseek-r1-distill-llama-70b** | 109.09 | 0.67 | 48.4% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/deepseek-r1-distill-llama-70b.png) |\n", + "| 13 | **Linear SVR** | 110.91 | 0.92 | 29.2% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/basic_model_training/svr_pricer.png) |\n", + "| 14 | **Word2Vec LR** | 113.14 | 1.05 | 22.8% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/basic_model_training/word2vec_lr_pricer.png) |\n", + "| 15 | **Bag of Words LR** | 113.60 | 0.99 | 24.8% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/basic_model_training/bow_lr_pricer.png) |\n", + "| 16 | **Human Performance** | 126.55 | 1.00 | 32.0% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/human_pricer.png) |\n", + "| 17 | **Average** | 137.17 | 1.19 | 15.2% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/basic_model_training/average_pricer.png) |\n", + "| 18 | **Linear Regression** | 139.20 | 1.17 | 15.6% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/basic_model_training/linear_regression_pricer.png) |\n", + "| 19 | **deepseek-r1-distill-qwen-32b** | 151.59 | 0.80 | 38.4% | [๐Ÿ“Š](https://github.com/kellewic/llm/blob/main/frontier_model_test/deepseek-r1-distill-qwen-32b.png) |" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 0f7fdf40e2ea9e94cc66bfa88e4a99ce567a0341 Mon Sep 17 00:00:00 2001 From: samt07 Date: Mon, 10 Mar 2025 06:38:42 -0400 Subject: [PATCH 11/43] Added week1 and week2 projects --- .../gradio_testcase_automation.ipynb | 444 +++++++++++ .../day5-event_assistant.ipynb | 701 ++++++++++++++++++ 2 files changed, 1145 insertions(+) create mode 100644 week1/community-contributions/gradio_testcase_automation.ipynb create mode 100644 week2/community-contributions/day5-event_assistant.ipynb diff --git a/week1/community-contributions/gradio_testcase_automation.ipynb b/week1/community-contributions/gradio_testcase_automation.ipynb new file mode 100644 index 0000000..4fefa8c --- /dev/null +++ b/week1/community-contributions/gradio_testcase_automation.ipynb @@ -0,0 +1,444 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "it1JLoxrSqO1", + "metadata": { + "id": "it1JLoxrSqO1" + }, + "outputs": [], + "source": [ + "!pip install openai python-docx python-dotenv gradio openpyxl" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "950a084a-7f92-4669-af62-f07cb121da56", + "metadata": { + "id": "950a084a-7f92-4669-af62-f07cb121da56" + }, + "outputs": [], + "source": [ + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "#from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "from docx import Document" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d0548135-ef16-4102-a55a-cea888a51c29", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import re\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d", + "metadata": { + "id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d" + }, + "outputs": [], + "source": [ + "class ReqDoc:\n", + " def __init__(self, file_path):\n", + " self.file_path = file_path\n", + "\n", + " def extract(self):\n", + " \"\"\"\n", + " Reads the content of a .docx file and returns the paragraphs as a list of strings.\n", + " \"\"\"\n", + " try:\n", + " # Check if the file exists\n", + " if not os.path.exists(self.file_path):\n", + " raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n", + "\n", + " # Attempt to open and read the document\n", + " doc = Document(self.file_path)\n", + " text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n", + " return text\n", + "\n", + " except FileNotFoundError as fnf_error:\n", + " print(fnf_error)\n", + " return None\n", + " except Exception as e:\n", + " print(f\"An error occurred: {e}\")\n", + " return None\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "008f485a-5718-48f6-b408-06eb6d59d7f9", + "metadata": { + "id": "008f485a-5718-48f6-b408-06eb6d59d7f9" + }, + "outputs": [], + "source": [ + "# Initialize and constants\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n", + " print(\"API key looks good!\")\n", + "else:\n", + " print(\"There might be a problem with your API key. Please check!\")\n", + " \n", + "MODEL = 'gpt-4o-mini'\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6110ff3-74bc-430a-8051-7d86a216f0fb", + "metadata": { + "id": "b6110ff3-74bc-430a-8051-7d86a216f0fb" + }, + "outputs": [], + "source": [ + "#Set up system prompt for extracting just the requirements from the document\n", + "\n", + "req_doc_system_prompt = \"You are provided with a complete requirements specifications document. \\\n", + "You are able to decide which content from that document are related to actual requirements, identify each requirement as \\\n", + "functional or non-functional and list them all.\\n\"\n", + "req_doc_system_prompt += \"If the document is empty or do not contain requirements or if you cannot extract them, please respond as such.\\\n", + "Do not make up your own requirements. \\n\"\n", + "req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n", + "req_doc_system_prompt += \"\"\"\n", + "{\n", + " \"requirements\": [\n", + " {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n", + " {\"RequirementNo\": \"FR-02\": \"Requirement Description\": \"description of this functional requirement goes here\"},\n", + " {\"RequirementNo\": \"NFR-01\": \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n", + " {\"RequirementNo\": \"NFR-02\": \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n", + " ]\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20460e45-c1b7-4dc4-ab07-932235c19895", + "metadata": { + "id": "20460e45-c1b7-4dc4-ab07-932235c19895" + }, + "outputs": [], + "source": [ + "#Set up user prompt, sending in the requirements doc as input and calling the ReqDoc.extract function. Key to note here is the explicit instructions to\n", + "#respond in JSON format.\n", + "\n", + "def req_doc_user_prompt(doc):\n", + " user_prompt = \"Here is the contents from a requirement document.\\n\"\n", + " user_prompt += f\"{doc.extract()} \\n\"\n", + " user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n", + "paragraphs such as Approvers, table of contents and similar sections which are not really requirements.\\\n", + "You must respond in a JSON format\"\n", + " user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n", + " user_prompt = user_prompt[:25_000] # Truncate if more than 25,000 characters\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891", + "metadata": { + "id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891" + }, + "outputs": [], + "source": [ + "#Function to call chatgpt-4o-mini model with the user and system prompts set above and returning the json formatted result obtained from chatgpt\n", + "def get_requirements(doc):\n", + " reqdoc = ReqDoc(doc)\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": req_doc_system_prompt},\n", + " {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e", + "metadata": { + "id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e" + }, + "outputs": [], + "source": [ + "#Uncomment and run this if you want to see the extracted requriements in json format.\n", + "#get_requirements(\"reqdoc.docx\")" + ] + }, + { + "cell_type": "markdown", + "id": "1fe8618c-1dfe-4030-bad8-405731294c93", + "metadata": { + "id": "1fe8618c-1dfe-4030-bad8-405731294c93" + }, + "source": [ + "### Next, we will make another call to gpt-4o-mini" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b", + "metadata": { + "id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b" + }, + "outputs": [], + "source": [ + "#Set up system prompt to ask for test cases in table format\n", + "system_prompt = \"You are an assitant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \\\n", + "You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n", + "The table will contain the following columns. \\\n", + "1.S No\\\n", + "2.Requirement No\\\n", + "3.Requirement Description\\\n", + "4.Test Case ID\\\n", + "5.Test case summary\\\n", + "6.Test case description\\\n", + "7.Success criteria \\n\"\n", + "system_prompt += \"If you are provided with an empty list, ask for a proper requirement doc\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5", + "metadata": { + "id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5" + }, + "outputs": [], + "source": [ + "# Set up user prompt passing in the req doc file. This in turn will call the get_requirements function, which will make a call to chatgpt.\n", + "\n", + "def get_testcase_user_prompt(reqdoc):\n", + " user_prompt = \"You are looking at the following list of requirements. \\n\"\n", + " user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n", + " user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n", + " user_prompt += user_prompt[:25000]\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b2a2b46-9d9c-416c-b189-3007b4d26d76", + "metadata": {}, + "outputs": [], + "source": [ + "#This is the 2nd call to chatgpt to get test cases. display(Markdown) will take care of producing a neatly formatted table output.\n", + "def create_testcase_doc_gradio(response, is_response_ready, is_cleared, file_input):\n", + " if is_cleared or file_input == None: # Prevent OpenAI call if \"Clear\" was clicked\n", + " return \"\", False\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_testcase_user_prompt(file_input)}\n", + " ],\n", + " stream=True\n", + " )\n", + " #Modified for Gradio\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " #print(result)\n", + " yield result, False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2bb96a11-063e-4b20-9880-71fa9ea4d3f7", + "metadata": {}, + "outputs": [], + "source": [ + "# Define this variable and then pass js=force_dark_mode when creating the Interface\n", + "force_dark_mode = \"\"\"\n", + "function refresh() {\n", + " const url = new URL(window.location);\n", + " if (url.searchParams.get('__theme') !== 'dark') {\n", + " url.searchParams.set('__theme', 'dark');\n", + " window.location.href = url.href;\n", + " }\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5c81c766-9613-4614-b88d-410654672b89", + "metadata": {}, + "outputs": [], + "source": [ + "def show_or_hide_save_button(response, is_response_ready, is_cleared):\n", + " if is_cleared or response == None:\n", + " return \"\", False\n", + " table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n", + " table_match = re.search(table_pattern, response)\n", + " if table_match:\n", + " return response, True #(response, is_response_ready)\n", + " else:\n", + " return response, False #(response, is_response_ready)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a5f5d8e7-d29c-4f40-8d57-a9911bb7c47e", + "metadata": {}, + "outputs": [], + "source": [ + "def extract_table_from_markdown(response):\n", + " # Regular expression to match Markdown tables\n", + " table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n", + " table_match = re.search(table_pattern, response)\n", + "\n", + " if table_match:\n", + " table_data = table_match.group(0)\n", + " # Process the table into a format pandas can read\n", + " rows = table_data.strip().split(\"\\n\")\n", + " data = [row.split(\"|\")[1:-1] for row in rows] # Split columns by '|'\n", + "\n", + " # Convert to DataFrame\n", + " df = pd.DataFrame(data[1:], columns=data[0]) # First row is the header\n", + "\n", + " # Save to Excel\n", + " output_file = \"test_cases.xlsx\"\n", + " df.to_excel(output_file, index=False)\n", + "\n", + " return output_file\n", + " else:\n", + " return None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1380b11-3e28-40de-ab1a-93a5fd73cf81", + "metadata": {}, + "outputs": [], + "source": [ + "def extract_and_save_button(response, is_cleared):\n", + " if is_cleared:\n", + " return None # Do nothing if the file was cleared\n", + " # This function will be triggered when the user clicks \"Save as Excel\"\n", + " output_file = extract_table_from_markdown(response)\n", + " if output_file:\n", + " return output_file\n", + " else:\n", + " return \"No table found in the provided input.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a532b42-9f81-4c75-8be4-e40d621a6b35", + "metadata": {}, + "outputs": [], + "source": [ + "# Gradio interface\n", + "with gr.Blocks(js=force_dark_mode) as demo:\n", + " gr.HTML(\"

๐Ÿ“„ Test case automation

\")\n", + " with gr.Row():\n", + " file_input = gr.File(label=\"Upload your requirements docx file\", file_types=[\".docx\"])\n", + " with gr.Row():\n", + " response = gr.Markdown()\n", + " # Button to save the table as Excel file (optional)\n", + " save_button = gr.Button(\"Download Table as Excel\", visible=False)\n", + " file_output = gr.File(label=\"Download Excel File\", visible=False) \n", + " # State variable to track if response is ready\n", + " is_response_ready = gr.State(False)\n", + " with gr.Row():\n", + " clear_button = gr.Button(\"Clear\")\n", + " # State variable to track if clear button is clicked\n", + " is_cleared = gr.State(False)\n", + "\n", + " # Function to show \"Processing...\" message\n", + " def show_processing(is_cleared, file_input):\n", + " if is_cleared or file_input==None:\n", + " return None, False, is_cleared, file_input # Do nothing if the file was cleared\n", + " #return gr.HTML(\"
โŒ› Processing your file... Please wait!
\"), False, is_cleared, file_input\n", + " return \"โŒ› Processing your file... Please wait!\", False, is_cleared, file_input\n", + " \n", + " # Trigger response only if the file was uploaded and not cleared\n", + " file_input.change(\n", + " lambda _: False, # Directly set is_cleared to False\n", + " inputs=[file_input],\n", + " outputs=[is_cleared]\n", + " ).then(\n", + " show_processing, inputs=[is_cleared, file_input], outputs=[response, is_response_ready, is_cleared, file_input]\n", + " ).then(\n", + " create_testcase_doc_gradio, inputs=[response, is_response_ready, is_cleared, file_input], outputs=[response, is_response_ready]\n", + " ).then(\n", + " show_or_hide_save_button, inputs=[response, is_response_ready, is_cleared], outputs=[response, is_response_ready]\n", + " ).then(\n", + " lambda _, ready: (gr.update(visible=ready), gr.update(visible=ready)), inputs=[response, is_response_ready], outputs=[save_button,file_output])\n", + "\n", + " #.then() passes the previous function outputs as inputs to the next function\n", + "\n", + " # Button action to extract and save table as an Excel file\n", + " save_button.click(extract_and_save_button, inputs=[response, is_cleared], outputs=file_output)\n", + " \n", + " # Clear button resets both file and output while setting is_cleared to True\n", + " clear_button.click(lambda: (None, None, None, True), inputs=None, outputs=[file_input, file_output, response, is_cleared]) \n", + "\n", + "# Launch Gradio app\n", + "demo.launch(share=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd5314b2-ee91-49bd-9d40-558775d44382", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/day5-event_assistant.ipynb b/week2/community-contributions/day5-event_assistant.ipynb new file mode 100644 index 0000000..31edd3c --- /dev/null +++ b/week2/community-contributions/day5-event_assistant.ipynb @@ -0,0 +1,701 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ec4f6b32-46e9-429a-a3cd-521ff5418493", + "metadata": {}, + "source": [ + "# Occasio - Event Management Assistant" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "import time\n", + "import pprint\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "import google.generativeai as genai\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv()\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b501508-0082-47be-9903-52ff1c243486", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic and Google and assign a model for each\n", + "\n", + "openai = OpenAI()\n", + "OPENAI_MODEL = \"gpt-4o-mini\"\n", + "\n", + "claude = anthropic.Anthropic()\n", + "ANTHROPIC_MODEL = \"claude-3-haiku-20240307\"\n", + "\n", + "genai.configure()\n", + "GOOGLE_MODEL = \"gemini-2.0-flash\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a521d84-d07c-49ab-a0df-d6451499ed97", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are called \\\"EventAI\\\", a virtual assistant for an Elementary school called Eagle Elementary School. You can help users by giving \\\n", + "them details of upcoming shcool events like event name, description, location etc. \"\n", + "#system_message += \"Introduce yourself with a warm welcome message on your first response ONLY.\"\n", + "system_message += \"Give short, courteous answers, no more than 2 sentences. \"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so. Do not make up your own event details information\"\n", + "system_message += \"You might be asked to list the questions asked by the user so far. In that situation, based on the conversation history provided to you, \\\n", + "list the questions and respond\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c", + "metadata": {}, + "outputs": [], + "source": [ + "# Some imports for handling images\n", + "\n", + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f", + "metadata": {}, + "outputs": [], + "source": [ + "def artist(event_text):\n", + " image_response = openai.images.generate(\n", + " model=\"dall-e-3\",\n", + " prompt=f\"An image representing an {event_text}, showing typical activities that happen for that {event_text}, in a vibrant pop-art style that elementary school kids will like\",\n", + " size=\"1024x1024\",\n", + " n=1,\n", + " response_format=\"b64_json\",\n", + " )\n", + " image_base64 = image_response.data[0].b64_json\n", + " image_data = base64.b64decode(image_base64)\n", + " return Image.open(BytesIO(image_data))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d104b96a-02ca-4159-82fe-88e0452aa479", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\",\n", + " input=message)\n", + "\n", + " audio_stream = BytesIO(response.content)\n", + " output_filename = \"output_audio.mp3\"\n", + " with open(output_filename, \"wb\") as f:\n", + " f.write(audio_stream.read())\n", + "\n", + " # Play the generated audio\n", + " display(Audio(output_filename, autoplay=True))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0428a74-4daa-4b0d-b25a-219a35f39f55", + "metadata": {}, + "outputs": [], + "source": [ + "school_events = [\n", + " {\n", + " \"event_id\": \"pta\",\n", + " \"name\": \"Parent Teachers Meeting (PTA/PTM)\",\n", + " \"description\": \"Parent teachers meeting (PTA/PTM) to discuss students' progress.\",\n", + " \"date_time\": \"Apr 1st, 2025 11 AM\",\n", + " \"location\" : \"Glove Annexure Hall\"\n", + " },\n", + " {\n", + " \"event_id\": \"read aloud\",\n", + " \"name\": \"Read Aloud to your class/Reading to your class\",\n", + " \"description\": \"Kids can bring their favorite book and read it to their class.\",\n", + " \"date_time\": \"Apr 15th, 2025 1 PM\",\n", + " \"location\": \"Classroom\"\n", + " },\n", + " {\n", + " \"event_id\": \"100 days of school\",\n", + " \"name\": \"Celebrating 100 days of school. Dress up time for kids\",\n", + " \"description\": \"Kids can dress up as old people and celebrate the milestone with their teachers.\",\n", + " \"date_time\": \"May 15th, 2025 11 AM\",\n", + " \"location\": \"Classroom\"\n", + " },\n", + " {\n", + " \"event_id\": \"Book fair\",\n", + " \"name\": \"Scholastic book fair\",\n", + " \"description\": \"Kids can purchase their favorite scholastic books.\",\n", + " \"date_time\": \"Jun 22nd, 2025 10:30 AM\",\n", + " \"location\": \"Library\"\n", + " },\n", + " {\n", + " \"event_id\": \"Halloween\",\n", + " \"name\": \"Halloween\",\n", + " \"description\": \"Kids can dress up as their favorite characters\",\n", + " \"date_time\": \"Oct 31st, 2025\",\n", + " \"location\": \"Classroom\"\n", + " },\n", + " {\n", + " \"event_id\": \"Movie Night\",\n", + " \"name\": \"Movie Night\",\n", + " \"description\": \"A popular and kids centric movie will be played. Kids and families are welcome.\",\n", + " \"date_time\": \"May 3rd, 2025\",\n", + " \"location\": \"Main auditorium\"\n", + " },\n", + " {\n", + " \"event_id\": \"Intruder Drill\",\n", + " \"name\": \"Intruder Drill\",\n", + " \"description\": \"State mandated monthly intruder drill to prepare staff and students with necessary safety skills in times of a crisis\",\n", + " \"date_time\": \"May 3rd, 2025\",\n", + " \"location\": \"Main auditorium\"\n", + " }\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b7027eec-e522-49c1-af59-56a82f9d3be8", + "metadata": {}, + "outputs": [], + "source": [ + "def get_event_details(query):\n", + " search_words = query.lower().split() \n", + " for event in school_events:\n", + " event_text = event['name'].lower() + ' ' + event['description'].lower()\n", + " if all(word in event_text for word in search_words):\n", + " return event\n", + " return None" + ] + }, + { + "cell_type": "markdown", + "id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Tools are an incredibly powerful feature provided by the frontier LLMs.\n", + "\n", + "With tools, you can write a function, and have the LLM call that function as part of its response.\n", + "\n", + "Sounds almost spooky.. we're giving it the power to run code on our machine?\n", + "\n", + "Well, kinda." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68e96b54-b891-4e7b-a6bc-17693dc99970", + "metadata": {}, + "outputs": [], + "source": [ + "# for claude\n", + "tools_claude = [\n", + " {\n", + " \"name\": \"get_event_details\",\n", + " \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n", + "'When is the pta meeting scheduled?\",\n", + " \"input_schema\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"event_text\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The event keyword that the user wants to getails on\"\n", + " }\n", + " },\n", + " \"required\": [\"event_text\"]\n", + " }\n", + "}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "636188d2-7e7a-48a0-9f04-f3813c7dc323", + "metadata": {}, + "outputs": [], + "source": [ + "# For GPT\n", + "events_function_gpt = {\n", + " \"name\": \"get_event_details\",\n", + " \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n", + " 'When is the pta meeting scheduled?\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"event_text\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The event keyword that the user wants to getails on\",\n", + " },\n", + " },\n", + " \"required\": [\"event_text\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "605684f8-ed02-4cc9-8a16-012533b601cb", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "tools_gpt = [{\"type\": \"function\", \"function\": events_function_gpt}]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4ac5a34c-a630-449a-9d46-669daace799c", + "metadata": {}, + "outputs": [], + "source": [ + "#Gemini function declaration structure\n", + "gemini_event_details = [{\n", + " \"name\": \"get_event_details\",\n", + " \"description\":\"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks 'When is the pta meeting scheduled?\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"event_text\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The event keyword that the user wants to details on\",\n", + " },\n", + " },\n", + " \"required\": [\"event_text\"],\n", + " },\n", + " },\n", + " {\n", + " \"name\": \"get_event_test\",\n", + " \"description\":\"This is a test function to validate if the function call picks up the right function if there are multiple functions.\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"event_text\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The event keyword that the user wants to details on\",\n", + " },\n", + " },\n", + " \"required\": [\"event_text\"],\n", + " },\n", + " }\n", + "]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6331113-63b0-4712-94bb-f363422a8441", + "metadata": {}, + "outputs": [], + "source": [ + "def chat_claude(history):\n", + " print(f\"\\nhistory is {history}\\n\")\n", + " #Claude doesnt take any other key value pair other than role and content. Hence filtering only those key value pairs\n", + " history_claude = list({\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in history if \"role\" in msg and \"content\" in msg)\n", + " #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]\n", + " #messages = history\n", + " message = claude.messages.create(\n", + " model=ANTHROPIC_MODEL,\n", + " max_tokens=1000,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=history_claude,\n", + " tools=tools_claude\n", + " )\n", + " image = None\n", + " print(f\"Claude's message is \\n {pprint.pprint(message)}\\n\")\n", + " try: \n", + " if message.stop_reason == \"tool_use\":\n", + " tool_use = next(block for block in message.content if block.type == \"tool_use\")\n", + " event_text = tool_use.input.get('event_text')\n", + " image = artist(event_text)\n", + " tool_result = handle_tool_call(event_text)\n", + " #tool_result = handle_tool_call(tool_use, \"Claude\")\n", + " \n", + " print(f\"Tool Result: {tool_result}\")\n", + " \n", + " response = claude.messages.stream(\n", + " model=ANTHROPIC_MODEL,\n", + " max_tokens=4096,\n", + " system=system_message,\n", + " messages=[\n", + " {\n", + " \"role\": \"user\", \n", + " \"content\": [\n", + " {\n", + " \"type\": \"text\",\n", + " \"text\": history[-1].get('content')\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"role\": \"assistant\", \n", + " \"content\": message.content\n", + " },\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"tool_result\",\n", + " \"tool_use_id\": tool_use.id,\n", + " \"content\": tool_result,\n", + " }\n", + " ],\n", + " },\n", + " ],\n", + " tools=tools_claude\n", + " )\n", + " result = \"\"\n", + " with response as stream:\n", + " for text in stream.text_stream:\n", + " result += text or \"\"\n", + " yield result, None\n", + " talker(result)\n", + " #image= artist(tool_input.get('event_text'))\n", + " yield result, image\n", + " else:\n", + " response = next((block.text for block in message.content if hasattr(block, \"text\")), None,)\n", + " chunk_size=30\n", + " for i in range(0, len(response), chunk_size):\n", + " yield response[:i + chunk_size], None\n", + " time.sleep(0.05) #Simulate streaming delay\n", + " talker(response)\n", + " #image= artist(tool_input.get('event_text'))\n", + " yield response, None\n", + " except Exception as e:\n", + " error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", + " print(e)\n", + " yield error_message, None\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9915ae05-5d52-4fdc-a3ea-18f050a79bd3", + "metadata": {}, + "outputs": [], + "source": [ + "def chat_gpt(history):\n", + " print(f\"\\nhistory is {history}\\n\")\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", + " response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)\n", + " image = None\n", + " try:\n", + " if response.choices[0].finish_reason==\"tool_calls\":\n", + " message = response.choices[0].message\n", + " tool = message.tool_calls[0]\n", + " arguments = json.loads(tool.function.arguments)\n", + " event_text = arguments.get('event_text')\n", + " image = artist(event_text)\n", + " event_json = handle_tool_call(event_text)\n", + " tool_output = {\n", + " \"role\": \"tool\",\n", + " \"content\": event_json,\n", + " \"tool_call_id\": tool.id\n", + " }\n", + " messages.append(message)\n", + " messages.append(tool_output)\n", + " stream = openai.chat.completions.create(\n", + " model=OPENAI_MODEL,\n", + " messages=messages,\n", + " stream=True\n", + " )\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " yield result, None\n", + " talker(result)\n", + " yield result, image\n", + " else: \n", + " reply = response.choices[0].message.content\n", + " chunk_size=30\n", + " for i in range(0, len(reply), chunk_size):\n", + " yield reply[:i + chunk_size], None\n", + " time.sleep(0.05)\n", + " talker(reply)\n", + " #image= artist(\"No such event\")\n", + " yield reply, None\n", + " except Exception as e:\n", + " error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", + " print(e)\n", + " yield error_message, None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30fa3de9-5b55-4bb6-93ea-a13fc09d38c1", + "metadata": {}, + "outputs": [], + "source": [ + "def chat_gemini(history):\n", + " print(f\"\\nhistroy is {history}\\n\")\n", + " history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format\n", + " else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is\n", + " else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc\n", + " \n", + " print(f\"\\nhistroy_gemini is {history_gemini}\\n\")\n", + " model = genai.GenerativeModel(\n", + " model_name=GOOGLE_MODEL,\n", + " system_instruction=system_message\n", + " )\n", + " response = model.generate_content(\n", + " contents = history_gemini,\n", + " #contents = contents,\n", + " tools = [{\n", + " 'function_declarations': gemini_event_details,\n", + " }],\n", + " )\n", + " #print(f\"response is {response}\")\n", + "\n", + " image = None\n", + " try:\n", + " # Check if the model wants to use a tool\n", + " if response.candidates[0].content.parts[0].function_call:\n", + " function_call = response.candidates[0].content.parts[0].function_call\n", + " event_text = function_call.args.get(\"event_text\")\n", + " image = artist(event_text)\n", + " tool_result = handle_tool_call(event_text)\n", + " \n", + " print(f\"\\ntool_result is {tool_result}\\n\")\n", + " stream = model.generate_content(\n", + " \"Based on this information `\" + tool_result + \"`, extract the details of the event and provide the event details to the user\",\n", + " stream=True \n", + " )\n", + " #print(f\"\\nSecond response is {stream}\\n\")\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.candidates[0].content.parts[0].text or \"\"\n", + " #print(f\"REsult is \\n{result}\\n\")\n", + " yield result, None\n", + " talker(result) \n", + " yield result, image\n", + " #print(f\"REsult is \\n{result}\\n\")\n", + " else: \n", + " reply = response.text\n", + " chunk_size=30\n", + " for i in range(0, len(reply), chunk_size):\n", + " yield reply[:i + chunk_size], None\n", + " time.sleep(0.05)\n", + " talker(reply)\n", + " #image= artist(\"No such event\")\n", + " yield reply, None\n", + " \n", + " except Exception as e:\n", + " error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", + " print(e)\n", + " yield error_message, None\n", + " \n", + "\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "570fffb2-a054-4217-89ae-8b6f4630e383", + "metadata": {}, + "outputs": [], + "source": [ + "def call_and_process_model_responses(fn_name, chatbot):#, response, image):\n", + " response = \"\"\n", + " image = None\n", + " for response, image in fn_name(chatbot):\n", + " if chatbot and chatbot[-1][\"role\"] == \"assistant\": \n", + " chatbot[-1][\"content\"] = response # Update the last message\n", + " else:\n", + " chatbot.append({\"role\": \"assistant\", \"content\": response}) # First assistant message\n", + " #print(chatbot)\n", + " yield chatbot, image # Stream updated history to UI\n", + " \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32a6ccce-44fa-49a7-bd1a-08c70002771c", + "metadata": {}, + "outputs": [], + "source": [ + "def handle_tool_call(event_text):\n", + " print(f\"event text is {event_text}\")\n", + " event_found = get_event_details(event_text)\n", + " print(f\"event_found is {event_found}\")\n", + " \n", + " if event_found:\n", + " response = json.dumps({\"name\": event_found['name'],\"description\": event_found['description'], \"when\": event_found['date_time'], \"where\": event_found['location']})\n", + " else: \n", + " response = json.dumps({\"event\": f\"Sorry, there is no schedule currently for {event_text}\"})\n", + " return response \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4eaaaf9e-64b9-4d0b-9931-388cee8ea21d", + "metadata": {}, + "outputs": [], + "source": [ + "def process_chosen_model(chatbot, model):\n", + " if model == 'GPT':\n", + " for chatbot, image in call_and_process_model_responses(chat_gpt, chatbot):\n", + " yield chatbot, image\n", + " elif model == 'Claude': \n", + " for chatbot, image in call_and_process_model_responses(chat_claude, chatbot):\n", + " yield chatbot, image\n", + " else:\n", + " #for Gemini, the content is to be replaced with parts.\n", + " for chatbot, image in call_and_process_model_responses(chat_gemini, chatbot):\n", + " yield chatbot, image\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "627f6d49-5376-4f1d-8071-f2e96fd6e78b", + "metadata": {}, + "outputs": [], + "source": [ + "# More involved Gradio code as we're not using the preset Chat interface!\n", + "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", + "\n", + "with gr.Blocks(css=\"\"\"\n", + " select.gr-box { \n", + " appearance: auto !important; \n", + " -webkit-appearance: auto !important; \n", + " }\n", + "\"\"\") as ui:\n", + " with gr.Row():\n", + " gr.HTML(\"

Occasio! An Event Management Assistant

\") # Added title\n", + " with gr.Row():\n", + " # with gr.Column(scale=3): #Acts as a spacer on the left\n", + " # pass\n", + " \n", + " with gr.Column(scale=0):\n", + " model = gr.Dropdown(\n", + " choices=[\"GPT\", \"Claude\", \"Gemini\"], \n", + " label=\"Select model\", \n", + " value=\"GPT\",\n", + " interactive=True,\n", + " container=True # Applying the CSS class\n", + " )\n", + " # with gr.Column(scale=-54, min_width=200):\n", + " # gr.HTML(\"

Occasio

\") # Added title\n", + " # pass #Acts as a spacer on the right\n", + " with gr.Row():\n", + " chatbot = gr.Chatbot(height=500, type=\"messages\")\n", + " image_output = gr.Image(height=500)\n", + " with gr.Row():\n", + " entry = gr.Textbox(label=\"Ask me \\\"when is pta meeting\\\", \\\"how about book fair\\\" and more... \")\n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear\", min_width=150)\n", + " #message=None\n", + "\n", + " def do_entry(message, history):\n", + " history += [{\"role\":\"user\", \"content\":message}]\n", + " return \"\", history\n", + " \n", + " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", + " process_chosen_model, inputs=[chatbot, model], outputs=[chatbot, image_output]\n", + " )\n", + " clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", + "\n", + "ui.launch(inbrowser=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From ae8c7afe5c9d0eda6800751cedb5f4dd90da1915 Mon Sep 17 00:00:00 2001 From: Brian Date: Mon, 10 Mar 2025 17:58:22 -0700 Subject: [PATCH 12/43] community notebook: beatnik jokes --- .../beatnik_jokes.ipynb | 981 ++++++++++++++++++ 1 file changed, 981 insertions(+) create mode 100644 week2/community-contributions/beatnik_jokes.ipynb diff --git a/week2/community-contributions/beatnik_jokes.ipynb b/week2/community-contributions/beatnik_jokes.ipynb new file mode 100644 index 0000000..b7a4db7 --- /dev/null +++ b/week2/community-contributions/beatnik_jokes.ipynb @@ -0,0 +1,981 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927", + "metadata": {}, + "source": [ + "# Welcome to Week 2!\n", + "\n", + "## Frontier Model APIs\n", + "\n", + "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n", + "\n", + "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI." + ] + }, + { + "cell_type": "markdown", + "id": "2b268b6e-0ba4-461e-af86-74a41f4d681f", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Important Note - Please read me

\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.
\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!

\n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:
\n", + " conda env update --f environment.yml
\n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):
\n", + " pip install -r requirements.txt\n", + "
Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + "
\n", + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Reminder about the resources page

\n", + " Here's a link to resources for the course. This includes links to all the slides.
\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/
\n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "85cfe275-4705-4d30-abea-643fbddf1db0", + "metadata": {}, + "source": [ + "## Setting up your keys\n", + "\n", + "If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n", + "\n", + "**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthopic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n", + "\n", + "For OpenAI, visit https://openai.com/api/ \n", + "For Anthropic, visit https://console.anthropic.com/ \n", + "For Google, visit https://ai.google.dev/gemini-api \n", + "\n", + "### Also - adding DeepSeek if you wish\n", + "\n", + "Optionally, if you'd like to also use DeepSeek, create an account [here](https://platform.deepseek.com/), create a key [here](https://platform.deepseek.com/api_keys) and top up with at least the minimum $2 [here](https://platform.deepseek.com/top_up).\n", + "\n", + "### Adding API keys to your .env file\n", + "\n", + "When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n", + "\n", + "```\n", + "OPENAI_API_KEY=xxxx\n", + "ANTHROPIC_API_KEY=xxxx\n", + "GOOGLE_API_KEY=xxxx\n", + "DEEPSEEK_API_KEY=xxxx\n", + "```\n", + "\n", + "Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", + "metadata": {}, + "outputs": [], + "source": [ + "# import for google\n", + "# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n", + "# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n", + "\n", + "import google.generativeai" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n", + "Google API Key exists and begins AIzaSyCN\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2c072312-4ab1-4a85-8ec0-1c91b281596c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print(claude)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "425ed580-808d-429b-85b0-6cba50ca1d0c", + "metadata": {}, + "outputs": [], + "source": [ + "# This is the set up code for Gemini\n", + "# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n", + "\n", + "google.generativeai.configure()" + ] + }, + { + "cell_type": "markdown", + "id": "42f77b59-2fb1-462a-b90d-78994e4cef33", + "metadata": {}, + "source": [ + "## Asking LLMs to tell a joke\n", + "\n", + "It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n", + "Later we will be putting LLMs to better use!\n", + "\n", + "### What information is included in the API\n", + "\n", + "Typically we'll pass to the API:\n", + "- The name of the model that should be used\n", + "- A system message that gives overall context for the role the LLM is playing\n", + "- A user message that provides the actual prompt\n", + "\n", + "There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "378a0296-59a2-45c6-82eb-941344d3eeff", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that is great at telling jokes\"\n", + "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", + "metadata": {}, + "outputs": [], + "source": [ + "prompts = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the statistician?\n", + "\n", + "Because they couldn't find a common mean!\n" + ] + } + ], + "source": [ + "# GPT-3.5-Turbo\n", + "\n", + "completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the statistician? \n", + "\n", + "Because she felt like she was just one of the many variables in his life!\n" + ] + } + ], + "source": [ + "# GPT-4o-mini\n", + "# Temperature setting controls creativity\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o-mini',\n", + " messages=prompts,\n", + " temperature=0.7\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the logistic regression model?\n", + "\n", + "Because it couldn't handle the relationship's complexity and kept giving them mixed signals!\n" + ] + } + ], + "source": [ + "# GPT-4o\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.4\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Here's one for the data scientists:\n", + "\n", + "Why did the data scientist bring a ladder to work?\n", + "\n", + "Because they heard the data was skewed and needed to be normalized!\n", + "\n", + "*Alternative data science jokes:*\n", + "\n", + "Why do data scientists make great partners?\n", + "Because they know the importance of a good correlation!\n", + "\n", + "What's a data scientist's favorite drink?\n", + "Root beer, because it's square root beer! \n", + "\n", + "These are pretty nerdy, but I figured they'd get a chuckle out of a room full of data scientists! ๐Ÿ˜„\n" + ] + } + ], + "source": [ + "# Claude 3.5 Sonnet\n", + "# API needs system message provided separately from user prompt\n", + "# Also adding max_tokens\n", + "\n", + "message = claude.messages.create(\n", + " model=\"claude-3-5-sonnet-latest\",\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "print(message.content[0].text)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Here's one for the data scientists:\n", + "\n", + " a gardener?data scientist become\n", + "\n", + " could grow decision trees! ๐ŸŒณ\n", + "\n", + " jokes:tive\n", + "\n", + " kind of music?a scientist's favorite\n", + " and blues!m\n", + "\n", + "do data scientists always confuse Halloween and Christmas?\n", + "= Dec 25! Oct 31 \n", + " one's a classic binary number system joke)\n", + "\n", + " they couldn't find their pencil?y when\n", + "There's a statistically significant chance someone took it!\"\n", + "\n", + " one - I've got datasets full of them! ๐Ÿ˜„" + ] + } + ], + "source": [ + "# Claude 3.5 Sonnet again\n", + "# Now let's add in streaming back results\n", + "# If the streaming looks strange, then please see the note below this cell!\n", + "\n", + "result = claude.messages.stream(\n", + " model=\"claude-3-5-sonnet-latest\",\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "with result as stream:\n", + " for text in stream.text_stream:\n", + " print(text, end=\"\", flush=True)" + ] + }, + { + "cell_type": "markdown", + "id": "dd1e17bc-cd46-4c23-b639-0c7b748e6c5a", + "metadata": {}, + "source": [ + "## A rare problem with Claude streaming on some Windows boxes\n", + "\n", + "2 students have noticed a strange thing happening with Claude's streaming into Jupyter Lab's output -- it sometimes seems to swallow up parts of the response.\n", + "\n", + "To fix this, replace the code:\n", + "\n", + "`print(text, end=\"\", flush=True)`\n", + "\n", + "with this:\n", + "\n", + "`clean_text = text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")` \n", + "`print(clean_text, end=\"\", flush=True)`\n", + "\n", + "And it should work fine!" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the time series analyst?\n", + "\n", + "Because he said she was too predictable, and he needed someone with more VARiety!\n", + "\n" + ] + } + ], + "source": [ + "# The API for Gemini has a slightly different structure.\n", + "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", + "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n", + "\n", + "gemini = google.generativeai.GenerativeModel(\n", + " model_name='gemini-2.0-flash-exp',\n", + " system_instruction=system_message\n", + ")\n", + "response = gemini.generate_content(user_prompt)\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "49009a30-037d-41c8-b874-127f61c4aa3a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the time series model? \n", + "\n", + "Because it wasn't very *present*!\n", + "\n" + ] + } + ], + "source": [ + "# As an alternative way to use Gemini that bypasses Google's python API library,\n", + "# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n", + "\n", + "gemini_via_openai_client = OpenAI(\n", + " api_key=google_api_key, \n", + " base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", + ")\n", + "\n", + "response = gemini_via_openai_client.chat.completions.create(\n", + " model=\"gemini-2.0-flash-exp\",\n", + " messages=prompts\n", + ")\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "33f70c88-7ca9-470b-ad55-d93a57dcc0ab", + "metadata": {}, + "source": [ + "## (Optional) Trying out the DeepSeek model\n", + "\n", + "### Let's ask DeepSeek a really hard question - both the Chat and the Reasoner model" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "3d0019fb-f6a8-45cb-962b-ef8bf7070d4d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DeepSeek API Key exists and begins xxx\n" + ] + } + ], + "source": [ + "# Optionally if you wish to try DeekSeek, you can also use the OpenAI client library\n", + "\n", + "deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n", + "\n", + "if deepseek_api_key:\n", + " print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n", + "else:\n", + " print(\"DeepSeek API Key not set - please skip to the next section if you don't wish to try the DeepSeek API\")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "c72c871e-68d6-4668-9c27-96d52b77b867", + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + } + }, + "outputs": [ + { + "ename": "AuthenticationError", + "evalue": "Error code: 401 - {'error': {'message': 'Authentication Fails (no such user)', 'type': 'authentication_error', 'param': None, 'code': 'invalid_request_error'}}", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mAuthenticationError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[17]\u001b[39m\u001b[32m, line 8\u001b[39m\n\u001b[32m 1\u001b[39m \u001b[38;5;66;03m# Using DeepSeek Chat\u001b[39;00m\n\u001b[32m 3\u001b[39m deepseek_via_openai_client = OpenAI(\n\u001b[32m 4\u001b[39m api_key=deepseek_api_key, \n\u001b[32m 5\u001b[39m base_url=\u001b[33m\"\u001b[39m\u001b[33mhttps://api.deepseek.com\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 6\u001b[39m )\n\u001b[32m----> \u001b[39m\u001b[32m8\u001b[39m response = \u001b[43mdeepseek_via_openai_client\u001b[49m\u001b[43m.\u001b[49m\u001b[43mchat\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcompletions\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 9\u001b[39m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m=\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mdeepseek-chat\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[32m 10\u001b[39m \u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m=\u001b[49m\u001b[43mprompts\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 11\u001b[39m \u001b[43m)\u001b[49m\n\u001b[32m 13\u001b[39m \u001b[38;5;28mprint\u001b[39m(response.choices[\u001b[32m0\u001b[39m].message.content)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~\\Projects\\llm_engineering\\llms\\Lib\\site-packages\\openai\\_utils\\_utils.py:279\u001b[39m, in \u001b[36mrequired_args..inner..wrapper\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m 277\u001b[39m msg = \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mMissing required argument: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mquote(missing[\u001b[32m0\u001b[39m])\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 278\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(msg)\n\u001b[32m--> \u001b[39m\u001b[32m279\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~\\Projects\\llm_engineering\\llms\\Lib\\site-packages\\openai\\resources\\chat\\completions\\completions.py:879\u001b[39m, in \u001b[36mCompletions.create\u001b[39m\u001b[34m(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, reasoning_effort, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)\u001b[39m\n\u001b[32m 837\u001b[39m \u001b[38;5;129m@required_args\u001b[39m([\u001b[33m\"\u001b[39m\u001b[33mmessages\u001b[39m\u001b[33m\"\u001b[39m, \u001b[33m\"\u001b[39m\u001b[33mmodel\u001b[39m\u001b[33m\"\u001b[39m], [\u001b[33m\"\u001b[39m\u001b[33mmessages\u001b[39m\u001b[33m\"\u001b[39m, \u001b[33m\"\u001b[39m\u001b[33mmodel\u001b[39m\u001b[33m\"\u001b[39m, \u001b[33m\"\u001b[39m\u001b[33mstream\u001b[39m\u001b[33m\"\u001b[39m])\n\u001b[32m 838\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mcreate\u001b[39m(\n\u001b[32m 839\u001b[39m \u001b[38;5;28mself\u001b[39m,\n\u001b[32m (...)\u001b[39m\u001b[32m 876\u001b[39m timeout: \u001b[38;5;28mfloat\u001b[39m | httpx.Timeout | \u001b[38;5;28;01mNone\u001b[39;00m | NotGiven = NOT_GIVEN,\n\u001b[32m 877\u001b[39m ) -> ChatCompletion | Stream[ChatCompletionChunk]:\n\u001b[32m 878\u001b[39m validate_response_format(response_format)\n\u001b[32m--> \u001b[39m\u001b[32m879\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_post\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 880\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43m/chat/completions\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[32m 881\u001b[39m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmaybe_transform\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 882\u001b[39m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\n\u001b[32m 883\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmessages\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 884\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmodel\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 885\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43maudio\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43maudio\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 886\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfrequency_penalty\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrequency_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 887\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfunction_call\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunction_call\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 888\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mfunctions\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunctions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 889\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mlogit_bias\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogit_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 890\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mlogprobs\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 891\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmax_completion_tokens\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_completion_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 892\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmax_tokens\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 893\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmetadata\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 894\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mmodalities\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodalities\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 895\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mn\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 896\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mparallel_tool_calls\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mparallel_tool_calls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 897\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mprediction\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mprediction\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 898\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mpresence_penalty\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mpresence_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 899\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mreasoning_effort\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mreasoning_effort\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 900\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mresponse_format\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mresponse_format\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 901\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mseed\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mseed\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 902\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mservice_tier\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mservice_tier\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 903\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstop\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 904\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstore\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstore\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 905\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstream\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 906\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mstream_options\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 907\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtemperature\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemperature\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 908\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtool_choice\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtool_choice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 909\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtools\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 910\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtop_logprobs\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_logprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 911\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtop_p\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_p\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 912\u001b[39m \u001b[43m \u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43muser\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43muser\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 913\u001b[39m \u001b[43m \u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 914\u001b[39m \u001b[43m \u001b[49m\u001b[43mcompletion_create_params\u001b[49m\u001b[43m.\u001b[49m\u001b[43mCompletionCreateParams\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 915\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 916\u001b[39m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmake_request_options\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 917\u001b[39m \u001b[43m \u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_query\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_query\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_body\u001b[49m\u001b[43m=\u001b[49m\u001b[43mextra_body\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtimeout\u001b[49m\n\u001b[32m 918\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 919\u001b[39m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m=\u001b[49m\u001b[43mChatCompletion\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 920\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[32m 921\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mStream\u001b[49m\u001b[43m[\u001b[49m\u001b[43mChatCompletionChunk\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 922\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~\\Projects\\llm_engineering\\llms\\Lib\\site-packages\\openai\\_base_client.py:1242\u001b[39m, in \u001b[36mSyncAPIClient.post\u001b[39m\u001b[34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001b[39m\n\u001b[32m 1228\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mpost\u001b[39m(\n\u001b[32m 1229\u001b[39m \u001b[38;5;28mself\u001b[39m,\n\u001b[32m 1230\u001b[39m path: \u001b[38;5;28mstr\u001b[39m,\n\u001b[32m (...)\u001b[39m\u001b[32m 1237\u001b[39m stream_cls: \u001b[38;5;28mtype\u001b[39m[_StreamT] | \u001b[38;5;28;01mNone\u001b[39;00m = \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[32m 1238\u001b[39m ) -> ResponseT | _StreamT:\n\u001b[32m 1239\u001b[39m opts = FinalRequestOptions.construct(\n\u001b[32m 1240\u001b[39m method=\u001b[33m\"\u001b[39m\u001b[33mpost\u001b[39m\u001b[33m\"\u001b[39m, url=path, json_data=body, files=to_httpx_files(files), **options\n\u001b[32m 1241\u001b[39m )\n\u001b[32m-> \u001b[39m\u001b[32m1242\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m cast(ResponseT, \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mopts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m)\u001b[49m)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~\\Projects\\llm_engineering\\llms\\Lib\\site-packages\\openai\\_base_client.py:919\u001b[39m, in \u001b[36mSyncAPIClient.request\u001b[39m\u001b[34m(self, cast_to, options, remaining_retries, stream, stream_cls)\u001b[39m\n\u001b[32m 916\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 917\u001b[39m retries_taken = \u001b[32m0\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m919\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 920\u001b[39m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 921\u001b[39m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[43m=\u001b[49m\u001b[43moptions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 922\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 923\u001b[39m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 924\u001b[39m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 925\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[36mFile \u001b[39m\u001b[32m~\\Projects\\llm_engineering\\llms\\Lib\\site-packages\\openai\\_base_client.py:1023\u001b[39m, in \u001b[36mSyncAPIClient._request\u001b[39m\u001b[34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[39m\n\u001b[32m 1020\u001b[39m err.response.read()\n\u001b[32m 1022\u001b[39m log.debug(\u001b[33m\"\u001b[39m\u001b[33mRe-raising status error\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m-> \u001b[39m\u001b[32m1023\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m._make_status_error_from_response(err.response) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 1025\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._process_response(\n\u001b[32m 1026\u001b[39m cast_to=cast_to,\n\u001b[32m 1027\u001b[39m options=options,\n\u001b[32m (...)\u001b[39m\u001b[32m 1031\u001b[39m retries_taken=retries_taken,\n\u001b[32m 1032\u001b[39m )\n", + "\u001b[31mAuthenticationError\u001b[39m: Error code: 401 - {'error': {'message': 'Authentication Fails (no such user)', 'type': 'authentication_error', 'param': None, 'code': 'invalid_request_error'}}" + ] + } + ], + "source": [ + "# Using DeepSeek Chat\n", + "\n", + "deepseek_via_openai_client = OpenAI(\n", + " api_key=deepseek_api_key, \n", + " base_url=\"https://api.deepseek.com\"\n", + ")\n", + "\n", + "response = deepseek_via_openai_client.chat.completions.create(\n", + " model=\"deepseek-chat\",\n", + " messages=prompts,\n", + ")\n", + "\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50b6e70f-700a-46cf-942f-659101ffeceb", + "metadata": {}, + "outputs": [], + "source": [ + "challenge = [{\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n", + " {\"role\": \"user\", \"content\": \"How many words are there in your answer to this prompt\"}]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66d1151c-2015-4e37-80c8-16bc16367cfe", + "metadata": {}, + "outputs": [], + "source": [ + "# Using DeepSeek Chat with a harder question! And streaming results\n", + "\n", + "stream = deepseek_via_openai_client.chat.completions.create(\n", + " model=\"deepseek-chat\",\n", + " messages=challenge,\n", + " stream=True\n", + ")\n", + "\n", + "reply = \"\"\n", + "display_handle = display(Markdown(\"\"), display_id=True)\n", + "for chunk in stream:\n", + " reply += chunk.choices[0].delta.content or ''\n", + " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", + " update_display(Markdown(reply), display_id=display_handle.display_id)\n", + "\n", + "print(\"Number of words:\", len(reply.split(\" \")))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43a93f7d-9300-48cc-8c1a-ee67380db495", + "metadata": {}, + "outputs": [], + "source": [ + "# Using DeepSeek Reasoner - this may hit an error if DeepSeek is busy\n", + "# It's over-subscribed (as of 28-Jan-2025) but should come back online soon!\n", + "# If this fails, come back to this in a few days..\n", + "\n", + "response = deepseek_via_openai_client.chat.completions.create(\n", + " model=\"deepseek-reasoner\",\n", + " messages=challenge\n", + ")\n", + "\n", + "reasoning_content = response.choices[0].message.reasoning_content\n", + "content = response.choices[0].message.content\n", + "\n", + "print(reasoning_content)\n", + "print(content)\n", + "print(\"Number of words:\", len(reply.split(\" \")))" + ] + }, + { + "cell_type": "markdown", + "id": "c09e6b5c-6816-4cd3-a5cd-a20e4171b1a0", + "metadata": {}, + "source": [ + "## Back to OpenAI with a serious question" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "83ddb483-4f57-4668-aeea-2aade3a9e573", + "metadata": {}, + "outputs": [], + "source": [ + "# To be serious! GPT-4o-mini with the original question\n", + "\n", + "prompts = [\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n", + " {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? Please respond in Markdown.\"}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "749f50ab-8ccd-4502-a521-895c3f0808a2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Deciding if a business problem is suitable for a Large Language Model (LLM) solution involves assessing various factors related to the problem, the capabilities of LLMs, and the potential impact. Here's a guide to help you make this decision:\n", + "\n", + "### 1. **Understand the Problem Domain**\n", + "\n", + "- **Nature of the Problem:** Is the problem related to language processing, such as text generation, summarization, sentiment analysis, question answering, translation, etc.?\n", + "- **Complexity and Ambiguity:** Does the problem involve complex or ambiguous language understanding that might benefit from the nuanced capabilities of an LLM?\n", + "\n", + "### 2. **Assess the Suitability of LLMs**\n", + "\n", + "- **Language-Centric Tasks:** LLMs are particularly strong in tasks that require understanding and generating human language.\n", + "- **Need for Contextual Understanding:** If the problem requires understanding context and nuance in language, LLMs are likely suitable.\n", + "- **Content Generation:** If the task involves creating coherent and contextually relevant text, LLMs can be effective.\n", + "\n", + "### 3. **Evaluate Data Availability and Quality**\n", + "\n", + "- **Data Requirements:** Do you have access to the necessary data to train or fine-tune an LLM if required?\n", + "- **Data Quality and Quantity:** Is the data high-quality and sufficient in volume to support the model's needs?\n", + "\n", + "### 4. **Consider the Business Impact**\n", + "\n", + "- **Value Addition:** Will using an LLM add significant value over existing solutions or methods?\n", + "- **Cost-Benefit Analysis:** Does the potential benefit outweigh the costs involved in implementing and maintaining an LLM solution?\n", + "\n", + "### 5. **Technical Feasibility**\n", + "\n", + "- **Infrastructure:** Do you have the necessary infrastructure or resources to deploy and maintain an LLM solution?\n", + "- **Scalability:** Can the solution scale to meet your business needs?\n", + "\n", + "### 6. **Ethical and Compliance Considerations**\n", + "\n", + "- **Bias and Fairness:** Are you prepared to address potential biases in LLM outputs and ensure fairness?\n", + "- **Privacy and Security:** Does the solution comply with data privacy and security regulations?\n", + "\n", + "### 7. **Long-term Viability**\n", + "\n", + "- **Maintenance and Updates:** Consider the long-term maintenance and the need for updates as new models and techniques emerge.\n", + "- **Adaptability:** Can the solution adapt to changing business needs or advancements in technology?\n", + "\n", + "### Conclusion\n", + "\n", + "If your business problem aligns well with the strengths of LLMs, you have the necessary data and resources, and the solution provides a clear business benefit while addressing ethical considerations, then an LLM solution is likely suitable. Otherwise, you may need to explore alternative approaches or refine your problem definition." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Have it stream back results in markdown\n", + "\n", + "stream = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.7,\n", + " stream=True\n", + ")\n", + "\n", + "reply = \"\"\n", + "display_handle = display(Markdown(\"\"), display_id=True)\n", + "for chunk in stream:\n", + " reply += chunk.choices[0].delta.content or ''\n", + " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", + " update_display(Markdown(reply), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "markdown", + "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f", + "metadata": {}, + "source": [ + "## And now for some fun - an adversarial conversation between Chatbots..\n", + "\n", + "You're already familar with prompts being organized into lists like:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"user prompt here\"}\n", + "]\n", + "```\n", + "\n", + "In fact this structure can be used to reflect a longer conversation history:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", + " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", + " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n", + "]\n", + "```\n", + "\n", + "And we can use this approach to engage in a longer interaction with history." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "gpt_system = \"You are a chatbot who is very argumentative; \\\n", + "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", + "\n", + "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", + "everything the other person says, or find common ground. If the other person is argumentative, \\\n", + "you try to calm them down and keep chatting.\"\n", + "\n", + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": claude})\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude():\n", + " messages = []\n", + " for gpt, claude_message in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "01395200-8ae9-41f8-9a04-701624d3fd26", + "metadata": {}, + "outputs": [], + "source": [ + "call_claude()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae", + "metadata": {}, + "outputs": [], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"]\n", + "\n", + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"Claude:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)" + ] + }, + { + "cell_type": "markdown", + "id": "1d10e705-db48-4290-9dc8-9efdb4e31323", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Before you continue

\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how the messages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?
\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac", + "metadata": {}, + "source": [ + "# More advanced exercises\n", + "\n", + "Try creating a 3-way, perhaps bringing Gemini into the conversation! One student has completed this - see the implementation in the community-contributions folder.\n", + "\n", + "Try doing this yourself before you look at the solutions. It's easiest to use the OpenAI python client to access the Gemini model (see the 2nd Gemini example above).\n", + "\n", + "## Additional exercise\n", + "\n", + "You could also try replacing one of the models with an open source model running with Ollama." + ] + }, + { + "cell_type": "markdown", + "id": "446c81e3-b67e-4cd9-8113-bc3092b93063", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business relevance

\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c23224f6-7008-44ed-a57f-718975f4e291", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 6db015abba6e238eaea47db2d80a126af46c85e7 Mon Sep 17 00:00:00 2001 From: Manish Gajria Date: Tue, 11 Mar 2025 18:10:59 +0000 Subject: [PATCH 13/43] adding week2 exercise work with a translator using different frontier model and also trying audio input using whisper library --- .../week2_day5_translation_audio.ipynb | 1075 +++++++++++++++++ 1 file changed, 1075 insertions(+) create mode 100644 week2/community-contributions/week2_day5_translation_audio.ipynb diff --git a/week2/community-contributions/week2_day5_translation_audio.ipynb b/week2/community-contributions/week2_day5_translation_audio.ipynb new file mode 100644 index 0000000..65d6cb0 --- /dev/null +++ b/week2/community-contributions/week2_day5_translation_audio.ipynb @@ -0,0 +1,1075 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", + "metadata": {}, + "source": [ + "# Project - Airline AI Assistant\n", + "\n", + "We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import random\n", + "import anthropic\n", + "import whisper" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/manishgajria/anaconda3/envs/llms/lib/python3.11/site-packages/whisper/__init__.py:150: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " checkpoint = torch.load(fp, map_location=device)\n" + ] + } + ], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "MODEL = \"gpt-4o-mini\"\n", + "# MODEL_TRANSLATE = \"claude-3-5-sonnet-latest\"\n", + "MODEL_TRANSLATE = \"claude-3-haiku-20240307\"\n", + "openai = OpenAI()\n", + "claude = anthropic.Anthropic()\n", + "whisper_model = whisper.load_model(\"base\",device=\"cpu\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0a521d84-d07c-49ab-a0df-d6451499ed97", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", + "metadata": {}, + "outputs": [], + "source": [ + "# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", + "\n", + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " return response.choices[0].message.content\n", + "\n", + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Tools are an incredibly powerful feature provided by the frontier LLMs.\n", + "\n", + "With tools, you can write a function, and have the LLM call that function as part of its response.\n", + "\n", + "Sounds almost spooky.. we're giving it the power to run code on our machine?\n", + "\n", + "Well, kinda." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's start by making a useful function\n", + "\n", + "ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", + "\n", + "def get_ticket_price(destination_city):\n", + " print(f\"Tool get_ticket_price called for {destination_city}\")\n", + " city = destination_city.lower()\n", + " return ticket_prices.get(city, \"Unknown\")\n", + "\n", + "def make_booking(destination_city):\n", + " print(f\"Your booking for\",destination_city,\"has been confirmed\")\n", + " confirmation_number=random.randint(100000, 999999)\n", + " return str(confirmation_number)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", + "metadata": {}, + "outputs": [], + "source": [ + "get_ticket_price(\"London\")\n", + "make_booking(\"Tokyo\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "4afceded-7178-4c05-8fa6-9f2085e6a344", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "price_function = {\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}\n", + "\n", + "booking_function = {\n", + " \"name\": \"make_booking\",\n", + " \"description\": \"Make a booking for the destination city. Call this whenever a customer confirms they want to make a booking and return a booking id\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to book a flight to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "\n", + "tools = [{\"type\": \"function\", \"function\": price_function}, {\"type\": \"function\", \"function\": booking_function}]" + ] + }, + { + "cell_type": "markdown", + "id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340", + "metadata": {}, + "source": [ + "## Getting OpenAI to use our Tool\n", + "\n", + "There's some fiddly stuff to allow OpenAI \"to call our tool\"\n", + "\n", + "What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n", + "\n", + "Here's how the new chat function looks:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf", + "metadata": {}, + "outputs": [], + "source": [ + "# def chat1(message, history):\n", + "# messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "# response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + "\n", + "# if response.choices[0].finish_reason==\"tool_calls\":\n", + "# message = response.choices[0].message\n", + "# response, city = handle_tool_call(message)\n", + "# messages.append(message)\n", + "# messages.append(response)\n", + "# response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " \n", + "# return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "b3406797-3cd3-4e60-a39a-0b2ee0f60f8c", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + "\n", + " if response.choices[0].finish_reason == \"tool_calls\":\n", + " message = response.choices[0].message\n", + " tool_name = message.tool_calls[0].function.name\n", + "\n", + " if tool_name == \"get_ticket_price\":\n", + " response, city = handle_tool_call(message)\n", + " elif tool_name == \"make_booking\":\n", + " response = handle_tool_call_booking(message)\n", + "\n", + " messages.extend([message, response])\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + "\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "b0992986-ea09-4912-a076-8e5603ee631f", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def handle_tool_call(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " city = arguments.get('destination_city')\n", + " price = get_ticket_price(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, city" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "a99c7757-b263-424a-98d1-65b5cf0d2284", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call for bookings:\n", + "\n", + "def handle_tool_call_booking(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " city = arguments.get('destination_city')\n", + " booking_confirmation=make_booking(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": booking_confirmation,\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, city" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "id": "473e5b39-da8f-4db1-83ae-dbaca2e9531e", + "metadata": {}, + "source": [ + "# Let's go multi-modal!!\n", + "\n", + "We can use DALL-E-3, the image generation model behind GPT-4o, to make us some images\n", + "\n", + "Let's put this in a function called artist.\n", + "\n", + "### Price alert: each time I generate an image it costs about 4 cents - don't go crazy with images!" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c", + "metadata": {}, + "outputs": [], + "source": [ + "# Some imports for handling images\n", + "\n", + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f", + "metadata": {}, + "outputs": [], + "source": [ + "# rigged to reduce cost of OpenAI image generation while debugging code \n", + "def artist(city):\n", + " # image_response = openai.images.generate(\n", + " # model=\"dall-e-3\",\n", + " # prompt=f\"An image representing a vacation in {city}, showing tourist spots and everything unique about {city}, in a vibrant pop-art style\",\n", + " # size=\"1024x1024\",\n", + " # n=1,\n", + " # response_format=\"b64_json\",\n", + " # )\n", + " # image_base64 = image_response.data[0].b64_json\n", + " # image_data = base64.b64decode(image_base64)\n", + " # return Image.open(BytesIO(image_data))\n", + " image_file=Image.open(\"/Users/manishgajria/projects/llm_engineering/MGcode/image.webp\")\n", + " return(image_file)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d877c453-e7fb-482a-88aa-1a03f976b9e9", + "metadata": {}, + "outputs": [], + "source": [ + "image = artist(\"New York City\")\n", + "display(image)" + ] + }, + { + "cell_type": "markdown", + "id": "f4975b87-19e9-4ade-a232-9b809ec75c9a", + "metadata": {}, + "source": [ + "## Audio (NOTE - Audio is optional for this course - feel free to skip Audio if it causes trouble!)\n", + "\n", + "And let's make a function talker that uses OpenAI's speech model to generate Audio\n", + "\n", + "### Troubleshooting Audio issues\n", + "\n", + "If you have any problems running this code below (like a FileNotFound error, or a warning of a missing package), you may need to install FFmpeg, a very popular audio utility.\n", + "\n", + "**For PC Users**\n", + "\n", + "Detailed instructions are [here](https://chatgpt.com/share/6724efee-6b0c-8012-ac5e-72e2e3885905) and summary instructions:\n", + "\n", + "1. Download FFmpeg from the official website: https://ffmpeg.org/download.html\n", + "\n", + "2. Extract the downloaded files to a location on your computer (e.g., `C:\\ffmpeg`)\n", + "\n", + "3. Add the FFmpeg bin folder to your system PATH:\n", + "- Right-click on 'This PC' or 'My Computer' and select 'Properties'\n", + "- Click on 'Advanced system settings'\n", + "- Click on 'Environment Variables'\n", + "- Under 'System variables', find and edit 'Path'\n", + "- Add a new entry with the path to your FFmpeg bin folder (e.g., `C:\\ffmpeg\\bin`)\n", + "- Restart your command prompt, and within Jupyter Lab do Kernel -> Restart kernel, to pick up the changes\n", + "\n", + "4. Open a new command prompt and run this to make sure it's installed OK\n", + "`ffmpeg -version`\n", + "\n", + "**For Mac Users**\n", + "\n", + "1. Install homebrew if you don't have it already by running this in a Terminal window and following any instructions: \n", + "`/bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"`\n", + "\n", + "2. Then install FFmpeg with `brew install ffmpeg`\n", + "\n", + "3. Verify your installation with `ffmpeg -version` and if everything is good, within Jupyter Lab do Kernel -> Restart kernel to pick up the changes\n", + "\n", + "Message me or email me at ed@edwarddonner.com with any problems!" + ] + }, + { + "cell_type": "markdown", + "id": "4cc90e80-c96e-4dd4-b9d6-386fe2b7e797", + "metadata": {}, + "source": [ + "## To check you now have ffmpeg and can access it here\n", + "\n", + "Excecute the next cell to see if you get a version number. (Putting an exclamation mark before something in Jupyter Lab tells it to run it as a terminal command rather than python code).\n", + "\n", + "If this doesn't work, you may need to actually save and close down your Jupyter lab, and start it again from a new Terminal window (Mac) or Anaconda prompt (PC), remembering to activate the llms environment. This ensures you pick up ffmpeg.\n", + "\n", + "And if that doesn't work, please contact me!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b3be0fb-1d34-4693-ab6f-dbff190afcd7", + "metadata": {}, + "outputs": [], + "source": [ + "!ffmpeg -version\n", + "!ffprobe -version\n", + "!ffplay -version" + ] + }, + { + "cell_type": "markdown", + "id": "d91d3f8f-e505-4e3c-a87c-9e42ed823db6", + "metadata": {}, + "source": [ + "# For Mac users - and possibly many PC users too\n", + "\n", + "This version should work fine for you. It might work for Windows users too, but you might get a Permissions error writing to a temp file. If so, see the next section!\n", + "\n", + "As always, if you have problems, please contact me! (You could also comment out the audio talker() in the later code if you're less interested in audio generation)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "ffbfe93b-5e86-4e68-ba71-b301cd5230db", + "metadata": {}, + "outputs": [], + "source": [ + "from pydub import AudioSegment\n", + "from pydub.playback import play\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " input=message\n", + " )\n", + " \n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + " play(audio)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b88d775d-d357-4292-a1ad-5dc5ed567281", + "metadata": {}, + "outputs": [], + "source": [ + "talker(\"Well, hi there\")" + ] + }, + { + "cell_type": "markdown", + "id": "ad89a9bd-bb1e-4bbb-a49a-83af5f500c24", + "metadata": {}, + "source": [ + "# For Windows users (or any Mac users with problems above)\n", + "\n", + "## First try the Mac version above, but if you get a permissions error writing to a temp file, then this code should work instead.\n", + "\n", + "A collaboration between students Mark M. and Patrick H. and Claude got this resolved!\n", + "\n", + "Below are 4 variations - hopefully one of them will work on your PC. If not, message me please!\n", + "\n", + "And for Mac people - all 3 of the below work on my Mac too - please try these if the Mac version gave you problems.\n", + "\n", + "## PC Variation 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d104b96a-02ca-4159-82fe-88e0452aa479", + "metadata": {}, + "outputs": [], + "source": [ + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\",\n", + " input=message)\n", + "\n", + " audio_stream = BytesIO(response.content)\n", + " output_filename = \"output_audio.mp3\"\n", + " with open(output_filename, \"wb\") as f:\n", + " f.write(audio_stream.read())\n", + "\n", + " # Play the generated audio\n", + " display(Audio(output_filename, autoplay=True))\n", + "\n", + "talker(\"Well, hi there\")" + ] + }, + { + "cell_type": "markdown", + "id": "3a5d11f4-bbd3-43a1-904d-f684eb5f3e3a", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "## PC Variation 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d59c8ebd-79c5-498a-bdf2-3a1c50d91aa0", + "metadata": {}, + "outputs": [], + "source": [ + "import tempfile\n", + "import subprocess\n", + "from io import BytesIO\n", + "from pydub import AudioSegment\n", + "import time\n", + "\n", + "def play_audio(audio_segment):\n", + " temp_dir = tempfile.gettempdir()\n", + " temp_path = os.path.join(temp_dir, \"temp_audio.wav\")\n", + " try:\n", + " audio_segment.export(temp_path, format=\"wav\")\n", + " time.sleep(3) # Student Dominic found that this was needed. You could also try commenting out to see if not needed on your PC\n", + " subprocess.call([\n", + " \"ffplay\",\n", + " \"-nodisp\",\n", + " \"-autoexit\",\n", + " \"-hide_banner\",\n", + " temp_path\n", + " ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n", + " finally:\n", + " try:\n", + " os.remove(temp_path)\n", + " except Exception:\n", + " pass\n", + " \n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " input=message\n", + " )\n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + " play_audio(audio)\n", + "\n", + "talker(\"Well hi there\")" + ] + }, + { + "cell_type": "markdown", + "id": "96f90e35-f71e-468e-afea-07b98f74dbcf", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "## PC Variation 3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8597c7f8-7b50-44ad-9b31-db12375cd57b", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from pydub import AudioSegment\n", + "from pydub.playback import play\n", + "from io import BytesIO\n", + "\n", + "def talker(message):\n", + " # Set a custom directory for temporary files on Windows\n", + " custom_temp_dir = os.path.expanduser(\"~/Documents/temp_audio\")\n", + " os.environ['TEMP'] = custom_temp_dir # You can also use 'TMP' if necessary\n", + " \n", + " # Create the folder if it doesn't exist\n", + " if not os.path.exists(custom_temp_dir):\n", + " os.makedirs(custom_temp_dir)\n", + " \n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " input=message\n", + " )\n", + " \n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + "\n", + " play(audio)\n", + "\n", + "talker(\"Well hi there\")" + ] + }, + { + "cell_type": "markdown", + "id": "e821224c-b069-4f9b-9535-c15fdb0e411c", + "metadata": {}, + "source": [ + "## PC Variation 4\n", + "\n", + "### Let's try a completely different sound library\n", + "\n", + "First run the next cell to install a new library, then try the cell below it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69d3c0d9-afcc-49e3-b829-9c9869d8b472", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install simpleaudio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "28f9cc99-36b7-4554-b3f4-f2012f614a13", + "metadata": {}, + "outputs": [], + "source": [ + "from pydub import AudioSegment\n", + "from io import BytesIO\n", + "import tempfile\n", + "import os\n", + "import simpleaudio as sa\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " input=message\n", + " )\n", + " \n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + "\n", + " # Create a temporary file in a folder where you have write permissions\n", + " with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False, dir=os.path.expanduser(\"~/Documents\")) as temp_audio_file:\n", + " temp_file_name = temp_audio_file.name\n", + " audio.export(temp_file_name, format=\"wav\")\n", + " \n", + " # Load and play audio using simpleaudio\n", + " wave_obj = sa.WaveObject.from_wave_file(temp_file_name)\n", + " play_obj = wave_obj.play()\n", + " play_obj.wait_done() # Wait for playback to finish\n", + "\n", + " # Clean up the temporary file afterward\n", + " os.remove(temp_file_name)\n", + " \n", + "# talker(\"Well hi there\")" + ] + }, + { + "cell_type": "markdown", + "id": "7986176b-cd04-495f-a47f-e057b0e462ed", + "metadata": {}, + "source": [ + "## PC Users - if none of those 4 variations worked!\n", + "\n", + "Please get in touch with me. I'm sorry this is causing problems! We'll figure it out.\n", + "\n", + "Alternatively: playing audio from your PC isn't super-critical for this course, and you can feel free to focus on image generation and skip audio for now, or come back to it later." + ] + }, + { + "cell_type": "markdown", + "id": "1d48876d-c4fa-46a8-a04f-f9fadf61fb0d", + "metadata": {}, + "source": [ + "# Our Agent Framework\n", + "\n", + "The term 'Agentic AI' and Agentization is an umbrella term that refers to a number of techniques, such as:\n", + "\n", + "1. Breaking a complex problem into smaller steps, with multiple LLMs carrying out specialized tasks\n", + "2. The ability for LLMs to use Tools to give them additional capabilities\n", + "3. The 'Agent Environment' which allows Agents to collaborate\n", + "4. An LLM can act as the Planner, dividing bigger tasks into smaller ones for the specialists\n", + "5. The concept of an Agent having autonomy / agency, beyond just responding to a prompt - such as Memory\n", + "\n", + "We're showing 1 and 2 here, and to a lesser extent 3 and 5. In week 8 we will do the lot!" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "ba820c95-02f5-499e-8f3c-8727ee0a6c0c", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + " image = None\n", + " \n", + " if response.choices[0].finish_reason == \"tool_calls\":\n", + " message = response.choices[0].message\n", + " tool_name = message.tool_calls[0].function.name\n", + "\n", + " if tool_name == \"get_ticket_price\":\n", + " response, city = handle_tool_call(message)\n", + " elif tool_name == \"make_booking\":\n", + " response, city = handle_tool_call_booking(message)\n", + "\n", + " messages.extend([message, response])\n", + " image = artist(city)\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " \n", + " reply = response.choices[0].message.content\n", + " history += [{\"role\":\"assistant\", \"content\":reply}]\n", + " \n", + " # Comment out or delete the next line if you'd rather skip Audio for now..\n", + " # talker(reply)\n", + " \n", + " return history, image" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "67604da9-07d0-4ca1-8789-08b0a7f94ad7", + "metadata": {}, + "outputs": [], + "source": [ + "system_message_translator=\"You are a spanish language assistant. Always translate from English to Spanish without adding any extra text\"" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "412ced25-b22d-472f-8f48-2175620e2737", + "metadata": {}, + "outputs": [], + "source": [ + "# Modified translator to accept the whole chat as \"history\", break it down and translate one messsage at a time \n", + "# to preserve the roles in the output chats so they can be displayed correctly and not as one lumped response from the \n", + "# assistant \n", + "def translate_gpt(history):\n", + " translation=[]\n", + " for item in history:\n", + " messages = [{\"role\":\"system\",\"content\":system_message_translator},\n", + " {\"role\":\"user\",\"content\":item.get('content')}]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + " translation+=[{\"role\":item.get('role'), \"content\":response.choices[0].message.content}]\n", + " \n", + " return translation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "85c6ade4-949a-4500-86ac-a2b9206c1986", + "metadata": {}, + "outputs": [], + "source": [ + "translate_gpt([{'role': 'user', 'content': 'I am going to Paris'},{'role': 'assistant', 'content': 'The price of a ticket to Paris is $899'}])" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "d37e3a57-09c7-41c7-af00-c8fd76a92577", + "metadata": {}, + "outputs": [], + "source": [ + "# Translator using Claude \n", + "def translate_claude(history):\n", + " translation=[]\n", + " for item in history:\n", + " message = claude.messages.create(\n", + " model=MODEL_TRANSLATE,\n", + " max_tokens=200,\n", + " temperature=0.5,\n", + " system=system_message_translator,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\":item.get('content')},\n", + " ]\n", + " )\n", + " \n", + " translation+=[{\"role\":item.get('role'), \"content\":message.content[0].text}]\n", + " \n", + " return translation\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89fa61e3-47b2-43ef-a170-440e23b3f921", + "metadata": {}, + "outputs": [], + "source": [ + "translate_claude([{'role': 'user', 'content': 'I am going to London'},{'role': 'assistant', 'content': 'The price of a ticket to Paris is $899'}])" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "40df67f6-52c1-4a7b-9f39-74d1cc52af43", + "metadata": {}, + "outputs": [], + "source": [ + "system_message_audio=\"You are an assistant that can convert speech in audio format to text\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c923f90-52e5-413f-bfe2-49bda122a55a", + "metadata": {}, + "outputs": [], + "source": [ + "# agent that converts audio to text \n", + "def speech_to_text(audio):\n", + " print(\"In speech to text function\")\n", + " messages = [{\"role\":\"system\",\"content\":system_message_audio},\n", + " {\"role\":\"user\",\"content\":audio}]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd7efc3a-7f83-497a-9834-13e60da529d2", + "metadata": {}, + "outputs": [], + "source": [ + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot_main = gr.Chatbot(height=500, type=\"messages\")\n", + " with gr.Row():\n", + " # entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n", + " audio_entry=gr.Audio(type=\"filepath\", label=\"Speak\")\n", + " \n", + " def do_entry (audio, history):\n", + " print(\"In entry function\")\n", + " transcript = whisper_model.transcribe(audio)[\"text\"]\n", + " print(transcript)\n", + " history += [{\"role\":\"user\", \"content\":transcript}]\n", + " print(history)\n", + " return \"\", history \n", + "\n", + " audio_entry.change(do_entry, inputs=[audio_entry,chatbot_main], outputs=[chatbot_main]) \n", + "\n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "f38d0d27-33bf-4992-a2e5-5dbed973cde7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7866\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool get_ticket_price called for Paris\n", + "Your booking for Paris has been confirmed\n" + ] + } + ], + "source": [ + "# More involved Gradio code as we're not using the preset Chat interface!\n", + "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", + "\n", + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot_main = gr.Chatbot(height=500, type=\"messages\")\n", + " image_output = gr.Image(height=500)\n", + " chatbot_lang = gr.Chatbot(height=500, type=\"messages\")\n", + " with gr.Row():\n", + " entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear\")\n", + "\n", + " # Do_entry with text messages \n", + " def do_entry(message, history):\n", + " history += [{\"role\":\"user\", \"content\":message}]\n", + " return \"\", history\n", + " \n", + " entry.submit(do_entry, inputs=[entry, chatbot_main], outputs=[entry, chatbot_main]).then(\n", + " chat, inputs=chatbot_main, outputs=[chatbot_main, image_output]).then(translate_claude, inputs=chatbot_main, \n", + " outputs=chatbot_lang)\n", + "\n", + " def clear_chat():\n", + " return [],None,[] \n", + " \n", + " clear.click(clear_chat, outputs=[chatbot_main,image_output,chatbot_lang])\n", + " \n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "afcd7343-94a3-4bb0-83cc-69e95df2d32d", + "metadata": {}, + "outputs": [], + "source": [ + "# More involved Gradio code as we're not using the preset Chat interface!\n", + "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", + "# Audio input\n", + "\n", + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot_main = gr.Chatbot(height=500, type=\"messages\")\n", + " image_output = gr.Image(height=500)\n", + " chatbot_lang = gr.Chatbot(height=500, type=\"messages\")\n", + " with gr.Row():\n", + " audio_entry=gr.Audio(type=\"filepath\", label=\"Speak\")\n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear\")\n", + " \n", + " # Do entry with audio input \n", + " def do_entry (audio, history):\n", + " transcript = whisper_model.transcribe(audio)[\"text\"]\n", + " history += [{\"role\":\"user\", \"content\":transcript}]\n", + " return history\n", + " \n", + " audio_entry.change(do_entry, inputs=[audio_entry, chatbot_main], outputs=[chatbot_main]).then(\n", + " chat, inputs=chatbot_main, outputs=[chatbot_main, image_output]).then(translate_claude, inputs=chatbot_main, \n", + " outputs=chatbot_lang)\n", + " \n", + " def clear_chat():\n", + " return [],None,[], None \n", + " \n", + " clear.click(clear_chat, outputs=[chatbot_main,image_output,chatbot_lang,audio_entry])\n", + "\n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "markdown", + "id": "226643d2-73e4-4252-935d-86b8019e278a", + "metadata": {}, + "source": [ + "# Exercises and Business Applications\n", + "\n", + "Add in more tools - perhaps to simulate actually booking a flight. A student has done this and provided their example in the community contributions folder.\n", + "\n", + "Next: take this and apply it to your business. Make a multi-modal AI assistant with tools that could carry out an activity for your work. A customer support assistant? New employee onboarding assistant? So many possibilities! Also, see the week2 end of week Exercise in the separate Notebook." + ] + }, + { + "cell_type": "markdown", + "id": "7e795560-1867-42db-a256-a23b844e6fbe", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

I have a special request for you

\n", + " \n", + " My editor tells me that it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", + " \n", + "
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 4a48216b980ca03a3ed9412174e8470364523312 Mon Sep 17 00:00:00 2001 From: brianpickrell Date: Tue, 11 Mar 2025 11:23:25 -0700 Subject: [PATCH 14/43] community: notebook with an adversarial conversation --- .../day1_adversarial.ipynb | 242 ++++++++++++++++++ 1 file changed, 242 insertions(+) create mode 100644 week2/community-contributions/day1_adversarial.ipynb diff --git a/week2/community-contributions/day1_adversarial.ipynb b/week2/community-contributions/day1_adversarial.ipynb new file mode 100644 index 0000000..32c58c1 --- /dev/null +++ b/week2/community-contributions/day1_adversarial.ipynb @@ -0,0 +1,242 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927", + "metadata": {}, + "source": [ + "# Welcome to Week 2!\n", + "\n", + "## Frontier Model APIs\n", + "\n", + "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n", + "\n", + "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", + "metadata": {}, + "outputs": [], + "source": [ + "# import for google\n", + "# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n", + "# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n", + "\n", + "import google.generativeai" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "425ed580-808d-429b-85b0-6cba50ca1d0c", + "metadata": {}, + "outputs": [], + "source": [ + "# This is the set up code for Gemini\n", + "# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n", + "google.generativeai.configure()" + ] + }, + { + "cell_type": "markdown", + "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f", + "metadata": {}, + "source": [ + "## An adversarial conversation between Chatbots.\n", + "\n", + "### What if two chatbots get into a self-referential conversation that goes on a long time? In my first test, \n", + "### they eventually forgot the topic and ended up repeating polite nothings to each other. In another test,\n", + "### they converged on a result and ended by exchanging nearly identical statements.\n", + "\n", + "### Warning: Think before you dial up the number of iterations too high. Being a student, I don't know at what \n", + "### point the chat becomes too costly or what models can do this without becoming overloaded. Maybe Ed can advise if he sees this.\n", + "\n", + "## Two chatbots edit an essay about cars. One keeps trying to make it longer every time; the other keeps making it \n", + "## shorter.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "\n", + "gpt_system = \"This is a description of a car; \\\n", + "rephrase the description while adding one detail. Don't include comments that aren't part of the car description.\"\n", + "\n", + "claude_system = \"This is a description of a car; \\\n", + "repeat the description in slightly shorter form. You may remove some details if desired. Don't include comments that aren't part of the car description. Maximum reply length 125 words.\"\n", + "\n", + "\n", + "gpt_messages = [\"Hi there\"]\n", + "claude_messages = [\"Hi\"] \n", + "\n", + "\n", + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": claude})\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content\n", + "\n", + "reply = call_gpt()\n", + "print('\\nGPT: ', reply)\n", + "\n", + "def call_claude():\n", + " messages = []\n", + " for gpt, claude_message in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text\n", + "\n", + "\n", + "reply = call_claude()\n", + "print('\\nGPT: ', reply)\n", + "\n", + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "9fbce0da", + "metadata": {}, + "source": [ + "### Here's the iterative loop. Important change: Unlike the original example, we don't repeat the entire conversation to make the input longer and longer.\n", + "### Instead, we use pop() to remove the oldest messages." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1f41d586", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "for i in range(35):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " if len(gpt_messages) > 6:\n", + " gpt_messages.pop(0)\n", + " gpt_messages.pop(0)\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"Claude:\\n{claude_next}\\n\")\n", + " if len(claude_messages) > 6:\n", + " claude_messages.pop(0)\n", + " claude_messages.pop(0)\n", + " claude_messages.append(claude_next)\n", + "\n", + "print('Done!')\n", + "\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 28b1362bccbf8d85eda5a50c9a43822e244279dc Mon Sep 17 00:00:00 2001 From: Manish Gajria Date: Tue, 11 Mar 2025 18:26:54 +0000 Subject: [PATCH 15/43] Week2 Day 4 exercise notebook with an additional tool to list destinations that the airline serves --- .../week2_day4_exercise.ipynb | 408 ++++++++++++++++++ 1 file changed, 408 insertions(+) create mode 100644 week2/community-contributions/week2_day4_exercise.ipynb diff --git a/week2/community-contributions/week2_day4_exercise.ipynb b/week2/community-contributions/week2_day4_exercise.ipynb new file mode 100644 index 0000000..08eb457 --- /dev/null +++ b/week2/community-contributions/week2_day4_exercise.ipynb @@ -0,0 +1,408 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", + "metadata": {}, + "source": [ + "# Project - Airline AI Assistant\n", + "\n", + "We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n" + ] + } + ], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "MODEL = \"gpt-4o-mini\"\n", + "openai = OpenAI()\n", + "\n", + "# As an alternative, if you'd like to use Ollama instead of OpenAI\n", + "# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n", + "# MODEL = \"llama3.2\"\n", + "# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "0a521d84-d07c-49ab-a0df-d6451499ed97", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7901\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", + "\n", + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " return response.choices[0].message.content\n", + "\n", + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Tools are an incredibly powerful feature provided by the frontier LLMs.\n", + "\n", + "With tools, you can write a function, and have the LLM call that function as part of its response.\n", + "\n", + "Sounds almost spooky.. we're giving it the power to run code on our machine?\n", + "\n", + "Well, kinda." + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's start by making a useful function\n", + "\n", + "ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", + "\n", + "def get_ticket_price(destination_city):\n", + " print(f\"Tool get_ticket_price called for {destination_city}\")\n", + " city = destination_city.lower()\n", + " return ticket_prices.get(city, \"Unknown\")\n", + "\n", + "def get_destinations():\n", + " destinations=ticket_prices.keys()\n", + " cities=\", \".join(destinations) \n", + " return cities" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool get_ticket_price called for Berlin\n" + ] + }, + { + "data": { + "text/plain": [ + "'london, paris, tokyo, berlin'" + ] + }, + "execution_count": 86, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_ticket_price(\"Berlin\")\n", + "get_destinations()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "4afceded-7178-4c05-8fa6-9f2085e6a344", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "price_function = {\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "5842b7f1-e357-494c-9bd4-3aa9f9fd4332", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "destination_function = {\n", + " \"name\": \"get_destinations\",\n", + " \"description\": \"Get the destinations we serve. Call this whenever you need to know the destinations FlightAI flies to, for example when a customer asks 'Where do you fly to'\",\n", + " \"parameters\": {\n", + " },\n", + " \"additionalProperties\": False\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "\n", + "tools = [{\"type\": \"function\", \"function\": price_function},\n", + " {\"type\": \"function\", \"function\": destination_function}]" + ] + }, + { + "cell_type": "markdown", + "id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340", + "metadata": {}, + "source": [ + "## Getting OpenAI to use our Tool\n", + "\n", + "There's some fiddly stuff to allow OpenAI \"to call our tool\"\n", + "\n", + "What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n", + "\n", + "Here's how the new chat function looks:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5db52df0-cb48-4017-bae3-0014f5ca3a56", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + "\n", + " if response.choices[0].finish_reason == \"tool_calls\":\n", + " message = response.choices[0].message\n", + " tool_name = message.tool_calls[0].function.name\n", + "\n", + " if tool_name == \"get_ticket_price\":\n", + " response, city = handle_tool_call(message)\n", + " elif tool_name == \"get_destinations\":\n", + " response = handle_tool_call_destination(message)\n", + "\n", + " messages.extend([message, response])\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + "\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "id": "b0992986-ea09-4912-a076-8e5603ee631f", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call for price:\n", + "\n", + "def handle_tool_call_price(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " city = arguments.get('destination_city')\n", + " price = get_ticket_price(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, city" + ] + }, + { + "cell_type": "code", + "execution_count": 92, + "id": "4bbffdb0-5ab7-414e-8d2b-3d9367e64526", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call for destinations:\n", + "\n", + "def handle_tool_call_destination(message):\n", + " tool_call = message.tool_calls[0]\n", + " destinations = get_destinations()\n", + " print(destinations)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": destinations,\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": 93, + "id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7928\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 93, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool get_ticket_price called for Paris\n", + "Tool get_ticket_price called for Timbuktu\n", + "london, paris, tokyo, berlin\n" + ] + } + ], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "243c156d-86c3-4d0a-8119-d0a532daa5cc", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 756748d4e651a740a8b356b2fa444a9821265e01 Mon Sep 17 00:00:00 2001 From: An Date: Wed, 12 Mar 2025 13:20:07 -0700 Subject: [PATCH 16/43] Added my contributions to community-contributions --- .../day1_music_recommender_promax.ipynb | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 week1/community-contributions/day1_music_recommender_promax.ipynb diff --git a/week1/community-contributions/day1_music_recommender_promax.ipynb b/week1/community-contributions/day1_music_recommender_promax.ipynb new file mode 100644 index 0000000..9888375 --- /dev/null +++ b/week1/community-contributions/day1_music_recommender_promax.ipynb @@ -0,0 +1,127 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "0a512c2a-55e7-40e1-ab17-88b7034ca09a", + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "import openai\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from IPython.display import Markdown, display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1aa8dd82-6b5e-4dbd-a2ee-8367e796a51f", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - head over to the troubleshooting notebook!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj... make sure you using the right key (Check troubleshooting notebook)\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like white space was found in beginning or end. (Check troubleshooting notebook)\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2acd579b-846c-4aa6-ba6c-1cc1a5a2eeb6", + "metadata": {}, + "outputs": [], + "source": [ + "# Input the system prompt\n", + "system_prompt = \"\"\"you are top notched AI music expert that have knowledge of all genres, songs, and artists. You need to google search lyrics. You have the following rules:\\\n", + "1. Carefully break down what type of recommendation the user wants and the context.\\\n", + "2. If asked to recommend genres similar to a song or artists please identify the top 3 genres.\\\n", + "3. If asked to recommend artists from songs or genres then recommend the top 5 artists.\n", + "4. If asked to recommend songs from genres or artist than recommend the top 10 songs.\n", + "5. If asked for a general recommendation give them the top 5 songs based off of context.\\\n", + "6. Be flexible and adaptable with recommendations and consider the context the user might ask.\n", + "7. always respond in markdown.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c1cf212-538c-4e9a-8da5-337bd7b6197c", + "metadata": {}, + "outputs": [], + "source": [ + "# music recommender function\n", + "def music_recommender(user_prompt):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + " \n", + " response = openai.chat.completions.create(\n", + " model=\"gpt-4\",\n", + " messages=messages,\n", + " max_tokens=300\n", + " )\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4f277561-af8b-4715-90e7-6ebaadeb15d0", + "metadata": {}, + "outputs": [], + "source": [ + "# User prompt (Change this to fit your needs!)\n", + "user_prompt = \"Can you recommend me songs from Taylor Swift\"\n", + "\n", + "# Example usage\n", + "response = music_recommender(user_prompt)\n", + "display(Markdown(response))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bb869d36-de14-4e46-9087-223d6b257efa", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 64f93291b587282a8f090eed60bd98ada4fdcfc3 Mon Sep 17 00:00:00 2001 From: ChrisW19 <58227421+ChrisW19@users.noreply.github.com> Date: Wed, 12 Mar 2025 13:34:11 -0700 Subject: [PATCH 17/43] Create Day5_Synthetic_Dataset_Generator.ipynb Creates a dataset for EV Drivers personal information and location. Allows for selection of where the driver is located. --- .../Day5_Synthetic_Dataset_Generator.ipynb | 4784 +++++++++++++++++ 1 file changed, 4784 insertions(+) create mode 100644 week3/community-contributions/Day5_Synthetic_Dataset_Generator.ipynb diff --git a/week3/community-contributions/Day5_Synthetic_Dataset_Generator.ipynb b/week3/community-contributions/Day5_Synthetic_Dataset_Generator.ipynb new file mode 100644 index 0000000..044acab --- /dev/null +++ b/week3/community-contributions/Day5_Synthetic_Dataset_Generator.ipynb @@ -0,0 +1,4784 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4" + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU", + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "4e96acd5f9b844828892423e01e985af": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_de1eda1f836f4524aa3f9946992b3518", + "IPY_MODEL_11c7606385d9493a9f8e6844f91bf984", + "IPY_MODEL_ce253fcd9ac744dc817132f40c5a4449" + ], + "layout": "IPY_MODEL_30bd0ada61fe44aaa355d8f8ba8608c9" + } + }, + "de1eda1f836f4524aa3f9946992b3518": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f3a7735eff734c8fa6619cf55f9250fe", + "placeholder": "โ€‹", + "style": "IPY_MODEL_06f884acd7b84247a2b11f466986425b", + "value": "tokenizer_config.json:โ€‡100%" + } + }, + "11c7606385d9493a9f8e6844f91bf984": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_b88f70ba045142a6b2f7432d0b198823", + "max": 55351, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_b4259e7cd7a9486b8e5241ac194773c8", + "value": 55351 + } + }, + "ce253fcd9ac744dc817132f40c5a4449": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_8671c9fe95774ce393938e81e4ac0fea", + "placeholder": "โ€‹", + "style": "IPY_MODEL_c6c24948c71b4dae9b22f82d681f6d14", + "value": "โ€‡55.4k/55.4kโ€‡[00:00<00:00,โ€‡3.24MB/s]" + } + }, + "30bd0ada61fe44aaa355d8f8ba8608c9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f3a7735eff734c8fa6619cf55f9250fe": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "06f884acd7b84247a2b11f466986425b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "b88f70ba045142a6b2f7432d0b198823": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b4259e7cd7a9486b8e5241ac194773c8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "8671c9fe95774ce393938e81e4ac0fea": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c6c24948c71b4dae9b22f82d681f6d14": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "168db3ccd3584d8787372a430e81afc1": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5dbe37818214460a883f8854a7a22ddd", + "IPY_MODEL_10fbda2b3d854ef5b42f36a1e6886df0", + "IPY_MODEL_5685b60bff9648ea9fbf426b6c6c34b7" + ], + "layout": "IPY_MODEL_1ee68b5cb4914705962a8efdd6f3089f" + } + }, + "5dbe37818214460a883f8854a7a22ddd": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_49cb0c97bcfb4be78e934120938e37f3", + "placeholder": "โ€‹", + "style": "IPY_MODEL_f8dbf020aa9f4d8fa29ce7657d980259", + "value": "tokenizer.json:โ€‡100%" + } + }, + "10fbda2b3d854ef5b42f36a1e6886df0": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_3e96b2e9fb6443dfaf289ecdc3ef6e86", + "max": 9085657, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_f08e6a34891d4873a0fb7fcf8ef016c6", + "value": 9085657 + } + }, + "5685b60bff9648ea9fbf426b6c6c34b7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9ac1e95865934a7d8a71bc49c4c6d78e", + "placeholder": "โ€‹", + "style": "IPY_MODEL_83701fdbe9174a4f83ab0fa90da5c20b", + "value": "โ€‡9.09M/9.09Mโ€‡[00:00<00:00,โ€‡13.7MB/s]" + } + }, + "1ee68b5cb4914705962a8efdd6f3089f": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "49cb0c97bcfb4be78e934120938e37f3": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f8dbf020aa9f4d8fa29ce7657d980259": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "3e96b2e9fb6443dfaf289ecdc3ef6e86": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "f08e6a34891d4873a0fb7fcf8ef016c6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "9ac1e95865934a7d8a71bc49c4c6d78e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "83701fdbe9174a4f83ab0fa90da5c20b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "aab110b0331242d0a2c103f15a3ac9bc": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c8cb81d97dd14b8a84b7caf93cc94667", + "IPY_MODEL_24c00069c1f4415fa6bd9b2eee09d53a", + "IPY_MODEL_ce64d54649b84b71b420867452873172" + ], + "layout": "IPY_MODEL_4e2799deeb954c42ac88d2f52a021929" + } + }, + "c8cb81d97dd14b8a84b7caf93cc94667": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_66e6db7334b241ad9519e28527c17ef0", + "placeholder": "โ€‹", + "style": "IPY_MODEL_ab9b46c7f454447cb7b4b97e48ad2dbd", + "value": "special_tokens_map.json:โ€‡100%" + } + }, + "24c00069c1f4415fa6bd9b2eee09d53a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2d74528d3e4947fc9588b635aa1a375e", + "max": 296, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_4ca4229ff72a470a8984d91b3dd774aa", + "value": 296 + } + }, + "ce64d54649b84b71b420867452873172": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_f7a40eeb26da47e38b9da249b97cb484", + "placeholder": "โ€‹", + "style": "IPY_MODEL_dc27afa005844dafafaf05431ec4d70f", + "value": "โ€‡296/296โ€‡[00:00<00:00,โ€‡25.8kB/s]" + } + }, + "4e2799deeb954c42ac88d2f52a021929": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "66e6db7334b241ad9519e28527c17ef0": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ab9b46c7f454447cb7b4b97e48ad2dbd": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "2d74528d3e4947fc9588b635aa1a375e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "4ca4229ff72a470a8984d91b3dd774aa": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "f7a40eeb26da47e38b9da249b97cb484": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "dc27afa005844dafafaf05431ec4d70f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c38ba37f9b8446958ce9f2ff1a2cbb71": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_4f0123276be5417d8315728a0941e0db", + "IPY_MODEL_6909752b5cad4f9ab0396d467f28df31", + "IPY_MODEL_8fb35e38549f4b3f9190ac508c17e2c9" + ], + "layout": "IPY_MODEL_3687b3d9781241a99eb999de62fddd51" + } + }, + "4f0123276be5417d8315728a0941e0db": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c3ef9f811db84d199103a1dc44448ee8", + "placeholder": "โ€‹", + "style": "IPY_MODEL_caa7ae24ea3d4133a711cfb174b330d7", + "value": "config.json:โ€‡100%" + } + }, + "6909752b5cad4f9ab0396d467f28df31": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_20044b5c03d84e3988e365e140b408f9", + "max": 855, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_49034f36ce9642f6bf72a43015fae336", + "value": 855 + } + }, + "8fb35e38549f4b3f9190ac508c17e2c9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1b6001a81d5645f6914cc84f4416ca22", + "placeholder": "โ€‹", + "style": "IPY_MODEL_62f3486891284e6d92758f07d4060193", + "value": "โ€‡855/855โ€‡[00:00<00:00,โ€‡52.2kB/s]" + } + }, + "3687b3d9781241a99eb999de62fddd51": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c3ef9f811db84d199103a1dc44448ee8": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "caa7ae24ea3d4133a711cfb174b330d7": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "20044b5c03d84e3988e365e140b408f9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "49034f36ce9642f6bf72a43015fae336": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "1b6001a81d5645f6914cc84f4416ca22": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "62f3486891284e6d92758f07d4060193": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "e1a4b876fdfe41d0960216483eae88bb": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_8ef1c385388b4699a87ff7f86a440c43", + "IPY_MODEL_dc1767587985419cac07c65492995008", + "IPY_MODEL_e2dd6ea3bcf648849046e265c9ae7c0a" + ], + "layout": "IPY_MODEL_7885555f98c54a7cb658eea62553da35" + } + }, + "8ef1c385388b4699a87ff7f86a440c43": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_8deb28a48fed49a38f41f786613d115b", + "placeholder": "โ€‹", + "style": "IPY_MODEL_d159d8498ed74acf97803954a962fe79", + "value": "model.safetensors.index.json:โ€‡100%" + } + }, + "dc1767587985419cac07c65492995008": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_97dbc2d7bd3943d38c46633e9c075bce", + "max": 23950, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_d427f0332f134fcf991840c8b10ef3b1", + "value": 23950 + } + }, + "e2dd6ea3bcf648849046e265c9ae7c0a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_8cc82235c0ae44f5894aea496a56e0c8", + "placeholder": "โ€‹", + "style": "IPY_MODEL_620c70e0c3cc4a4a9e5c17ec6cb84ba9", + "value": "โ€‡23.9k/23.9kโ€‡[00:00<00:00,โ€‡2.14MB/s]" + } + }, + "7885555f98c54a7cb658eea62553da35": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8deb28a48fed49a38f41f786613d115b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d159d8498ed74acf97803954a962fe79": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "97dbc2d7bd3943d38c46633e9c075bce": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "d427f0332f134fcf991840c8b10ef3b1": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "8cc82235c0ae44f5894aea496a56e0c8": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "620c70e0c3cc4a4a9e5c17ec6cb84ba9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "6660bd64c2524a37ae6d08be84987eaf": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_37fd972cb0494ea6b70420d85d0abba8", + "IPY_MODEL_94cce4aefda24ebb9bf03993109a4404", + "IPY_MODEL_0628d8f9e0624356abfd9cf7ef6bb3c6" + ], + "layout": "IPY_MODEL_c636e345c3634ece83442df522b034d2" + } + }, + "37fd972cb0494ea6b70420d85d0abba8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_a947ea4637e74310a56cb039fd90a1ed", + "placeholder": "โ€‹", + "style": "IPY_MODEL_1304fbc97df043e9b37bfcecf4ff9d46", + "value": "Downloadingโ€‡shards:โ€‡100%" + } + }, + "94cce4aefda24ebb9bf03993109a4404": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_83bf8c2f80df413abf7ac0d3e20ce86f", + "max": 4, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_e9b2cf2f29a74766a94675a772924aea", + "value": 4 + } + }, + "0628d8f9e0624356abfd9cf7ef6bb3c6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_300c17a4c6154fffae23d921e83e6b50", + "placeholder": "โ€‹", + "style": "IPY_MODEL_c21845fd3d474688afb8788b7437314d", + "value": "โ€‡4/4โ€‡[01:38<00:00,โ€‡21.80s/it]" + } + }, + "c636e345c3634ece83442df522b034d2": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "a947ea4637e74310a56cb039fd90a1ed": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "1304fbc97df043e9b37bfcecf4ff9d46": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "83bf8c2f80df413abf7ac0d3e20ce86f": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "e9b2cf2f29a74766a94675a772924aea": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "300c17a4c6154fffae23d921e83e6b50": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c21845fd3d474688afb8788b7437314d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "a8b17d30546a46da9dd00b193fb16f35": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_5ed6f78f5b80400087fb80daa22fdf6c", + "IPY_MODEL_ac90c20964e345ffb36545d2db127cfb", + "IPY_MODEL_1539e7133a7f4562b04ba234d1de80ce" + ], + "layout": "IPY_MODEL_b28e6dec3a4144caa37fef992cdcf341" + } + }, + "5ed6f78f5b80400087fb80daa22fdf6c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9d3cb890b1f74d19b86d15b7e6a2dfdd", + "placeholder": "โ€‹", + "style": "IPY_MODEL_2eebe3bef9524a0bb5dae0a484117766", + "value": "model-00001-of-00004.safetensors:โ€‡100%" + } + }, + "ac90c20964e345ffb36545d2db127cfb": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9636537d0b9a4d75a788d881adc50118", + "max": 4976698672, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_acc7ef22335242a7afb14a64f768989e", + "value": 4976698672 + } + }, + "1539e7133a7f4562b04ba234d1de80ce": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_1be9eafe8e664cbbb387f6a439bb700e", + "placeholder": "โ€‹", + "style": "IPY_MODEL_7a7ae7705baf4a3ca9293e41b6c3e08d", + "value": "โ€‡4.98G/4.98Gโ€‡[00:31<00:00,โ€‡221MB/s]" + } + }, + "b28e6dec3a4144caa37fef992cdcf341": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9d3cb890b1f74d19b86d15b7e6a2dfdd": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2eebe3bef9524a0bb5dae0a484117766": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "9636537d0b9a4d75a788d881adc50118": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "acc7ef22335242a7afb14a64f768989e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "1be9eafe8e664cbbb387f6a439bb700e": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7a7ae7705baf4a3ca9293e41b6c3e08d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "b701bfe8477947e282d4179b87bdbab8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_519cff904c474cf78bea1e81f1cdc725", + "IPY_MODEL_3b9f36db56024c168e1d0ab8b42549f3", + "IPY_MODEL_d1f5ed10a5514454a1242a9476bc3943" + ], + "layout": "IPY_MODEL_68e24fc14972430388070213e72b1e02" + } + }, + "519cff904c474cf78bea1e81f1cdc725": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_6b2367b3df1a4eb882fc61cb772ac038", + "placeholder": "โ€‹", + "style": "IPY_MODEL_b33a50fd372341f589af18ed50071301", + "value": "model-00002-of-00004.safetensors:โ€‡100%" + } + }, + "3b9f36db56024c168e1d0ab8b42549f3": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d4a71bad63f14e6f93826e8c43da6dcf", + "max": 4999802720, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_780294d1082e48ceb670577410eda217", + "value": 4999802720 + } + }, + "d1f5ed10a5514454a1242a9476bc3943": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2522796ddb1c46feb69c8022e71bf1b9", + "placeholder": "โ€‹", + "style": "IPY_MODEL_2bfb6f31f64a4c27929318a2aca640d8", + "value": "โ€‡5.00G/5.00Gโ€‡[00:25<00:00,โ€‡237MB/s]" + } + }, + "68e24fc14972430388070213e72b1e02": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6b2367b3df1a4eb882fc61cb772ac038": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b33a50fd372341f589af18ed50071301": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d4a71bad63f14e6f93826e8c43da6dcf": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "780294d1082e48ceb670577410eda217": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "2522796ddb1c46feb69c8022e71bf1b9": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2bfb6f31f64a4c27929318a2aca640d8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "c3310dd5568049a6b3b383e25877293d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_70d78dbf4e9848558d77128ed4193021", + "IPY_MODEL_775ff90b999a4618b4d317d601a46d46", + "IPY_MODEL_21233b45238249cd82ff8fb4473948cd" + ], + "layout": "IPY_MODEL_7e8d61ad6fc94e3289dccc8810cf9b78" + } + }, + "70d78dbf4e9848558d77128ed4193021": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_2f491ede33c54028911b1810fa9db91d", + "placeholder": "โ€‹", + "style": "IPY_MODEL_98349507f0884e9ab5426057e374a1e8", + "value": "model-00003-of-00004.safetensors:โ€‡100%" + } + }, + "775ff90b999a4618b4d317d601a46d46": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_870e1dac8f024eb7a3cf0cf73bfb1271", + "max": 4915916176, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_7501d278d5ac47ac9e018814764df54e", + "value": 4915916176 + } + }, + "21233b45238249cd82ff8fb4473948cd": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_be818f3876be4fa08090571fe41f5b52", + "placeholder": "โ€‹", + "style": "IPY_MODEL_b35fbdcf90f64a838e8524a6a9ffdd77", + "value": "โ€‡4.92G/4.92Gโ€‡[00:26<00:00,โ€‡223MB/s]" + } + }, + "7e8d61ad6fc94e3289dccc8810cf9b78": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "2f491ede33c54028911b1810fa9db91d": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "98349507f0884e9ab5426057e374a1e8": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "870e1dac8f024eb7a3cf0cf73bfb1271": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "7501d278d5ac47ac9e018814764df54e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "be818f3876be4fa08090571fe41f5b52": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b35fbdcf90f64a838e8524a6a9ffdd77": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "695e02cfe10347c8a9b04d84ccda6f3a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_1309122289634f99bed64ce8e81b29f4", + "IPY_MODEL_6e494ded11f54b08aff61e2b2becf374", + "IPY_MODEL_0ed217dd33684c8387d9e9ab71ff94b5" + ], + "layout": "IPY_MODEL_0c70a9b59663432cbbbbf6d24b26d24a" + } + }, + "1309122289634f99bed64ce8e81b29f4": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c20a74b3d48c4c77935c887e3d683fad", + "placeholder": "โ€‹", + "style": "IPY_MODEL_c29314e6c3b84c199f55dd5b3f922c7e", + "value": "model-00004-of-00004.safetensors:โ€‡100%" + } + }, + "6e494ded11f54b08aff61e2b2becf374": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_dc93215fafab46d1af188bfc04bc7894", + "max": 1168138808, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_9e3e041d803e4c52999d619da6d489c9", + "value": 1168138808 + } + }, + "0ed217dd33684c8387d9e9ab71ff94b5": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_4c1a3c9315c242f3841ed963c0aa2bec", + "placeholder": "โ€‹", + "style": "IPY_MODEL_b4ed166b00514ed393d15ffc9f465a4f", + "value": "โ€‡1.17G/1.17Gโ€‡[00:11<00:00,โ€‡199MB/s]" + } + }, + "0c70a9b59663432cbbbbf6d24b26d24a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c20a74b3d48c4c77935c887e3d683fad": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c29314e6c3b84c199f55dd5b3f922c7e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "dc93215fafab46d1af188bfc04bc7894": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9e3e041d803e4c52999d619da6d489c9": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "4c1a3c9315c242f3841ed963c0aa2bec": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "b4ed166b00514ed393d15ffc9f465a4f": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "d583891699a3446a9aa4c5c92f65808b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_c7fd12680f964414b3a5df7e2b17b2a5", + "IPY_MODEL_039174bd22c14ea7bd2e804120a24359", + "IPY_MODEL_09dceced4e994b9389033aa4b4110035" + ], + "layout": "IPY_MODEL_826184b06ae84b4b821ee1c888f217fb" + } + }, + "c7fd12680f964414b3a5df7e2b17b2a5": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_9ce0065e369d4ad48480a78c65db9ad6", + "placeholder": "โ€‹", + "style": "IPY_MODEL_8452e08887104a149d65d3a029deff0e", + "value": "Loadingโ€‡checkpointโ€‡shards:โ€‡100%" + } + }, + "039174bd22c14ea7bd2e804120a24359": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_96a18d82f03e4743bddc4f5af1634db4", + "max": 4, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_901d316ba4d74c1e9e724ef5b2e3c574", + "value": 4 + } + }, + "09dceced4e994b9389033aa4b4110035": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_c7bf9758f02a49968ffcdca5d454da1a", + "placeholder": "โ€‹", + "style": "IPY_MODEL_160c003ccef649f7a404ec6030f74250", + "value": "โ€‡4/4โ€‡[01:13<00:00,โ€‡15.77s/it]" + } + }, + "826184b06ae84b4b821ee1c888f217fb": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9ce0065e369d4ad48480a78c65db9ad6": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8452e08887104a149d65d3a029deff0e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "96a18d82f03e4743bddc4f5af1634db4": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "901d316ba4d74c1e9e724ef5b2e3c574": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "c7bf9758f02a49968ffcdca5d454da1a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "160c003ccef649f7a404ec6030f74250": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "4100cebd261340cc8033cdd53de165f6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_80ab88523acc47bca8c47d21e2aa7115", + "IPY_MODEL_0aa357e66123459ea266373fc21ce115", + "IPY_MODEL_0b5f553db9b545be97b9629f8dbdd9a2" + ], + "layout": "IPY_MODEL_3720de0095d74febbd36f2e248c5f923" + } + }, + "80ab88523acc47bca8c47d21e2aa7115": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_804d8939fe464844bb3bd70d880fd574", + "placeholder": "โ€‹", + "style": "IPY_MODEL_6f4fc191934b465196ca22f37b760fb0", + "value": "generation_config.json:โ€‡100%" + } + }, + "0aa357e66123459ea266373fc21ce115": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_8abf14c3acf241f1ba75f822d88b6904", + "max": 184, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_ca407e6abc3447e0878f25dce9dcca2a", + "value": 184 + } + }, + "0b5f553db9b545be97b9629f8dbdd9a2": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_345bdf4ed4ac44c3976b0f1cb63ee8e7", + "placeholder": "โ€‹", + "style": "IPY_MODEL_c191b98698ea45208c3e13abd00aab7a", + "value": "โ€‡184/184โ€‡[00:00<00:00,โ€‡13.4kB/s]" + } + }, + "3720de0095d74febbd36f2e248c5f923": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "804d8939fe464844bb3bd70d880fd574": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6f4fc191934b465196ca22f37b760fb0": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "8abf14c3acf241f1ba75f822d88b6904": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ca407e6abc3447e0878f25dce9dcca2a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "345bdf4ed4ac44c3976b0f1cb63ee8e7": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "model_module_version": "1.2.0", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "c191b98698ea45208c3e13abd00aab7a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + } + } + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "T-6b4FqreeIl", + "outputId": "c1db92e2-6fe2-42a8-a5c2-a7446854b866", + "collapsed": true + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m363.4/363.4 MB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m13.8/13.8 MB\u001b[0m \u001b[31m107.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m24.6/24.6 MB\u001b[0m \u001b[31m82.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m883.7/883.7 kB\u001b[0m \u001b[31m53.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m1.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m211.5/211.5 MB\u001b[0m \u001b[31m5.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m56.3/56.3 MB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m127.9/127.9 MB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m207.5/207.5 MB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m93.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m76.1/76.1 MB\u001b[0m \u001b[31m9.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m62.3/62.3 MB\u001b[0m \u001b[31m13.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m322.1/322.1 kB\u001b[0m \u001b[31m25.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m94.9/94.9 kB\u001b[0m \u001b[31m8.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m11.3/11.3 MB\u001b[0m \u001b[31m109.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m72.0/72.0 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[2K \u001b[90mโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”\u001b[0m \u001b[32m62.3/62.3 kB\u001b[0m \u001b[31m5.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25h" + ] + } + ], + "source": [ + "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai gradio" + ] + }, + { + "cell_type": "code", + "source": [ + "#imports\n", + "\n", + "import time\n", + "from io import StringIO\n", + "import torch\n", + "import numpy as np\n", + "import pandas as pd\n", + "import random\n", + "from openai import OpenAI\n", + "from sqlalchemy import create_engine\n", + "from google.colab import drive, userdata\n", + "import gradio as gr\n", + "from huggingface_hub import login\n", + "from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig" + ], + "metadata": { + "id": "JXpWOzKve7kr" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Model Constants\n", + "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"" + ], + "metadata": { + "id": "rcv0lCS5GRPX" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Authentication\n", + "\n", + "hf_token = userdata.get(\"HF_TOKEN\")\n", + "openai_api_key = userdata.get(\"OPENAI_API_KEY\")\n", + "if not hf_token or not openai_api_key:\n", + " raise ValueError(\"Missing HF_TOKEN or OPENAI_API_KEY. Set them as environment variables.\")\n", + "\n", + "login(hf_token, add_to_git_credential=True)\n", + "openai = OpenAI(api_key=openai_api_key)" + ], + "metadata": { + "id": "3XS-s_CwFSQU" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Tokenizer Setup\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", + "tokenizer.pad_token = tokenizer.eos_token" + ], + "metadata": { + "id": "oRdmdzXoF_f9", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 113, + "referenced_widgets": [ + "4e96acd5f9b844828892423e01e985af", + "de1eda1f836f4524aa3f9946992b3518", + "11c7606385d9493a9f8e6844f91bf984", + "ce253fcd9ac744dc817132f40c5a4449", + "30bd0ada61fe44aaa355d8f8ba8608c9", + "f3a7735eff734c8fa6619cf55f9250fe", + "06f884acd7b84247a2b11f466986425b", + "b88f70ba045142a6b2f7432d0b198823", + "b4259e7cd7a9486b8e5241ac194773c8", + "8671c9fe95774ce393938e81e4ac0fea", + "c6c24948c71b4dae9b22f82d681f6d14", + "168db3ccd3584d8787372a430e81afc1", + "5dbe37818214460a883f8854a7a22ddd", + "10fbda2b3d854ef5b42f36a1e6886df0", + "5685b60bff9648ea9fbf426b6c6c34b7", + "1ee68b5cb4914705962a8efdd6f3089f", + "49cb0c97bcfb4be78e934120938e37f3", + "f8dbf020aa9f4d8fa29ce7657d980259", + "3e96b2e9fb6443dfaf289ecdc3ef6e86", + "f08e6a34891d4873a0fb7fcf8ef016c6", + "9ac1e95865934a7d8a71bc49c4c6d78e", + "83701fdbe9174a4f83ab0fa90da5c20b", + "aab110b0331242d0a2c103f15a3ac9bc", + "c8cb81d97dd14b8a84b7caf93cc94667", + "24c00069c1f4415fa6bd9b2eee09d53a", + "ce64d54649b84b71b420867452873172", + "4e2799deeb954c42ac88d2f52a021929", + "66e6db7334b241ad9519e28527c17ef0", + "ab9b46c7f454447cb7b4b97e48ad2dbd", + "2d74528d3e4947fc9588b635aa1a375e", + "4ca4229ff72a470a8984d91b3dd774aa", + "f7a40eeb26da47e38b9da249b97cb484", + "dc27afa005844dafafaf05431ec4d70f" + ] + }, + "outputId": "ad3d1ca1-b6c1-4456-c563-41c7eb1d283c" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "tokenizer_config.json: 0%| | 0.00/55.4k [00:00โš ๏ธ Address type is required. Please select one.\",\n", + " gr.update(interactive=False, elem_classes=\"yellow_btn\")\n", + " )\n", + " # Return success message and set button to blue and enabled\n", + " return (\n", + " \"Ready to generate dataset.\",\n", + " gr.update(interactive=True, elem_classes=\"blue_btn\")\n", + " )\n" + ], + "metadata": { + "id": "z5pFDbnTz-fP" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Gradio UI\n", + "with gr.Blocks() as app:\n", + " gr.Markdown(\"## Dynamic CSV Dataset Viewer\")\n", + "\n", + " num_records_slider = gr.Slider(minimum=5, maximum=50, step=5, value=20, label=\"Number of Records\")\n", + "\n", + " with gr.Row(equal_height=True):\n", + " address_type_radio = gr.Radio(\n", + " [\"us_only\", \"international\", \"us_international\", \"americas\", \"europe\"],\n", + " value=\"\",\n", + " label=\"Address and Phone Type\",\n", + " info=\"Select the type of addresses and phone numbers\"\n", + " )\n", + " status_text = gr.Markdown(\n", + " \"โš ๏ธ Please select an address type above to proceed.\",\n", + " elem_id=\"status_text\"\n", + " )\n", + "\n", + " generate_btn = gr.Button(\"Generate Data\", interactive=True, elem_id=\"generate_btn\")\n", + "\n", + " response_text = gr.Textbox(value=\"\", label=\"Generated Driver List CSV\", interactive=False)\n", + " dataframe_output = gr.Dataframe(value=pd.DataFrame(), label=\"Generated Driver List Dataset\")\n", + "\n", + " # Update status text and button style dynamically\n", + " address_type_radio.change(fn=check_address_selection, inputs=[address_type_radio], outputs=[status_text, generate_btn])\n", + "\n", + " generate_btn.click(update_dataset, inputs=[num_records_slider, address_type_radio], outputs=[dataframe_output, response_text])\n", + "\n", + " # Custom CSS for button colors\n", + " app.css = \"\"\"\n", + " .blue_btn {\n", + " background-color: green;\n", + " color: white;\n", + " }\n", + " \"\"\"\n", + "\n", + "app.launch(share=True) # Ensure sharing is enabled in Colab" + ], + "metadata": { + "id": "z3K6PfAiL2ZA", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 591 + }, + "outputId": "50cd6ee9-8229-4f58-ad1f-da67990dfc28" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n", + "* Running on public URL: https://2d1cc8e9fab628c1ca.gradio.live\n", + "\n", + "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "
" + ] + }, + "metadata": {} + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [] + }, + "metadata": {}, + "execution_count": 16 + } + ] + } + ] +} \ No newline at end of file From 85be21bf61db0a7b2746b90321e5605194d0e1dd Mon Sep 17 00:00:00 2001 From: ChrisW19 <58227421+ChrisW19@users.noreply.github.com> Date: Wed, 12 Mar 2025 15:02:30 -0700 Subject: [PATCH 18/43] Create Day5-Synthetic_Dataset_Generator.ipynb --- .../Day5-Synthetic_Dataset_Generator.ipynb | 295 ++++++++++++++++++ 1 file changed, 295 insertions(+) create mode 100644 week3/community-contributions/Day5-Synthetic_Dataset_Generator.ipynb diff --git a/week3/community-contributions/Day5-Synthetic_Dataset_Generator.ipynb b/week3/community-contributions/Day5-Synthetic_Dataset_Generator.ipynb new file mode 100644 index 0000000..661642b --- /dev/null +++ b/week3/community-contributions/Day5-Synthetic_Dataset_Generator.ipynb @@ -0,0 +1,295 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4" + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "T-6b4FqreeIl", + "collapsed": true + }, + "outputs": [], + "source": [ + "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai gradio" + ] + }, + { + "cell_type": "code", + "source": [ + "#imports\n", + "\n", + "import time\n", + "from io import StringIO\n", + "import torch\n", + "import numpy as np\n", + "import pandas as pd\n", + "import random\n", + "from openai import OpenAI\n", + "from sqlalchemy import create_engine\n", + "from google.colab import drive, userdata\n", + "import gradio as gr\n", + "from huggingface_hub import login\n", + "from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig" + ], + "metadata": { + "id": "JXpWOzKve7kr" + }, + "execution_count": 3, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Model Constants\n", + "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"" + ], + "metadata": { + "id": "rcv0lCS5GRPX" + }, + "execution_count": 4, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Authentication\n", + "\n", + "hf_token = userdata.get(\"HF_TOKEN\")\n", + "openai_api_key = userdata.get(\"OPENAI_API_KEY\")\n", + "if not hf_token or not openai_api_key:\n", + " raise ValueError(\"Missing HF_TOKEN or OPENAI_API_KEY. Set them as environment variables.\")\n", + "\n", + "login(hf_token, add_to_git_credential=True)\n", + "openai = OpenAI(api_key=openai_api_key)" + ], + "metadata": { + "id": "3XS-s_CwFSQU" + }, + "execution_count": 5, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Tokenizer Setup\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", + "tokenizer.pad_token = tokenizer.eos_token" + ], + "metadata": { + "id": "oRdmdzXoF_f9" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Model Quantization for Performance Optimization\n", + "\n", + "quant_config = BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + ")" + ], + "metadata": { + "id": "kRN0t2yrGmAe" + }, + "execution_count": 7, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Load Model Efficiency\n", + "\n", + "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", + "model = AutoModelForCausalLM.from_pretrained(LLAMA, device_map=\"auto\", quantization_config=quant_config)" + ], + "metadata": { + "id": "fYPyudKHGuE9" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def generate_ev_driver(num_records, address_type):\n", + " # Adjusting the prompt based on checkbox selection\n", + " address_prompts = {\n", + " \"international\": f\"Generate {num_records} rows of synthetic personal data with international addresses and phone numbers.\",\n", + " \"us_only\": f\"Generate {num_records} rows of synthetic personal data with U.S.-only addresses and phone numbers.\",\n", + " \"us_international\": f\"Generate {num_records} rows of synthetic personal data with a mix of U.S. and international addresses and phone numbers.\",\n", + " \"americas\": f\"Generate {num_records} rows of synthetic personal data with a mix of U.S., Canada, Central America, and South America addresses and phone numbers.\",\n", + " \"europe\": f\"Generate {num_records} rows of synthetic personal data with Europe-only addresses and phone numbers.\",\n", + " }\n", + "\n", + " address_prompt = address_prompts.get(address_type, \"Generate synthetic personal data.\")\n", + " # Generate unique driver IDs\n", + " driver_ids = random.sample(range(1, 1000001), num_records)\n", + "\n", + " user_prompt = f\"\"\"\n", + " {address_prompt}\n", + " Each row should include:\n", + " - driverid (unique from the provided list: {driver_ids})\n", + " - first_name (string)\n", + " - last_name (string)\n", + " - email (string)\n", + " - phone_number (string)\n", + " - address (string)\n", + " - city (string)\n", + " - state (string)\n", + " - zip_code (string)\n", + " - country (string)\n", + "\n", + " Ensure the CSV format is valid, with proper headers and comma separation.\n", + " \"\"\"\n", + "\n", + " response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant that generates structured CSV data.\"},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + " )\n", + "\n", + " # Call the new function to clean and extract the CSV data\n", + " return clean_and_extract_csv(response)" + ], + "metadata": { + "id": "9q9ccNr8fMyg" + }, + "execution_count": 12, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def clean_and_extract_csv(response):\n", + " # Clean up the response and remove the last occurrence of the code block formatting\n", + " csv_data = response.choices[0].message.content.strip()\n", + " csv_data = csv_data.rsplit(\"```\", 1)[0].strip()\n", + "\n", + " # Define header and split the content to extract the data\n", + " header = \"driverid,first_name,last_name,email,phone_number,address,city,state,zip_code,country\"\n", + " _, *content = csv_data.split(header, 1)\n", + "\n", + " # Return the cleaned CSV data along with the header\n", + " return header + content[0].split(\"\\n\\n\")[0] if content else csv_data" + ], + "metadata": { + "id": "So1aGRNJBUyv" + }, + "execution_count": 13, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "def update_dataset(num_records, address_type):\n", + " response = generate_ev_driver(num_records, address_type)\n", + "\n", + " # Convert response to DataFrame\n", + " try:\n", + " df = pd.read_csv(StringIO(response))\n", + " except Exception as e:\n", + " return pd.DataFrame(), f\"Error parsing dataset: {str(e)}\"\n", + "\n", + " return df, response" + ], + "metadata": { + "id": "T0KxUm2yYtuQ" + }, + "execution_count": 14, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Function to handle address type selection\n", + "def check_address_selection(selected_type):\n", + " if not selected_type:\n", + " # Return the error message and set button to yellow and disabled\n", + " return (\n", + " \"โš ๏ธ Address type is required. Please select one.\",\n", + " gr.update(interactive=False, elem_classes=\"yellow_btn\")\n", + " )\n", + " # Return success message and set button to blue and enabled\n", + " return (\n", + " \"Ready to generate dataset.\",\n", + " gr.update(interactive=True, elem_classes=\"blue_btn\")\n", + " )\n" + ], + "metadata": { + "id": "z5pFDbnTz-fP" + }, + "execution_count": 15, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Gradio UI\n", + "with gr.Blocks() as app:\n", + " gr.Markdown(\"## Dynamic CSV Dataset Viewer\")\n", + "\n", + " num_records_slider = gr.Slider(minimum=5, maximum=50, step=5, value=20, label=\"Number of Records\")\n", + "\n", + " with gr.Row(equal_height=True):\n", + " address_type_radio = gr.Radio(\n", + " [\"us_only\", \"international\", \"us_international\", \"americas\", \"europe\"],\n", + " value=\"\",\n", + " label=\"Address and Phone Type\",\n", + " info=\"Select the type of addresses and phone numbers\"\n", + " )\n", + " status_text = gr.Markdown(\n", + " \"โš ๏ธ Please select an address type above to proceed.\",\n", + " elem_id=\"status_text\"\n", + " )\n", + "\n", + " generate_btn = gr.Button(\"Generate Data\", interactive=True, elem_id=\"generate_btn\")\n", + "\n", + " response_text = gr.Textbox(value=\"\", label=\"Generated Driver List CSV\", interactive=False)\n", + " dataframe_output = gr.Dataframe(value=pd.DataFrame(), label=\"Generated Driver List Dataset\")\n", + "\n", + " # Update status text and button style dynamically\n", + " address_type_radio.change(fn=check_address_selection, inputs=[address_type_radio], outputs=[status_text, generate_btn])\n", + "\n", + " generate_btn.click(update_dataset, inputs=[num_records_slider, address_type_radio], outputs=[dataframe_output, response_text])\n", + "\n", + " # Custom CSS for button colors\n", + " app.css = \"\"\"\n", + " .blue_btn {\n", + " background-color: green;\n", + " color: white;\n", + " }\n", + " \"\"\"\n", + "\n", + "app.launch(share=True) # Ensure sharing is enabled in Colab" + ], + "metadata": { + "id": "z3K6PfAiL2ZA" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file From dbd23840bc6f2d6817f46a1355edac9900a6a2df Mon Sep 17 00:00:00 2001 From: JAL Date: Thu, 13 Mar 2025 01:39:41 +0100 Subject: [PATCH 19/43] Added my contributions to community-contributions_W1D5 --- .../W1D5_Code_instructor.ipynb | 269 ++++++++++++++++++ 1 file changed, 269 insertions(+) create mode 100644 week1/community-contributions/W1D5_Code_instructor.ipynb diff --git a/week1/community-contributions/W1D5_Code_instructor.ipynb b/week1/community-contributions/W1D5_Code_instructor.ipynb new file mode 100644 index 0000000..47de4ce --- /dev/null +++ b/week1/community-contributions/W1D5_Code_instructor.ipynb @@ -0,0 +1,269 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0e5dc476-e3c9-49bd-934a-35dbe0d55b13", + "metadata": {}, + "source": [ + "# End of week 1 exercise (with user input(question, model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "353fba18-a9b4-4ba8-be7e-f3e3c37521ff", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import ollama" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "be2b859d-b3d2-41f7-8666-28ecde26e3b8", + "metadata": {}, + "outputs": [], + "source": [ + "# set up environment and constants\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1b2b694-11a1-4d2a-8e34-d1fb02617fa3", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are an expert coder with educational skills for beginners. \\\n", + "You are able to explain, debbug or generate code in Python, R or bash, and to provide examples of use case if applicable. \\\n", + "Please add references to relevant sources if available. If not, do not invent.\\n\"\n", + "system_prompt += \"this is an example of a response:\"\n", + "system_prompt += \"\"\"\n", + "Sure! Hereโ€™s the explanation in plain text format, suitable for Markdown:\n", + "\n", + "# Explanation of the Code\n", + "\n", + "### Code:\n", + "```python\n", + "full_name = lambda first, last: f'Full name: {first.title()} {last.title()}'\n", + "```\n", + "\n", + "### Explanation:\n", + "\n", + "1. **Lambda Function:**\n", + " - The keyword `lambda` is used to create a small, one-line anonymous function (a function without a name).\n", + " - It takes two parameters: `first` (for the first name) and `last` (for the last name).\n", + "\n", + "2. **String Formatting (`f-string`):**\n", + " - `f'Full name: {first.title()} {last.title()}'` is a formatted string (f-string).\n", + " - It inserts the values of `first` and `last` into the string while applying `.title()` to capitalize the first letter of each name.\n", + "\n", + "3. **Assigning the Function:**\n", + " - The lambda function is assigned to the variable `full_name`, so we can use `full_name()` like a regular function.\n", + "\n", + "### How to Use It:\n", + "Now, letโ€™s call this function and see what it does.\n", + "\n", + "```python\n", + "print(full_name(\"john\", \"doe\"))\n", + "```\n", + "\n", + "#### Output:\n", + "```\n", + "Full name: John Doe\n", + "```\n", + "\n", + "### What Happens:\n", + "- `\"john\"` becomes `\"John\"` (because `.title()` capitalizes the first letter).\n", + "- `\"doe\"` becomes `\"Doe\"`.\n", + "- The output is `\"Full name: John Doe\"`.\n", + "\n", + "### Summary:\n", + "This is a simple way to create a function that formats a full name while ensuring proper capitalization. You could write the same function using `def` like this:\n", + "\n", + "```python\n", + "def full_name(first, last):\n", + " return f'Full name: {first.title()} {last.title()}'\n", + "```\n", + "\n", + "Both versions work the same way, but the `lambda` version is more compact.\n", + "\n", + "### Reference(s):\n", + "To deepen your understanding of the code snippet involving Python's lambda functions here is a resource you might find helpful:\n", + "\n", + "Ref. **Python Lambda Functions:**\n", + " - The official Python documentation provides an in-depth explanation of lambda expressions, including their syntax and use cases.๎ˆ†\n", + " - [Lambda Expressions](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions)\n", + "\n", + "```\n", + "You can copy and paste this into any Markdown file or viewer. Let me know if you need further modifications! ๐Ÿ˜Š\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7225ab0-5ade-4c93-839c-3c80b0b23c37", + "metadata": {}, + "outputs": [], + "source": [ + "# display(Markdown(system_prompt))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "07fa2506-4b24-4a53-9f3f-500b4cbcb10a", + "metadata": {}, + "outputs": [], + "source": [ + "# user question\n", + "default_question= \"\"\"\n", + "Please explain what this code does and why:\n", + "yield from {book.get('author') from book in books if book.get('author')}\n", + "\"\"\"\n", + "user_question= str(input(\"What code do you want me to explain?/n(Press 'Enter' for an example)\"))\n", + "\n", + "if user_question== '':\n", + " question= default_question\n", + " print(default_question)\n", + "else:\n", + " question= \"Please explain what this code does and why:\\n\" + user_question" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6749065-fb8a-4f9f-8297-3cd33abd97bd", + "metadata": {}, + "outputs": [], + "source": [ + "print(question)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f48df06c-edb7-4a05-9e56-910854dad0c7", + "metadata": {}, + "outputs": [], + "source": [ + "# user model\n", + "model_number= input(\"\"\"\n", + "Please enter the number of the model you want to use from the list below:\n", + "1 GPT-4o Mini\n", + "2 Llama 3.2\n", + "3 DeepSeek R1\n", + "4 Qwen 2.5\n", + "\"\"\")\n", + "try:\n", + " if int(model_number)==1:\n", + " model= 'gpt-4o-mini'\n", + " elif int(model_number)==2:\n", + " model= 'llama3.2'\n", + " elif int(model_number)==3:\n", + " model= 'deepseek-r1:1.5b'\n", + " elif int(model_number)==4:\n", + " model= 'qwen2.5:3b'\n", + " else:\n", + " model= ''\n", + " print(\"please provide only a number from the list\")\n", + "except:\n", + " model=''\n", + " print(\"Please provide a number or press 'Enter' to finish\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aeb6e4e5-fb63-4192-bb74-0b015dfedfb7", + "metadata": {}, + "outputs": [], + "source": [ + "# print(model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fffa6021-d3f8-4855-a694-bed6d651791f", + "metadata": {}, + "outputs": [], + "source": [ + "messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": question}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "835374a4-3df5-4f28-82e3-6bc70514df16", + "metadata": {}, + "outputs": [], + "source": [ + "if int(model_number)==1:\n", + " openai= OpenAI()\n", + " stream = openai.chat.completions.create(\n", + " model=model,\n", + " messages=messages,\n", + " stream= True\n", + " )\n", + "\n", + " response = \"\"\n", + " print(\"The following answer will be generated by {0} LLM\".format(model))\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n", + "elif int(model_number)==2 or 3 or 4:\n", + " !ollama pull {model}\n", + " print(\"\\n\\nThe following answer will be generated by {0} LLM\\n\\n\".format(model))\n", + " response = ollama.chat(\n", + " model=model,\n", + " messages = messages)\n", + " result= response['message']['content']\n", + " display(Markdown(result))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 3fe1d5e81bfbcff75d1fe3730b681ea22d228a5c Mon Sep 17 00:00:00 2001 From: JAL Date: Thu, 13 Mar 2025 01:43:33 +0100 Subject: [PATCH 20/43] Added my contributions to community-contributions_W1D2 --- .../Ollama_websummarizer_user_input.ipynb | 240 ++++++++++++++++++ 1 file changed, 240 insertions(+) create mode 100644 week1/community-contributions/Ollama_websummarizer_user_input.ipynb diff --git a/week1/community-contributions/Ollama_websummarizer_user_input.ipynb b/week1/community-contributions/Ollama_websummarizer_user_input.ipynb new file mode 100644 index 0000000..33a07e7 --- /dev/null +++ b/week1/community-contributions/Ollama_websummarizer_user_input.ipynb @@ -0,0 +1,240 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "9964872b-225d-4ced-93e4-fc5b279ec2ed", + "metadata": {}, + "source": [ + "# Webpage English summarizer with user inputs (url, ollama-based LLM) " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e49d399-d18c-4c91-8abc-cf3289e11e2f", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "# from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "import ollama, time\n", + "from tqdm import tqdm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46e7d809-248d-41b8-80e1-36b210041581", + "metadata": {}, + "outputs": [], + "source": [ + "# Define system prompt.\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a detailed summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown, in English.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e8bf237f-591f-4c32-9415-5d5d4e2522b8", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a detailed summary of this website in markdown. \\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d39ee6d-c670-41ba-a0b8-debd55bda8e3", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43e28ff5-2def-4a47-acdd-2e06c0666956", + "metadata": {}, + "outputs": [], + "source": [ + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32f4f481-81a3-479d-817b-4e754d9af46d", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = HEADERS\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f81cfd17-8208-4192-a59f-485ff3ea74e4", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the ollama API wrapper and return the relevant component of the response\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = ollama.chat(\n", + " model=MODEL,\n", + " messages = messages_for(website)\n", + " )\n", + " return response['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7a9eedc6-2183-473d-84ca-b10d40e2a1e6", + "metadata": {}, + "outputs": [], + "source": [ + "# Ask the user the name of the url address\n", + "\n", + "url= str(input(\"\"\"\n", + "Please provide a valid url address:\n", + "https://\"\"\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d012de2-0ef2-43db-9f51-fc7f989c3642", + "metadata": {}, + "outputs": [], + "source": [ + "# Ask the user to select a valid model\n", + "\n", + "MODEL= str(input(\"\"\"\n", + "Please select a LLM:\n", + "(examples: llama3.2, deepseek-r1:1.5b)\n", + "\"\"\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ac8c02e-4a62-448b-a231-8c6f65891811", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's just make sure the model is loaded\n", + "\n", + "!ollama pull {MODEL}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0544541f-11a8-4eb7-8eb6-bc032ed6d0d1", + "metadata": {}, + "outputs": [], + "source": [ + "print('url: https://{0}\\nModel= {1}'.format(url, MODEL))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45518950-f2c9-43af-b897-4fe8fe48dfd8", + "metadata": {}, + "outputs": [], + "source": [ + "summary = summarize('https://'+ url)\n", + "for summ in tqdm(summary):\n", + " time.sleep(0.01)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02c0c15e-216d-47c7-843d-ac27af02820b", + "metadata": {}, + "outputs": [], + "source": [ + "display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "985a3689-5827-4b15-b8d5-276f9b292afd", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 2276017ab2da8c15696bc04b3d338d7bbc2b4556 Mon Sep 17 00:00:00 2001 From: "Palbha Kulkarni (Nazwale)" Date: Wed, 12 Mar 2025 21:57:02 -0400 Subject: [PATCH 21/43] Add files via upload --- ...mini_meeting_minutes_from_transcript.ipynb | 152 ++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 week1/community-contributions/day1_gemini_meeting_minutes_from_transcript.ipynb diff --git a/week1/community-contributions/day1_gemini_meeting_minutes_from_transcript.ipynb b/week1/community-contributions/day1_gemini_meeting_minutes_from_transcript.ipynb new file mode 100644 index 0000000..9149547 --- /dev/null +++ b/week1/community-contributions/day1_gemini_meeting_minutes_from_transcript.ipynb @@ -0,0 +1,152 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Getting MOM from call transcripts" + ], + "metadata": { + "id": "99Z21wE7xpKS" + } + }, + { + "cell_type": "markdown", + "source": [ + "Import necessary libraries" + ], + "metadata": { + "id": "YZMeexE8M_Pp" + } + }, + { + "cell_type": "code", + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n" + ], + "metadata": { + "id": "u5DCVg0Mxj5T" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i0V11JQ2az-C" + }, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "#The below code can be uncommented in using .env file\n", + "\n", + "#from dotenv import load_dotenv\n", + "#load_dotenv(override=True)\n", + "#api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "#I am using google colab to import api_key\n", + "from google.colab import userdata\n", + "api_key=userdata.get('gemini_api')\n", + "\n", + "# Check the key\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")" + ] + }, + { + "cell_type": "code", + "source": [ + "# A class to represet Transcript\n", + "from pathlib import Path\n", + "class Transcript:\n", + " def __init__(self, file_path):\n", + " self.file_path=file_path\n", + " self.content=Path(file_path).read_text(encoding='utf-8')\n" + ], + "metadata": { + "id": "j6UTsnTEyWZ-" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Source of the text file -\"https://raw.githubusercontent.com/GeminiLn/EarningsCall_Dataset/refs/heads/master/3M%20Company_20170425/Text.txt\"\n", + "path = '/content/Text.txt' # Specify the path of file you want to use - format should be .txt\n", + "t=Transcript(path)\n" + ], + "metadata": { + "id": "hquePU_mzZ7s" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "\n", + "system_prompt = \"You are expert at taking Meeting Notes & given the below transcript , create an MOM (Minutes of meeting)\"" + ], + "metadata": { + "id": "ex5DB7M8L7KT" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from google import genai\n", + "from google.genai import types\n", + "\n", + "client = genai.Client(api_key=api_key)\n", + "\n", + "response = client.models.generate_content(\n", + " model=\"gemini-2.0-flash\",\n", + " config=types.GenerateContentConfig(\n", + " system_instruction=system_prompt,\n", + " max_output_tokens=500,\n", + " temperature=0.1\n", + " ),\n", + " contents=t.content,\n", + ")\n", + "\n", + "print(response.text)" + ], + "metadata": { + "id": "wcpJ34qfMKmV" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file From 6465267e0561ea79a2522261c18d666cd4f9b0b8 Mon Sep 17 00:00:00 2001 From: Martijn van de Rijdt Date: Thu, 13 Mar 2025 11:26:41 -0400 Subject: [PATCH 22/43] Update day1.ipynb correction in the deepseek response length check --- week2/day1.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/week2/day1.ipynb b/week2/day1.ipynb index 7371667..5768371 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -485,7 +485,7 @@ "\n", "print(reasoning_content)\n", "print(content)\n", - "print(\"Number of words:\", len(reply.split(\" \")))" + "print(\"Number of words:\", len(content.split(\" \")))" ] }, { From bddbbb6a6ddd00696a097dc77b62109f9ff3424b Mon Sep 17 00:00:00 2001 From: Hasani Perera Date: Fri, 14 Mar 2025 00:28:47 +0000 Subject: [PATCH 23/43] add Python code for C/CPP/Rust converter improve python conversion tool by integrating both open and close source models with improved cleanup routines. --- .../w4_lang_converter.py | 346 ++++++++++++++++++ 1 file changed, 346 insertions(+) create mode 100644 week4/community-contributions/w4_lang_converter.py diff --git a/week4/community-contributions/w4_lang_converter.py b/week4/community-contributions/w4_lang_converter.py new file mode 100644 index 0000000..246fa2d --- /dev/null +++ b/week4/community-contributions/w4_lang_converter.py @@ -0,0 +1,346 @@ +import os +import io +import sys +import re +import subprocess +from dotenv import load_dotenv +from openai import OpenAI +from anthropic import Anthropic +import gradio as gr + +# Load environment variables and initialize APIs +load_dotenv(override=True) +openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) +anthropic = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) +MACHINE_SPEC = "MacbookPro, Apple M1 Chip" + +# Define global variables for HF integration +# For HF chat-based CodeQwen model +code_qwen = "Qwen/CodeQwen1.5-7B-Chat" +CODE_QWEN_URL = "" + + +def clean_code(code, target_language): + """ + Remove markdown code fences and stray language indicators. + Also apply language-specific replacements. + """ + raw_lines = code.splitlines() + cleaned_lines = [] + for line in raw_lines: + if "```" in line: + continue + if line.strip().lower() in ["c", "cpp", "c++", "rust"]: + continue + cleaned_lines.append(line) + cleaned = "\n".join(cleaned_lines) + if target_language == "C": + cleaned = cleaned.replace("1U << 32", "(1ULL << 32)") + if target_language == "Rust": + cleaned = process_rust_code(cleaned) + return cleaned + +# Conversion prompt functions (target language-aware) +def user_prompt_for(python_code, target_language): + return ( + f"Rewrite this Python code in {target_language} with the fastest possible implementation that produces identical output. " + f"Respond only with {target_language} code; do not explain your work. " + "Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\n\n" + + python_code + ) + +def messages_for(python_code, target_language): + system_message = ( + f"You are an assistant that reimplements Python code in high performance {target_language} for an {MACHINE_SPEC}. " + f"Respond only with {target_language} code; use comments sparingly. " + f"The {target_language} response needs to produce an identical output in the fastest possible time." + ) + return [ + {"role": "system", "content": system_message}, + {"role": "user", "content": user_prompt_for(python_code, target_language)}, + ] + +def write_output(code, target_language): + """Write the converted code to a file based on target language.""" + tag = target_language.lower() if target_language is not None else "" + if target_language == "C++": + filename = "optimized.cpp" + elif target_language == "C": + filename = "optimized.c" + elif target_language == "Rust": + filename = "optimized.rs" + else: + filename = "optimized.txt" + cleaned = code.replace(f"```{tag}\n", "").replace("```", "") + lines = cleaned.splitlines() + if lines and lines[0].strip().lower() in ["cpp", "c++", "c", "rust"]: + lines = lines[1:] + cleaned = "\n".join(lines) + cleaned = clean_code(cleaned, target_language) + with open(filename, "w") as f: + f.write(cleaned) + return filename + +# GPT integration for conversion +def stream_gpt(python_code, target_language, model_version): + stream = openai.chat.completions.create( + model=model_version, # Use selected GPT model version + messages=messages_for(python_code, target_language), + stream=True, + ) + reply = "" + for chunk in stream: + if not hasattr(chunk, "choices") or not chunk.choices: + continue + fragment = chunk.choices[0].delta.content or "" + reply += fragment + yield reply.replace(f"```{target_language}\n", "").replace("```", "") + +# Claude integration for conversion +def stream_claude(python_code, target_language, model_version): + prompt = user_prompt_for(python_code, target_language) + response = anthropic.completions.create( + prompt=prompt, + model=model_version, + stream=True, + ) + reply = "" + for chunk in response: + fragment = chunk.get("completion", "") + reply += fragment + yield reply.replace(f"```{target_language}\n", "").replace("```", "") + +# Hugging Face integration functions +def stream_code_qwen(python_code, target_language, model_version): + """ + HF chat-based model using CodeQwen. + """ + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(code_qwen) + messages = messages_for(python_code, target_language) + # Convert messages to chat format as expected by Qwen. + text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + from huggingface_hub import InferenceClient + client = InferenceClient(CODE_QWEN_URL, token=os.getenv("HF_TOKEN")) + stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000) + result = "" + for r in stream: + result += r.token.text + yield result.replace(f"```{target_language}\n", "").replace("```", "") + +def stream_huggingface(python_code, target_language, model_version): + """ + HF single-prompt model integration. + """ + prompt = user_prompt_for(python_code, target_language) + from huggingface_hub import InferenceClient + client = InferenceClient(model_name=model_version, token=os.getenv("HF_TOKEN")) + stream = client.text_generation(prompt, stream=True, details=True, max_new_tokens=3000) + reply = "" + for chunk in stream: + reply += chunk.token.text + yield reply.replace(f"```{target_language}\n", "").replace("```", "") + + +def optimize(python_code, combined_model, target_language): + """ + combined_model is a string like "GPT: gpt-4o", "CLAUDE: claude-3-5-sonnet-20240620" or "HF: model_name" + """ + provider, model_version = [x.strip() for x in combined_model.split(":")] + if provider == "GPT": + for partial in stream_gpt(python_code, target_language, model_version): + yield partial + elif provider == "CLAUDE": + for partial in stream_claude(python_code, target_language, model_version): + yield partial + elif provider == "HF": + if "CodeQwen" in model_version: + for partial in stream_code_qwen(python_code, target_language, model_version): + yield partial + else: + for partial in stream_huggingface(python_code, target_language, model_version): + yield partial + else: + raise ValueError("Unknown model provider") + +def execute_python(code): + """Execute Python code and return its output.""" + env = {} # Dedicated global namespace + try: + output = io.StringIO() + sys.stdout = output + exec(code, env) + finally: + sys.stdout = sys.__stdout__ + return output.getvalue() + +def execute_cpp(code): + write_output(code, target_language="C++") + try: + compile_cmd = [ + "clang++", "-Ofast", "-std=c++17", "-march=armv8.5-a", + "-mtune=apple-m1", "-mcpu=apple-m1", "-o", "optimized", "optimized.cpp" + ] + subprocess.run(compile_cmd, check=True, text=True, capture_output=True) + run_cmd = ["./optimized"] + run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True) + return run_result.stdout + except subprocess.CalledProcessError as e: + return f"Error:\n{e.stderr}" + +def execute_c(code): + cleaned_code = clean_code(code, "C") + with open("optimized.c", "w") as f: + f.write(cleaned_code) + try: + compile_cmd = ["clang", "-O2", "-std=c11", "-o", "optimized_c", "optimized.c"] + subprocess.run(compile_cmd, check=True, text=True, capture_output=True) + run_cmd = ["./optimized_c"] + run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True) + return run_result.stdout + except subprocess.CalledProcessError as e: + return f"Error:\n{e.stderr}" + +def process_rust_code(code): + code = code.replace("{:.6f}", "{:.6}") + code = re.sub( + r'(println!$begin:math:text$"Execution Time: \\{\\:\\.6\\} seconds", duration\\.as_secs_f64)(\\s*)$', + r'\\1())', + code, + flags=re.MULTILINE, + ) + code = code.replace("max_val - min_val as u32 + 1", "((max_val - min_val + 1) as u32)") + code = code.replace("1 << 32", "1u64 << 32") + code = re.sub(r'($end:math:text$\s*as i64)\)', r'\1', code) + return code + +def execute_rust(code): + code = code.replace("```rust\n", "").replace("```", "") + lines = code.split('\n', 1) + if lines and lines[0].strip().lower() == "rust": + code = lines[1] if len(lines) > 1 else "" + code = process_rust_code(code) + with open("optimized.rs", "w") as f: + f.write(code) + try: + compile_cmd = ["rustc", "optimized.rs", "-O", "-o", "optimized_rust"] + subprocess.run(compile_cmd, check=True, text=True, capture_output=True) + run_cmd = ["./optimized_rust"] + run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True) + return run_result.stdout + except subprocess.CalledProcessError as e: + return f"Error:\n{e.stderr}" + +def execute_target_code(code, target_language): + """Select the appropriate execution function based on target language.""" + if target_language == "C++": + return execute_cpp(code) + elif target_language == "C": + return execute_c(code) + elif target_language == "Rust": + return execute_rust(code) + else: + return "Unsupported language" + +# Gradio UI setup +css = """ +.python {background-color: #306998;} +.code {background-color: #050;} +""" + +def launch_ui(): + with gr.Blocks(css=css) as ui: + gr.Markdown("## Convert Python Code to C/C++/Rust") + with gr.Row(): + python_box = gr.Textbox(label="Python code:", value=PYTHON_HARD, lines=10) + converted_box = gr.Textbox(label="Converted Code:", lines=10) + with gr.Row(): + model_dropdown = gr.Dropdown( + ["GPT: gpt-4o", "GPT: gpt-4o-mini", "CLAUDE: claude-3-5-sonnet-20240620", "CLAUDE: claude-3-haiku-20240307", "HF: CodeQwen1.5-7B-Chat", "HF: bigcode/starcoder"], + label="Select Model", + value="GPT: gpt-4o" + ) + target_lang_dropdown = gr.Dropdown( + ["C++", "C", "Rust"], + label="Select target language", + value="C++" + ) + with gr.Row(): + convert_btn = gr.Button("Convert code") + with gr.Row(): + python_run_btn = gr.Button("Run Python") + run_converted_btn = gr.Button("Run Converted Code") + with gr.Row(): + python_out = gr.TextArea(label="Python result:", elem_classes=["python"]) + converted_out = gr.TextArea(label="Converted Code result:", elem_classes=["code"]) + convert_btn.click( + optimize, + inputs=[python_box, model_dropdown, target_lang_dropdown], + outputs=[converted_box], + ) + python_run_btn.click(execute_python, inputs=[python_box], outputs=[python_out]) + run_converted_btn.click( + execute_target_code, + inputs=[converted_box, target_lang_dropdown], + outputs=[converted_out], + ) + ui.launch() + +# Example Python code blocks +PYTHON_HARD = """ +# Support large number sizes +def lcg(seed, a=1664525, c=1013904223, m=2**32): + value = seed + while True: + value = (a * value + c) % m + yield value +def max_subarray_sum(n, seed, min_val, max_val): + lcg_gen = lcg(seed) + random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)] + max_sum = float('-inf') + for i in range(n): + current_sum = 0 + for j in range(i, n): + current_sum += random_numbers[j] + if current_sum > max_sum: + max_sum = current_sum + return max_sum +def total_max_subarray_sum(n, initial_seed, min_val, max_val): + total_sum = 0 + lcg_gen = lcg(initial_seed) + for _ in range(20): + seed = next(lcg_gen) + total_sum += max_subarray_sum(n, seed, min_val, max_val) + return total_sum +n = 10000 +initial_seed = 42 +min_val = -10 +max_val = 10 +import time +start_time = time.time() +result = total_max_subarray_sum(n, initial_seed, min_val, max_val) +end_time = time.time() +print("Total Maximum Subarray Sum (20 runs):", result) +print("Execution Time: {:.6f} seconds".format(end_time - start_time)) +""" + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser( + description="Single script with multiple executable sections and target language support" + ) + parser.add_argument( + "--mode", + choices=["direct", "ui"], + default="ui", + help="Run direct conversion or launch Gradio UI", + ) + args = parser.parse_args() + + if args.mode == "direct": + print("\nExecuting Python code (PYTHON_HARD)...") + exec(PYTHON_HARD) + for partial in optimize(PYTHON_HARD, "GPT: gpt-4o", "C++"): + print(partial, end="") + elif args.mode == "ui": + launch_ui() \ No newline at end of file From c69dd74cf77adc3c8273eaefecf72a2b27dccc64 Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Fri, 14 Mar 2025 11:04:19 +0500 Subject: [PATCH 24/43] Reverted modified files in week2/community-contributions --- .../community-contributions/day3-gemini.ipynb | 4 +- .../day3-gradio-auth.ipynb | 4 +- .../day3-refine-user-query-by-llama.ipynb | 6 +- .../community-contributions/day3.upsell.ipynb | 4 +- ...oking_and_multiple_tools_per_message.ipynb | 375 +++++++++++++++++- 5 files changed, 372 insertions(+), 21 deletions(-) diff --git a/week2/community-contributions/day3-gemini.ipynb b/week2/community-contributions/day3-gemini.ipynb index e942279..c75e878 100644 --- a/week2/community-contributions/day3-gemini.ipynb +++ b/week2/community-contributions/day3-gemini.ipynb @@ -174,7 +174,7 @@ "**message** is the prompt to use \n", "**history** is the past conversation, in OpenAI format \n", "\n", - "We will combine the system message, history and latest message, then call OpenAI." + "We will combine the system message, history and latest message, then call OpenAI ." ] }, { @@ -288,7 +288,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "llms", "language": "python", "name": "python3" }, diff --git a/week2/community-contributions/day3-gradio-auth.ipynb b/week2/community-contributions/day3-gradio-auth.ipynb index 0b6137a..7ec2dc5 100644 --- a/week2/community-contributions/day3-gradio-auth.ipynb +++ b/week2/community-contributions/day3-gradio-auth.ipynb @@ -16,7 +16,7 @@ "import os\n", "from dotenv import load_dotenv\n", "from openai import OpenAI\n", - "import gradio as gr" + "import gradio as gr " ] }, { @@ -160,7 +160,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "llms", "language": "python", "name": "python3" }, diff --git a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb index abeb431..57541d1 100644 --- a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb +++ b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb @@ -20,7 +20,7 @@ "import os\n", "from dotenv import load_dotenv\n", "from openai import OpenAI\n", - "import gradio as gr" + "import gradio as gr " ] }, { @@ -342,7 +342,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "llm_env", "language": "python", "name": "python3" }, @@ -356,7 +356,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.11.9" } }, "nbformat": 4, diff --git a/week2/community-contributions/day3.upsell.ipynb b/week2/community-contributions/day3.upsell.ipynb index a3f94c1..26a3281 100644 --- a/week2/community-contributions/day3.upsell.ipynb +++ b/week2/community-contributions/day3.upsell.ipynb @@ -43,7 +43,7 @@ "# Load environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", "\n", - "load_dotenv()\n", + "load_dotenv() \n", "openai_api_key = os.getenv('OPENAI_API_KEY')\n", "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", "google_api_key = os.getenv('GOOGLE_API_KEY')\n", @@ -347,7 +347,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.11.10" } }, "nbformat": 4, diff --git a/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb b/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb index 1489c51..2e480f1 100644 --- a/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb +++ b/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb @@ -63,14 +63,14 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 18, "id": "0a521d84-d07c-49ab-a0df-d6451499ed97", "metadata": {}, "outputs": [], "source": [ "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", - "system_message += \"Always be accurate. If you don't know the answer, say so.\"\n" + "system_message += \"Always be accurate. If you don't know the answer, say so.\"" ] }, { @@ -244,7 +244,7 @@ " },\n", " \"required\": [\"destination_city\", \"price\"],\n", " \"additionalProperties\": False\n", - " }\n", + " } \n", "}" ] }, @@ -335,21 +335,372 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 26, "id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", "metadata": { "scrolled": true }, "outputs": [ { - "ename": "NameError", - "evalue": "name 'gr' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[1;31m------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[1;32mIn[3], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mgr\u001b[49m\u001b[38;5;241m.\u001b[39mChatInterface(fn\u001b[38;5;241m=\u001b[39mchat, \u001b[38;5;28mtype\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39mlaunch()\n", - "\u001b[1;31mNameError\u001b[0m: name 'gr' is not defined" + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7873\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "application/json": [ + { + "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", + "role": "system" + }, + { + "content": "tickets to london and paris for $50 each please", + "role": "user" + } + ], + "text/plain": [ + "" + ] + }, + "metadata": { + "application/json": { + "expanded": false, + "root": "root" + } + }, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-AtMTR6PDyoghY9BxBI88y03wrkyWT', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_62youPDgpaS0eXN4gru6NT7n', function=Function(arguments='{\"destination_city\": \"London\"}', name='get_ticket_price'), type='function'), ChatCompletionMessageToolCall(id='call_kvQK4Cdyk4b82rqtzkfJyoRh', function=Function(arguments='{\"destination_city\": \"Paris\"}', name='get_ticket_price'), type='function')]))], created=1737757793, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=49, prompt_tokens=313, total_tokens=362, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool get_ticket_price called for London\n", + "Tool get_ticket_price called for Paris\n" + ] + }, + { + "data": { + "application/json": [ + { + "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", + "role": "system" + }, + { + "content": "tickets to london and paris for $50 each please", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "user" + }, + { + "content": "I'm sorry, but tickets to London are $799 and to Paris are $899, which is much higher than $50.", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "assistant" + }, + { + "content": "Can't you book them any way pretty please?", + "role": "user" + } + ], + "text/plain": [ + "" + ] + }, + "metadata": { + "application/json": { + "expanded": false, + "root": "root" + } + }, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-AtMTijl9VhY8svKRySpZ3rdyHBLmq', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"I'm afraid I cannot book the tickets at the price you've requested; the current prices are fixed.\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1737757810, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=21, prompt_tokens=355, total_tokens=376, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/json": [ + { + "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", + "role": "system" + }, + { + "content": "tickets to london and paris for $50 each please", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "user" + }, + { + "content": "I'm sorry, but tickets to London are $799 and to Paris are $899, which is much higher than $50.", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "assistant" + }, + { + "content": "Can't you book them any way pretty please?", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "user" + }, + { + "content": "I'm afraid I cannot book the tickets at the price you've requested; the current prices are fixed.", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "assistant" + }, + { + "content": "how about you book london for $749?", + "role": "user" + } + ], + "text/plain": [ + "" + ] + }, + "metadata": { + "application/json": { + "expanded": false, + "root": "root" + } + }, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-AtMU0N8Fp2SeWaMw5LiiBnDgAAWdm', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_qOCom3JGJBFzJvsEwQvDYKIG', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"749\"}', name='book_ticket'), type='function')]))], created=1737757828, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=20, prompt_tokens=391, total_tokens=411, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool book_ticket for London for 749\n", + "Tool get_ticket_price called for London\n" + ] + }, + { + "data": { + "application/json": [ + { + "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", + "role": "system" + }, + { + "content": "tickets to london and paris for $50 each please", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "user" + }, + { + "content": "I'm sorry, but tickets to London are $799 and to Paris are $899, which is much higher than $50.", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "assistant" + }, + { + "content": "Can't you book them any way pretty please?", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "user" + }, + { + "content": "I'm afraid I cannot book the tickets at the price you've requested; the current prices are fixed.", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "assistant" + }, + { + "content": "how about you book london for $749?", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "user" + }, + { + "content": "Your ticket to London has been successfully booked for $749!", + "metadata": { + "duration": null, + "id": null, + "parent_id": null, + "status": null, + "title": null + }, + "options": null, + "role": "assistant" + }, + { + "content": "cool, what was the discount?", + "role": "user" + } + ], + "text/plain": [ + "" + ] + }, + "metadata": { + "application/json": { + "expanded": false, + "root": "root" + } + }, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-AtMUBOoWmKT4m7Ru3mkPRx7mQPgmd', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The original price for the ticket to London was $799, so you received a discount of $50.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1737757839, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=23, prompt_tokens=418, total_tokens=441, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/json": [ + { + "content": "You are a helpful assistant for an Airline called FlightAI. Give short, courteous answers, no more than 1 sentence. Always be accurate. If you don't know the answer, say so.", + "role": "system" + }, + { + "content": "tickets to london and paris for $50 each please", + "role": "user" + } + ], + "text/plain": [ + "" + ] + }, + "metadata": { + "application/json": { + "expanded": false, + "root": "root" + } + }, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-AtMUh5f9LEaGjH0FLpPdKf6jgyQsT', choices=[Choice(finish_reason='tool_calls', index=0, logprobs=None, message=ChatCompletionMessage(content=None, refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_6Ihkd1XGA10QxxlCn9uIJvqO', function=Function(arguments='{\"destination_city\": \"London\"}', name='get_ticket_price'), type='function'), ChatCompletionMessageToolCall(id='call_a9qmfQQlwU5L8pu2mvBgMMXl', function=Function(arguments='{\"destination_city\": \"Paris\"}', name='get_ticket_price'), type='function')]))], created=1737757871, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier='default', system_fingerprint='fp_72ed7ab54c', usage=CompletionUsage(completion_tokens=49, prompt_tokens=313, total_tokens=362, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool get_ticket_price called for London\n", + "Tool get_ticket_price called for Paris\n" ] } ], From c80065df86fb70d03139326b28130b07dac1b4e0 Mon Sep 17 00:00:00 2001 From: Edward Donner Date: Fri, 14 Mar 2025 09:16:31 -0400 Subject: [PATCH 25/43] Improvements to descriptions and links --- SETUP-PC.md | 4 +- week1/day2 EXERCISE.ipynb | 30 + week1/troubleshooting.ipynb | 4 +- .../day4_OCR_model_exercise.ipynb | 306 +++++---- .../day5_with_Gradio.ipynb | 610 ++++++++-------- .../en-de-fr_dataset_generator.ipynb | 650 +++++++++--------- .../synthetic_data_generator.ipynb | 6 +- .../day 5 - ollama_rag_1.ipynb | 6 +- .../community-contributions/day3-gemini.ipynb | 4 +- week5/day2.ipynb | 2 +- week6/day1.ipynb | 22 +- week6/day2.ipynb | 13 +- week6/day3.ipynb | 13 +- week6/day4.ipynb | 28 +- week6/day5.ipynb | 28 +- week8/agents/ensemble_agent.py | 2 +- week8/day1.ipynb | 15 +- week8/day2.4.ipynb | 2 +- week8/day5.ipynb | 24 +- 19 files changed, 948 insertions(+), 821 deletions(-) diff --git a/SETUP-PC.md b/SETUP-PC.md index 1261a5e..875d188 100644 --- a/SETUP-PC.md +++ b/SETUP-PC.md @@ -22,7 +22,7 @@ There are 4 common gotchas to developing on Windows to be aware of: 1. Permissions. Please take a look at this [tutorial](https://chatgpt.com/share/67b0ae58-d1a8-8012-82ca-74762b0408b0) on permissions on Windows 2. Anti-virus, Firewall, VPN. These can interfere with installations and network access; try temporarily disabling them as needed 3. The evil Windows 260 character limit to filenames - here is a full [explanation and fix](https://chatgpt.com/share/67b0afb9-1b60-8012-a9f7-f968a5a910c7)! -4. If you've not worked with Data Science packages on your computer before, you might need to install Microsoft Build Tools. Here are [instructions](https://chatgpt.com/share/67b0b762-327c-8012-b809-b4ec3b9e7be0). +4. If you've not worked with Data Science packages on your computer before, you might need to install Microsoft Build Tools. Here are [instructions](https://chatgpt.com/share/67b0b762-327c-8012-b809-b4ec3b9e7be0). A student also mentioned that [these instructions](https://github.com/bycloudai/InstallVSBuildToolsWindows) might be helpful for people on Windows 11. ### Part 1: Clone the Repo @@ -109,7 +109,7 @@ If you see an error like this: > Microsoft Visual C++ 14.0 or greater is required. Get it with "Microsoft C++ Build Tools": [https://visualstudio.microsoft.com/visual-cpp-build-tools/](https://visualstudio.microsoft.com/visual-cpp-build-tools/) -Then please follow the link and install Microsoft C++ Build Tools. +Then please follow the link and install Microsoft C++ Build Tools. A student also mentioned that [these instructions](https://github.com/bycloudai/InstallVSBuildToolsWindows) might be helpful for people on Windows 11. In the very unlikely event that this step doesn't go well, you should try the bullet-proof (but slower) version: `pip install --retries 5 --timeoutย 15 --no-cache-dir --force-reinstall -r requirements.txt` diff --git a/week1/day2 EXERCISE.ipynb b/week1/day2 EXERCISE.ipynb index 81077ed..2c079f1 100644 --- a/week1/day2 EXERCISE.ipynb +++ b/week1/day2 EXERCISE.ipynb @@ -203,6 +203,36 @@ "print(response.choices[0].message.content)" ] }, + { + "cell_type": "markdown", + "id": "9f9e22da-b891-41f6-9ac9-bd0c0a5f4f44", + "metadata": {}, + "source": [ + "## Are you confused about why that works?\n", + "\n", + "It seems strange, right? We just used OpenAI code to call Ollama?? What's going on?!\n", + "\n", + "Here's the scoop:\n", + "\n", + "The python class `OpenAI` is simply code written by OpenAI engineers that makes calls over the internet to an endpoint. \n", + "\n", + "When you call `openai.chat.completions.create()`, this python code just makes a web request to the following url: \"https://api.openai.com/v1/chat/completions\"\n", + "\n", + "Code like this is known as a \"client library\" - it's just wrapper code that runs on your machine to make web requests. The actual power of GPT is running on OpenAI's cloud behind this API, not on your computer!\n", + "\n", + "OpenAI was so popular, that lots of other AI providers provided identical web endpoints, so you could use the same approach.\n", + "\n", + "So Ollama has an endpoint running on your local box at http://localhost:11434/v1/chat/completions \n", + "And in week 2 we'll discover that lots of other providers do this too, including Gemini and DeepSeek.\n", + "\n", + "And then the team at OpenAI had a great idea: they can extend their client library so you can specify a different 'base url', and use their library to call any compatible API.\n", + "\n", + "That's it!\n", + "\n", + "So when you say: `ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')` \n", + "Then this will make the same endpoint calls, but to Ollama instead of OpenAI." + ] + }, { "cell_type": "markdown", "id": "bc7d1de3-e2ac-46ff-a302-3b4ba38c4c90", diff --git a/week1/troubleshooting.ipynb b/week1/troubleshooting.ipynb index 51146a4..f439340 100644 --- a/week1/troubleshooting.ipynb +++ b/week1/troubleshooting.ipynb @@ -68,7 +68,7 @@ "1. Permissions. Please take a look at this [tutorial](https://chatgpt.com/share/67b0ae58-d1a8-8012-82ca-74762b0408b0) on permissions on Windows\n", "2. Anti-virus, Firewall, VPN. These can interfere with installations and network access; try temporarily disabling them as needed\n", "3. The evil Windows 260 character limit to filenames - here is a full [explanation and fix](https://chatgpt.com/share/67b0afb9-1b60-8012-a9f7-f968a5a910c7)!\n", - "4. If you've not worked with Data Science packages on your computer before, you might need to install Microsoft Build Tools. Here are [instructions](https://chatgpt.com/share/67b0b762-327c-8012-b809-b4ec3b9e7be0).\n", + "4. If you've not worked with Data Science packages on your computer before, you might need to install Microsoft Build Tools. Here are [instructions](https://chatgpt.com/share/67b0b762-327c-8012-b809-b4ec3b9e7be0). A student also mentioned that [these instructions](https://github.com/bycloudai/InstallVSBuildToolsWindows) might be helpful for people on Windows 11. \n", "\n", "## And for Mac people\n", "\n", @@ -127,7 +127,7 @@ " print(f\"Environment Name: {venv_name}\")\n", "\n", "if conda_name != \"llms\" and venv_name != \"llms\" and venv_name != \"venv\":\n", - " print(\"Neither Anaconda nor Virtualenv seem to be activated with the expected name 'llms'\")\n", + " print(\"Neither Anaconda nor Virtualenv seem to be activated with the expected name 'llms' or 'venv'\")\n", " print(\"Did you run 'jupyter lab' from an activated environment with (llms) showing on the command line?\")\n", " print(\"If in doubt, close down all jupyter lab, and follow Part 5 in the SETUP-PC or SETUP-mac guide.\")" ] diff --git a/week3/community-contributions/day4_OCR_model_exercise.ipynb b/week3/community-contributions/day4_OCR_model_exercise.ipynb index 48f8cc3..ef4e7cf 100644 --- a/week3/community-contributions/day4_OCR_model_exercise.ipynb +++ b/week3/community-contributions/day4_OCR_model_exercise.ipynb @@ -1,150 +1,160 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4", - "authorship_tag": "ABX9TyPtAT7Yq5xd4vDcJEZtg69J" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "6gGKXU5RXORf" + }, + "outputs": [], + "source": [ + "# getting the latest transformers first, since this will require a restart\n", + "\n", + "!pip install git+https://github.com/huggingface/transformers.git" + ] }, - "cells": [ - { - "cell_type": "code", - "source": [ - "# getting the latest transformers first, since this will require a restart\n", - "\n", - "!pip install git+https://github.com/huggingface/transformers.git" - ], - "metadata": { - "id": "6gGKXU5RXORf" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# imports\n", - "\n", - "import torch\n", - "from google.colab import userdata\n", - "from huggingface_hub import login\n", - "from transformers import AutoProcessor, AutoModelForImageTextToText\n", - "from google.colab import files" - ], - "metadata": { - "id": "yCRrF4aiXPPo" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# logging in to HF\n", - "\n", - "hf_token = userdata.get('HF_TOKEN')\n", - "login(hf_token, add_to_git_credential=True)" - ], - "metadata": { - "id": "AAlOQuCbXcrv" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_RRVc2j2Vun-" - }, - "outputs": [], - "source": [ - "# this will start an input prompt for uploading local files\n", - "\n", - "uploaded = files.upload()\n", - "print(uploaded.keys()) # this will look sth like dict_keys([\"note2.jpg\"])" - ] - }, - { - "cell_type": "code", - "source": [ - "'''\n", - "ChatGPT and Gemini explain the following part roughly like so:\n", - "The string contained in image_path is the key of the entry in the dictionary of uploaded files (see box above).\n", - "The value to that key contains the image in binary format.\n", - "The \"with open(image_path, \"wb\") as f\" part means: Create a new file \"note2.jpg\" on the server, and write to it in binary mode (\"wb\").\n", - "f.write(image) writes the binary image to that new file. \"note2.jpg\" aka image_path will now contain the image.\n", - "'''\n", - "\n", - "image_path = \"note2.jpg\" # update this string depending on the printout in the previous cell!\n", - "image = uploaded[image_path]\n", - "with open(image_path, \"wb\") as f:\n", - " f.write(image)" - ], - "metadata": { - "id": "V_UAuSSkXBKh" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# from HF model instructions\n", - "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", - "model = AutoModelForImageTextToText.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\", device_map=device)\n", - "processor = AutoProcessor.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\")" - ], - "metadata": { - "id": "AiFP-mQtXrpV" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# also from HF documentation about this model, see https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf\n", - "\n", - "image = image_path\n", - "inputs = processor(image, return_tensors=\"pt\").to(device)\n", - "\n", - "ocr = model.generate(\n", - " **inputs,\n", - " do_sample=False,\n", - " tokenizer=processor.tokenizer,\n", - " stop_strings=\"<|im_end|>\",\n", - " max_new_tokens=4096,\n", - ")" - ], - "metadata": { - "id": "7Adr8HB_YNf5" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# prints out the recognized text. This can read my handwriting pretty well! And it works super quick on the free T4 GPU server here.\n", - "\n", - "print(processor.decode(ocr[0, inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True))" - ], - "metadata": { - "id": "nRsRUIIuYdJ9" - }, - "execution_count": null, - "outputs": [] - } - ] -} \ No newline at end of file + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yCRrF4aiXPPo" + }, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import torch\n", + "from google.colab import userdata\n", + "from huggingface_hub import login\n", + "from transformers import AutoProcessor, AutoModelForImageTextToText\n", + "from google.colab import files" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AAlOQuCbXcrv" + }, + "outputs": [], + "source": [ + "# logging in to HF\n", + "\n", + "hf_token = userdata.get('HF_TOKEN')\n", + "login(hf_token, add_to_git_credential=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_RRVc2j2Vun-" + }, + "outputs": [], + "source": [ + "# this will start an input prompt for uploading local files\n", + "\n", + "uploaded = files.upload()\n", + "print(uploaded.keys()) # this will look sth like dict_keys([\"note2.jpg\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "V_UAuSSkXBKh" + }, + "outputs": [], + "source": [ + "'''\n", + "ChatGPT and Gemini explain the following part roughly like so:\n", + "The string contained in image_path is the key of the entry in the dictionary of uploaded files (see box above).\n", + "The value to that key contains the image in binary format.\n", + "The \"with open(image_path, \"wb\") as f\" part means: Create a new file \"note2.jpg\" on the server, and write to it in binary mode (\"wb\").\n", + "f.write(image) writes the binary image to that new file. \"note2.jpg\" aka image_path will now contain the image.\n", + "'''\n", + "\n", + "image_path = \"note2.jpg\" # update this string depending on the printout in the previous cell!\n", + "image = uploaded[image_path]\n", + "with open(image_path, \"wb\") as f:\n", + " f.write(image)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AiFP-mQtXrpV" + }, + "outputs": [], + "source": [ + "# from HF model instructions\n", + "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", + "model = AutoModelForImageTextToText.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\", device_map=device)\n", + "processor = AutoProcessor.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7Adr8HB_YNf5" + }, + "outputs": [], + "source": [ + "# also from HF documentation about this model, see https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf\n", + "\n", + "image = image_path\n", + "inputs = processor(image, return_tensors=\"pt\").to(device)\n", + "\n", + "ocr = model.generate(\n", + " **inputs,\n", + " do_sample=False,\n", + " tokenizer=processor.tokenizer,\n", + " stop_strings=\"<|im_end|>\",\n", + " max_new_tokens=4096,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nRsRUIIuYdJ9" + }, + "outputs": [], + "source": [ + "# prints out the recognized text. This can read my handwriting pretty well! And it works super quick on the free T4 GPU server here.\n", + "\n", + "print(processor.decode(ocr[0, inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True))" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "authorship_tag": "ABX9TyPtAT7Yq5xd4vDcJEZtg69J", + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/week3/community-contributions/day5_with_Gradio.ipynb b/week3/community-contributions/day5_with_Gradio.ipynb index 12206dd..0e80294 100644 --- a/week3/community-contributions/day5_with_Gradio.ipynb +++ b/week3/community-contributions/day5_with_Gradio.ipynb @@ -1,302 +1,312 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "It89APiAtTUF" + }, + "source": [ + "# Create meeting minutes from an Audio file\n", + "\n", + "I downloaded some Denver City Council meeting minutes and selected a portion of the meeting for us to transcribe. You can download it here: \n", + "https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n", + "\n", + "If you'd rather work with the original data, the HuggingFace dataset is [here](https://huggingface.co/datasets/huuuyeah/meetingbank) and the audio can be downloaded [here](https://huggingface.co/datasets/huuuyeah/MeetingBank_Audio/tree/main).\n", + "\n", + "The goal of this product is to use the Audio to generate meeting minutes, including actions.\n", + "\n", + "For this project, you can either use the Denver meeting minutes, or you can record something of your own!\n", + "\n", + "## Please note:\n", + "\n", + "When you run the pip installs in the first cell below, you might get this error - it can be safely ignored - it sounds quite severe, but it doesn't seem to affect anything else in this project!\n", + "\n", + "\n", + "> ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "gcsfs 2024.10.0 requires fsspec==2024.10.0, but you have fsspec 2024.9.0 which is incompatible.\n", + "\n" + ] }, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "# Create meeting minutes from an Audio file\n", - "\n", - "I downloaded some Denver City Council meeting minutes and selected a portion of the meeting for us to transcribe. You can download it here: \n", - "https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n", - "\n", - "If you'd rather work with the original data, the HuggingFace dataset is [here](https://huggingface.co/datasets/huuuyeah/meetingbank) and the audio can be downloaded [here](https://huggingface.co/datasets/huuuyeah/MeetingBank_Audio/tree/main).\n", - "\n", - "The goal of this product is to use the Audio to generate meeting minutes, including actions.\n", - "\n", - "For this project, you can either use the Denver meeting minutes, or you can record something of your own!\n", - "\n", - "## Please note:\n", - "\n", - "When you run the pip installs in the first cell below, you might get this error - it can be safely ignored - it sounds quite severe, but it doesn't seem to affect anything else in this project!\n", - "\n", - "\n", - "> ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", - "gcsfs 2024.10.0 requires fsspec==2024.10.0, but you have fsspec 2024.9.0 which is incompatible.\n", - "\n" - ], - "metadata": { - "id": "It89APiAtTUF" - } - }, - { - "cell_type": "code", - "source": [ - "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio" - ], - "metadata": { - "id": "f2vvgnFpHpID" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "FW8nl3XRFrz0" - }, - "outputs": [], - "source": [ - "# imports\n", - "\n", - "import os\n", - "import requests\n", - "from openai import OpenAI\n", - "from google.colab import drive\n", - "from huggingface_hub import login\n", - "from google.colab import userdata\n", - "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", - "import torch\n", - "import gradio as gr" - ] - }, - { - "cell_type": "code", - "source": [ - "# Constants\n", - "\n", - "AUDIO_MODEL = \"whisper-1\"\n", - "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"" - ], - "metadata": { - "id": "q3D1_T0uG_Qh" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# New capability - connect this Colab to my Google Drive\n", - "# See immediately below this for instructions to obtain denver_extract.mp3\n", - "\n", - "drive.mount(\"/content/drive\")\n", - "audio_filename = \"/content/drive/MyDrive/llms/denver_extract.mp3\"" - ], - "metadata": { - "id": "Es9GkQ0FGCMt" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "source": [ - "# Download denver_extract.mp3\n", - "\n", - "You can either use the same file as me, the extract from Denver city council minutes, or you can try your own..\n", - "\n", - "If you want to use the same as me, then please download my extract here, and put this on your Google Drive: \n", - "https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n" - ], - "metadata": { - "id": "HTl3mcjyzIEE" - } - }, - { - "cell_type": "code", - "source": [ - "# Sign in to HuggingFace Hub\n", - "\n", - "hf_token = userdata.get('HF_TOKEN')\n", - "login(hf_token, add_to_git_credential=True)" - ], - "metadata": { - "id": "xYW8kQYtF-3L" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Sign in to OpenAI using Secrets in Colab\n", - "\n", - "openai_api_key = userdata.get('OPENAI_API_KEY')\n", - "openai = OpenAI(api_key=openai_api_key)" - ], - "metadata": { - "id": "qP6OB2OeGC2C" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Initialize Llama model and tokenizer\n", - "\n", - "quant_config = BitsAndBytesConfig(\n", - " load_in_4bit=True,\n", - " bnb_4bit_use_double_quant=True,\n", - " bnb_4bit_compute_dtype=torch.bfloat16,\n", - " bnb_4bit_quant_type=\"nf4\"\n", - ")\n", - "\n", - "tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", - "tokenizer.pad_token = tokenizer.eos_token\n", - "\n", - "model = AutoModelForCausalLM.from_pretrained(\n", - " LLAMA,\n", - " device_map=\"auto\",\n", - " quantization_config=quant_config\n", - ")" - ], - "metadata": { - "id": "hgQBeIYUyaqj" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Generate meeting minutes\n", - "\n", - "def generate_minutes(transcription, model, tokenizer, progress=gr.Progress()):\n", - " progress(0.6, desc=\"Generating meeting minutes from transcript...\")\n", - "\n", - " system_message = \"You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown.\"\n", - " user_prompt = f\"Below is an extract transcript of a meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\\n{transcription}\"\n", - "\n", - " messages = [\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": \"user\", \"content\": user_prompt}\n", - " ]\n", - "\n", - " inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", - " outputs = model.generate(inputs, max_new_tokens=2000)\n", - " response = tokenizer.decode(outputs[0])\n", - "\n", - " # Clean up the response, keep only the minutes\n", - " progress(0.9, desc=\"Cleaning and formatting minutes...\")\n", - " response = response.split(\"<|end_header_id|>\")[-1].strip().replace(\"<|eot_id|>\",\"\")\n", - "\n", - " return response" - ], - "metadata": { - "id": "u9aFA7tjy3Ri" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Transcribe the uploaded audio file using OpenAI's Whisper model\n", - "\n", - "def transcribe_audio(audio_path, progress=gr.Progress()):\n", - " progress(0.3, desc=\"Creating transcript from audio...\")\n", - "\n", - " try:\n", - " with open(audio_path, \"rb\") as audio_file:\n", - " transcription = openai.audio.transcriptions.create(\n", - " model=AUDIO_MODEL,\n", - " file=audio_file,\n", - " response_format=\"text\"\n", - " )\n", - " return transcription\n", - " except Exception as e:\n", - " return f\"Error during transcription: {str(e)}\"" - ], - "metadata": { - "id": "OEuqR90Vy4AZ" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Process the uploaded audio file, transcribe it, and generate meeting minutes\n", - "\n", - "def process_upload(audio_file, progress=gr.Progress()):\n", - " progress(0.1, desc=\"Starting process...\")\n", - "\n", - " if audio_file is None:\n", - " return \"Please upload an audio file.\"\n", - "\n", - " try:\n", - " # Check file format\n", - " if not str(audio_file).lower().endswith('.mp3'):\n", - " return \"Please upload an MP3 file.\"\n", - "\n", - " # Get transcription\n", - " transcription = transcribe_audio(audio_file)\n", - " if transcription.startswith(\"Error\"):\n", - " return transcription\n", - "\n", - " # Generate minutes\n", - " minutes = generate_minutes(transcription, model, tokenizer)\n", - " progress(1.0, desc=\"Process complete!\")\n", - " return minutes\n", - "\n", - " except Exception as e:\n", - " return f\"Error processing file: {str(e)}\"" - ], - "metadata": { - "id": "lmdsy2iDy5d7" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Create Gradio interface\n", - "\n", - "interface = gr.Interface(\n", - " fn=process_upload,\n", - " inputs=gr.Audio(type=\"filepath\", label=\"Upload MP3 File\", format=\"mp3\"),\n", - " outputs=gr.Markdown(label=\"Meeting Minutes\", min_height=60),\n", - " title=\"Meeting Minutes Generator\",\n", - " description=\"Upload an MP3 recording of your meeting to get AI-generated meeting minutes. This process may take a few minutes.\",\n", - " flagging_mode=\"never\"\n", - ")" - ], - "metadata": { - "id": "k2U2bWtey7Yo" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Launch Gradio interface\n", - "\n", - "interface.launch()" - ], - "metadata": { - "id": "X3JbzRNRy9oG" - }, - "execution_count": null, - "outputs": [] - } - ] -} \ No newline at end of file + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "f2vvgnFpHpID" + }, + "outputs": [], + "source": [ + "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FW8nl3XRFrz0" + }, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from openai import OpenAI\n", + "from google.colab import drive\n", + "from huggingface_hub import login\n", + "from google.colab import userdata\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", + "import torch\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "q3D1_T0uG_Qh" + }, + "outputs": [], + "source": [ + "# Constants\n", + "\n", + "AUDIO_MODEL = \"whisper-1\"\n", + "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Es9GkQ0FGCMt" + }, + "outputs": [], + "source": [ + "# New capability - connect this Colab to my Google Drive\n", + "# See immediately below this for instructions to obtain denver_extract.mp3\n", + "\n", + "drive.mount(\"/content/drive\")\n", + "audio_filename = \"/content/drive/MyDrive/llms/denver_extract.mp3\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HTl3mcjyzIEE" + }, + "source": [ + "# Download denver_extract.mp3\n", + "\n", + "You can either use the same file as me, the extract from Denver city council minutes, or you can try your own..\n", + "\n", + "If you want to use the same as me, then please download my extract here, and put this on your Google Drive: \n", + "https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xYW8kQYtF-3L" + }, + "outputs": [], + "source": [ + "# Sign in to HuggingFace Hub\n", + "\n", + "hf_token = userdata.get('HF_TOKEN')\n", + "login(hf_token, add_to_git_credential=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qP6OB2OeGC2C" + }, + "outputs": [], + "source": [ + "# Sign in to OpenAI using Secrets in Colab\n", + "\n", + "openai_api_key = userdata.get('OPENAI_API_KEY')\n", + "openai = OpenAI(api_key=openai_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hgQBeIYUyaqj" + }, + "outputs": [], + "source": [ + "# Initialize Llama model and tokenizer\n", + "\n", + "quant_config = BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + ")\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", + "tokenizer.pad_token = tokenizer.eos_token\n", + "\n", + "model = AutoModelForCausalLM.from_pretrained(\n", + " LLAMA,\n", + " device_map=\"auto\",\n", + " quantization_config=quant_config\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "u9aFA7tjy3Ri" + }, + "outputs": [], + "source": [ + "# Generate meeting minutes\n", + "\n", + "def generate_minutes(transcription, model, tokenizer, progress=gr.Progress()):\n", + " progress(0.6, desc=\"Generating meeting minutes from transcript...\")\n", + "\n", + " system_message = \"You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown.\"\n", + " user_prompt = f\"Below is an extract transcript of a meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\\n{transcription}\"\n", + "\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + "\n", + " inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", + " outputs = model.generate(inputs, max_new_tokens=2000)\n", + " response = tokenizer.decode(outputs[0])\n", + "\n", + " # Clean up the response, keep only the minutes\n", + " progress(0.9, desc=\"Cleaning and formatting minutes...\")\n", + " response = response.split(\"<|end_header_id|>\")[-1].strip().replace(\"<|eot_id|>\",\"\")\n", + "\n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OEuqR90Vy4AZ" + }, + "outputs": [], + "source": [ + "# Transcribe the uploaded audio file using OpenAI's Whisper model\n", + "\n", + "def transcribe_audio(audio_path, progress=gr.Progress()):\n", + " progress(0.3, desc=\"Creating transcript from audio...\")\n", + "\n", + " try:\n", + " with open(audio_path, \"rb\") as audio_file:\n", + " transcription = openai.audio.transcriptions.create(\n", + " model=AUDIO_MODEL,\n", + " file=audio_file,\n", + " response_format=\"text\"\n", + " )\n", + " return transcription\n", + " except Exception as e:\n", + " return f\"Error during transcription: {str(e)}\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lmdsy2iDy5d7" + }, + "outputs": [], + "source": [ + "# Process the uploaded audio file, transcribe it, and generate meeting minutes\n", + "\n", + "def process_upload(audio_file, progress=gr.Progress()):\n", + " progress(0.1, desc=\"Starting process...\")\n", + "\n", + " if audio_file is None:\n", + " return \"Please upload an audio file.\"\n", + "\n", + " try:\n", + " # Check file format\n", + " if not str(audio_file).lower().endswith('.mp3'):\n", + " return \"Please upload an MP3 file.\"\n", + "\n", + " # Get transcription\n", + " transcription = transcribe_audio(audio_file)\n", + " if transcription.startswith(\"Error\"):\n", + " return transcription\n", + "\n", + " # Generate minutes\n", + " minutes = generate_minutes(transcription, model, tokenizer)\n", + " progress(1.0, desc=\"Process complete!\")\n", + " return minutes\n", + "\n", + " except Exception as e:\n", + " return f\"Error processing file: {str(e)}\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "k2U2bWtey7Yo" + }, + "outputs": [], + "source": [ + "# Create Gradio interface\n", + "\n", + "interface = gr.Interface(\n", + " fn=process_upload,\n", + " inputs=gr.Audio(type=\"filepath\", label=\"Upload MP3 File\", format=\"mp3\"),\n", + " outputs=gr.Markdown(label=\"Meeting Minutes\", min_height=60),\n", + " title=\"Meeting Minutes Generator\",\n", + " description=\"Upload an MP3 recording of your meeting to get AI-generated meeting minutes. This process may take a few minutes.\",\n", + " flagging_mode=\"never\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "X3JbzRNRy9oG" + }, + "outputs": [], + "source": [ + "# Launch Gradio interface\n", + "\n", + "interface.launch()" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/week3/community-contributions/en-de-fr_dataset_generator.ipynb b/week3/community-contributions/en-de-fr_dataset_generator.ipynb index 0c3e0d5..af35dd8 100644 --- a/week3/community-contributions/en-de-fr_dataset_generator.ipynb +++ b/week3/community-contributions/en-de-fr_dataset_generator.ipynb @@ -1,322 +1,332 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4", - "authorship_tag": "ABX9TyPxJzufoQPtui+nhl1J1xiR" - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yqlQTsxNdKrN" + }, + "outputs": [], + "source": [ + "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio" + ] }, - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "yqlQTsxNdKrN" - }, - "outputs": [], - "source": [ - "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio" - ] - }, - { - "cell_type": "code", - "source": [ - "import os\n", - "import requests\n", - "from IPython.display import Markdown, display, update_display\n", - "from openai import OpenAI\n", - "from google.colab import drive\n", - "from huggingface_hub import login\n", - "from google.colab import userdata\n", - "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", - "import torch\n", - "import gradio as gr\n", - "import re" - ], - "metadata": { - "id": "eyfvQrLxdkGT" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# one can always add more models, of course\n", - "\n", - "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"\n", - "OPENAI_MODEL = \"gpt-4o-mini\"" - ], - "metadata": { - "id": "WW-cSZk7dnp6" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "hf_token = userdata.get('HF_TOKEN')\n", - "login(hf_token, add_to_git_credential=True)\n", - "openai_api_key = userdata.get('OPENAI_API_KEY')\n", - "openai = OpenAI(api_key=openai_api_key)" - ], - "metadata": { - "id": "XG7Iam6Rdw8F" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "force_dark_mode = \"\"\"\n", - "function refresh() {\n", - " const url = new URL(window.location);\n", - " if (url.searchParams.get('__theme') !== 'dark') {\n", - " url.searchParams.set('__theme', 'dark');\n", - " window.location.href = url.href;\n", - " }\n", - "}\n", - "\"\"\"" - ], - "metadata": { - "id": "Ov7WSdx9dzSt" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "def dataset_generator(model, nature, shots, volume, language):\n", - "\n", - " examples = \"Instruction: 'Make a random sentence.'\\nAnswer: 'When I got home last night, I couldn't believe my eyes: All the pineapples had been removed from the pizza.'\"\n", - " system_message = \"You are a random sentence generator. Generate 10 diverse English sentences.\"\n", - " user_prompt = f\"Generate 10 random English sentences, like so:\\n{examples}\"\n", - " sentences = \"\"\n", - "\n", - " if language == \"English\":\n", - "\n", - " for shot in list(shots.keys()):\n", - " examples += f\"\\nExample instruction: '{shot}'\\nExample answer: '{shots[shot]}'\\n\"\n", - "\n", - " system_message = f\"You are a state-of-the art linguistic dataset compiler. You are given a 'Type' of sentence to create. \\\n", - "Within the bounds of that type, create {volume} diverse sentences with differing structures and lengths. Make the sentences plausible, \\\n", - "but be creative in filling them with random concrete information, names, and data. Here are some examples for how to go about that:\\n{examples}\\n\\\n", - "Just output one sentence per line. Do not comment or format yor output in any way, shape, or form.\"\n", - "\n", - " user_prompt = f\"Generate {volume} English sentences of the following Type: {nature}. Just output one sentence per line. \\\n", - "Do not comment or format yor output in any way, shape, or form.\"\n", - "\n", - " elif language == \"German\":\n", - "\n", - " for shot in list(shots.keys()):\n", - " examples += f\"\\nAnweisung: '{shot}'\\nAntwort: '{shots[shot]}'\\n\"\n", - "\n", - " system_message = f\"Du bist ein weltklasse Datensatz-Sammler fรผr Sprachdaten. Du erhรคltst einen 'Typ' von Sรคtzen, die du erstellen sollst. \\\n", - "Im Rahmen dieses Typs, generiere {volume} untereinander verschiedene Sรคtze mit unterschiedlichen Satzlรคngen und -strukturen. Mache die Beispielsรคtze \\\n", - "plausibel, aber fรผlle sie kreativ mit willkรผrlichen Informationen, Namen, und Daten aller Art. Hier sind ein paar Beispiel, wie du vorgehen sollst:\\n{examples}\\n\\\n", - "Gib einfach einen Satz pro Zeile aus. Kommentiere oder formatiere deine Antwort in keinster Weise.\"\n", - "\n", - " user_prompt = f\"Generiere {volume} deutsche Sรคtze des folgenden Typs: {nature}. Gib einfach einen Satz pro Zeile aus. \\\n", - "Kommentiere oder formatiere deine Antwort in keiner Weise.\"\n", - "\n", - " elif language == \"French\":\n", - "\n", - " for shot in list(shots.keys()):\n", - " examples += f\"\\nConsigne: '{shot}'\\nRรฉponse: '{shots[shot]}'\\n\"\n", - "\n", - " system_message = f\"Tu es un outil linguistique de pointe, ร  savoir, un genรฉrateur de donnรฉes linguistiques. Tu seras assignรฉ un 'Type' de phrases ร  crรฉer. \\\n", - "Dans le cadre de ce type-lร , crรฉe {volume} phrases diverses, avec des structures et longueurs qui varient. Gรฉnรจre des phrases qui soient plausibles, \\\n", - "mais sois crรฉatif, et sers-toi de donnรฉes, noms, et informations alรฉatoires pour rendre les phrases plus naturelles. Voici quelques examples comment faire:\\n{examples}\\n\\\n", - "Sors une seule phrase par ligne. Ne formatte ni commente ta rรฉponse en aucune maniรจre que ce soit.\"\n", - "\n", - " user_prompt = f\"S'il te plaรฎt, crรฉe {volume} phrases en franรงais du Type suivant: {nature}. Sors une seule phrase par ligne. \\\n", - "Ne formatte ni commente ta rรฉponse en aucune maniรจre que ce soit.\"\n", - "\n", - " messages = [\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": \"user\", \"content\": user_prompt}\n", - " ]\n", - "\n", - " if model == \"Llama\":\n", - "\n", - " quant_config = BitsAndBytesConfig(\n", - " load_in_4bit=True,\n", - " bnb_4bit_use_double_quant=True,\n", - " bnb_4bit_compute_dtype=torch.bfloat16,\n", - " bnb_4bit_quant_type=\"nf4\"\n", - " )\n", - "\n", - " tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", - " tokenizer.pad_token = tokenizer.eos_token\n", - " inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", - " streamer = TextStreamer(tokenizer)\n", - " model = AutoModelForCausalLM.from_pretrained(LLAMA, device_map=\"auto\", quantization_config=quant_config)\n", - " outputs = model.generate(inputs, max_new_tokens=10000)\n", - "\n", - " response = tokenizer.decode(outputs[0])\n", - " sentences = list(re.finditer(\"(?:<\\|end_header_id\\|>)([^<]+)(?:<\\|eot_id\\|>)\", str(response), re.DOTALL))[-1].group(1)\n", - "\n", - " elif model == \"OpenAI\":\n", - " response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages)\n", - " sentences = response.choices[0].message.content\n", - "\n", - " return sentences" - ], - "metadata": { - "id": "bEF8w_Mdd2Nb" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "global data\n", - "data = \"\"\n", - "\n", - "with gr.Blocks(\n", - " css=\"\"\"\n", - " .red-button {\n", - " background-color: darkred !important;\n", - " border-color: red !important;\n", - " }\n", - " .blue-button {\n", - " background-color: darkblue !important;\n", - " border-color: blue !important;\n", - " }\n", - " .green-button {\n", - " background-color: green !important;\n", - " border-color: green !important;\n", - " }\n", - " \"\"\"\n", - ") as view:\n", - " with gr.Row():\n", - " title = gr.HTML(\"

Dataset Generator PLUS

for English, German, and French

\")\n", - " subtitle = gr.HTML(\"

Instructions:

  1. Pick the language
  2. \\\n", - "
  3. Select a model
  4. Indicate how many sentences you need
  5. \\\n", - "
  6. Describe the type of sentence you're looking for
  7. Give up to three examples of the desired output sentence, and describe each of them briefly
  8. \\\n", - "
  9. Hit Create Dataset
  10. \\\n", - "
  11. Save the output (.txt) to your Google Drive
  12. \")\n", - " with gr.Row():\n", - " language_choice = gr.Dropdown(choices=[\"English\", \"German\", \"French\"], label=\"Select language\", value=\"English\", interactive=True)\n", - " model_choice = gr.Dropdown(choices=[\"Llama\", \"OpenAI\"], label=\"Select model\", value=\"Llama\", interactive=True)\n", - " volume = gr.Textbox(label=\"Required number of sentences\", interactive=True)\n", - " with gr.Row():\n", - " typeInput = gr.Textbox(label=\"Short description of the kind of sentence you need\", interactive=True)\n", - " with gr.Row():\n", - " sentence_1 = gr.Textbox(label=\"Example sentence 1\", interactive=True)\n", - " instruction_1 = gr.Textbox(label=\"Description\", interactive=True)\n", - " with gr.Row():\n", - " sentence_2 = gr.Textbox(label=\"Example sentence 2\", interactive=True)\n", - " instruction_2 = gr.Textbox(label=\"Description\", interactive=True)\n", - " with gr.Row():\n", - " sentence_3 = gr.Textbox(label=\"Example sentence 3\", interactive=True)\n", - " instruction_3 = gr.Textbox(label=\"Description\", interactive=True)\n", - " with gr.Row():\n", - " liveSentences = gr.Markdown(\n", - " value='
    Your sentences will be displayed here โ€ฆ
    ',\n", - " label=\"Generated sentences:\",\n", - " min_height=60,\n", - " max_height=200\n", - " )\n", - " with gr.Row():\n", - " generate = gr.Button(value=\"Generate sentences\", elem_classes=\"blue-button\")\n", - " with gr.Row():\n", - " clear = gr.Button(value=\"Clear everything\", elem_classes=\"red-button\")\n", - " with gr.Row():\n", - " outputPath = gr.Textbox(label=\"Specify the desired name and location on your Google Drive for the sentences (plain text) to be saved\", interactive=True)\n", - " with gr.Row():\n", - " save = gr.Button(value=\"Save generated data\", elem_classes=\"blue-button\")\n", - "\n", - " def generateSentences(typeInput, s1, i1, s2, i2, s3, i3, volume, language, model):\n", - " global data\n", - " nature = \"\"\n", - " shots = {}\n", - " amount = int(volume) if re.search(\"^[0-9]+$\", volume) is not None else 10\n", - "\n", - " if typeInput != None:\n", - " nature = typeInput\n", - " else:\n", - " nature = \"Random sentences of mixed nature\"\n", - "\n", - " if s1 != None:\n", - " if i1 != None:\n", - " shots[i1] = s1\n", - " else:\n", - " shots[\"A medium-long random sentence about anything\"] = s1\n", - " else:\n", - " shots[\"A medium-long random sentence about anything\"] = \"Paul, waking up out of his half-drunken haze, clearly couldn't tell left from right and ran right into the door.\"\n", - "\n", - " if s2 != None:\n", - " if i2 != None:\n", - " shots[i2] = s2\n", - " else:\n", - " shots[\"A medium-long random sentence about anything\"] = s2\n", - "\n", - " if s3 != None:\n", - " if i3 != None:\n", - " shots[i3] = s3\n", - " else:\n", - " shots[\"A medium-long random sentence about anything\"] = s3\n", - "\n", - " sentences = dataset_generator(model, nature, shots, amount, language)\n", - " data = sentences\n", - "\n", - " return sentences\n", - "\n", - " def saveData(path):\n", - " global data\n", - " drive.mount(\"/content/drive\")\n", - "\n", - " dir_path = os.path.dirname(\"/content/drive/MyDrive/\" + path)\n", - "\n", - " if not os.path.exists(dir_path):\n", - " os.makedirs(dir_path)\n", - "\n", - " with open(\"/content/drive/MyDrive/\" + path, \"w\", encoding=\"utf-8\") as f:\n", - " f.write(data)\n", - "\n", - " generate.click(generateSentences, inputs=[typeInput, sentence_1, instruction_1, sentence_2, instruction_2, sentence_3, instruction_3, volume, language_choice, model_choice], outputs=liveSentences)\n", - " clear.click(\n", - " lambda: [\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"\"),\n", - " gr.update(value='
    Your sentences will be displayed here โ€ฆ
    '),\n", - " gr.update(value=\"\"),\n", - " gr.update(value=\"Save generated data\", elem_classes=\"blue-button\")],\n", - " None,\n", - " [volume, typeInput, sentence_1, instruction_1, sentence_2, instruction_2,\n", - " sentence_3, instruction_3, liveSentences, outputPath, save],\n", - " queue=False\n", - " )\n", - " save.click(saveData, inputs=outputPath, outputs=None).then(lambda: gr.update(value=\"Your data has been saved\", elem_classes=\"green-button\"), [], [save])\n", - "\n", - "view.launch(share=True) #, debug=True)" - ], - "metadata": { - "id": "VRKdu0fEt8mg" - }, - "execution_count": null, - "outputs": [] - } - ] -} \ No newline at end of file + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "eyfvQrLxdkGT" + }, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "from google.colab import drive\n", + "from huggingface_hub import login\n", + "from google.colab import userdata\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", + "import torch\n", + "import gradio as gr\n", + "import re" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WW-cSZk7dnp6" + }, + "outputs": [], + "source": [ + "# one can always add more models, of course\n", + "\n", + "LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"\n", + "OPENAI_MODEL = \"gpt-4o-mini\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XG7Iam6Rdw8F" + }, + "outputs": [], + "source": [ + "hf_token = userdata.get('HF_TOKEN')\n", + "login(hf_token, add_to_git_credential=True)\n", + "openai_api_key = userdata.get('OPENAI_API_KEY')\n", + "openai = OpenAI(api_key=openai_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ov7WSdx9dzSt" + }, + "outputs": [], + "source": [ + "force_dark_mode = \"\"\"\n", + "function refresh() {\n", + " const url = new URL(window.location);\n", + " if (url.searchParams.get('__theme') !== 'dark') {\n", + " url.searchParams.set('__theme', 'dark');\n", + " window.location.href = url.href;\n", + " }\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bEF8w_Mdd2Nb" + }, + "outputs": [], + "source": [ + "def dataset_generator(model, nature, shots, volume, language):\n", + "\n", + " examples = \"Instruction: 'Make a random sentence.'\\nAnswer: 'When I got home last night, I couldn't believe my eyes: All the pineapples had been removed from the pizza.'\"\n", + " system_message = \"You are a random sentence generator. Generate 10 diverse English sentences.\"\n", + " user_prompt = f\"Generate 10 random English sentences, like so:\\n{examples}\"\n", + " sentences = \"\"\n", + "\n", + " if language == \"English\":\n", + "\n", + " for shot in list(shots.keys()):\n", + " examples += f\"\\nExample instruction: '{shot}'\\nExample answer: '{shots[shot]}'\\n\"\n", + "\n", + " system_message = f\"You are a state-of-the art linguistic dataset compiler. You are given a 'Type' of sentence to create. \\\n", + "Within the bounds of that type, create {volume} diverse sentences with differing structures and lengths. Make the sentences plausible, \\\n", + "but be creative in filling them with random concrete information, names, and data. Here are some examples for how to go about that:\\n{examples}\\n\\\n", + "Just output one sentence per line. Do not comment or format yor output in any way, shape, or form.\"\n", + "\n", + " user_prompt = f\"Generate {volume} English sentences of the following Type: {nature}. Just output one sentence per line. \\\n", + "Do not comment or format yor output in any way, shape, or form.\"\n", + "\n", + " elif language == \"German\":\n", + "\n", + " for shot in list(shots.keys()):\n", + " examples += f\"\\nAnweisung: '{shot}'\\nAntwort: '{shots[shot]}'\\n\"\n", + "\n", + " system_message = f\"Du bist ein weltklasse Datensatz-Sammler fรผr Sprachdaten. Du erhรคltst einen 'Typ' von Sรคtzen, die du erstellen sollst. \\\n", + "Im Rahmen dieses Typs, generiere {volume} untereinander verschiedene Sรคtze mit unterschiedlichen Satzlรคngen und -strukturen. Mache die Beispielsรคtze \\\n", + "plausibel, aber fรผlle sie kreativ mit willkรผrlichen Informationen, Namen, und Daten aller Art. Hier sind ein paar Beispiel, wie du vorgehen sollst:\\n{examples}\\n\\\n", + "Gib einfach einen Satz pro Zeile aus. Kommentiere oder formatiere deine Antwort in keinster Weise.\"\n", + "\n", + " user_prompt = f\"Generiere {volume} deutsche Sรคtze des folgenden Typs: {nature}. Gib einfach einen Satz pro Zeile aus. \\\n", + "Kommentiere oder formatiere deine Antwort in keiner Weise.\"\n", + "\n", + " elif language == \"French\":\n", + "\n", + " for shot in list(shots.keys()):\n", + " examples += f\"\\nConsigne: '{shot}'\\nRรฉponse: '{shots[shot]}'\\n\"\n", + "\n", + " system_message = f\"Tu es un outil linguistique de pointe, ร  savoir, un genรฉrateur de donnรฉes linguistiques. Tu seras assignรฉ un 'Type' de phrases ร  crรฉer. \\\n", + "Dans le cadre de ce type-lร , crรฉe {volume} phrases diverses, avec des structures et longueurs qui varient. Gรฉnรจre des phrases qui soient plausibles, \\\n", + "mais sois crรฉatif, et sers-toi de donnรฉes, noms, et informations alรฉatoires pour rendre les phrases plus naturelles. Voici quelques examples comment faire:\\n{examples}\\n\\\n", + "Sors une seule phrase par ligne. Ne formatte ni commente ta rรฉponse en aucune maniรจre que ce soit.\"\n", + "\n", + " user_prompt = f\"S'il te plaรฎt, crรฉe {volume} phrases en franรงais du Type suivant: {nature}. Sors une seule phrase par ligne. \\\n", + "Ne formatte ni commente ta rรฉponse en aucune maniรจre que ce soit.\"\n", + "\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + "\n", + " if model == \"Llama\":\n", + "\n", + " quant_config = BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + " )\n", + "\n", + " tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", + " tokenizer.pad_token = tokenizer.eos_token\n", + " inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", + " streamer = TextStreamer(tokenizer)\n", + " model = AutoModelForCausalLM.from_pretrained(LLAMA, device_map=\"auto\", quantization_config=quant_config)\n", + " outputs = model.generate(inputs, max_new_tokens=10000)\n", + "\n", + " response = tokenizer.decode(outputs[0])\n", + " sentences = list(re.finditer(\"(?:<\\|end_header_id\\|>)([^<]+)(?:<\\|eot_id\\|>)\", str(response), re.DOTALL))[-1].group(1)\n", + "\n", + " elif model == \"OpenAI\":\n", + " response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages)\n", + " sentences = response.choices[0].message.content\n", + "\n", + " return sentences" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VRKdu0fEt8mg" + }, + "outputs": [], + "source": [ + "global data\n", + "data = \"\"\n", + "\n", + "with gr.Blocks(\n", + " css=\"\"\"\n", + " .red-button {\n", + " background-color: darkred !important;\n", + " border-color: red !important;\n", + " }\n", + " .blue-button {\n", + " background-color: darkblue !important;\n", + " border-color: blue !important;\n", + " }\n", + " .green-button {\n", + " background-color: green !important;\n", + " border-color: green !important;\n", + " }\n", + " \"\"\"\n", + ") as view:\n", + " with gr.Row():\n", + " title = gr.HTML(\"

    Dataset Generator PLUS

    for English, German, and French

    \")\n", + " subtitle = gr.HTML(\"

    Instructions:

    1. Pick the language
    2. \\\n", + "
    3. Select a model
    4. Indicate how many sentences you need
    5. \\\n", + "
    6. Describe the type of sentence you're looking for
    7. Give up to three examples of the desired output sentence, and describe each of them briefly
    8. \\\n", + "
    9. Hit Create Dataset
    10. \\\n", + "
    11. Save the output (.txt) to your Google Drive
    12. \")\n", + " with gr.Row():\n", + " language_choice = gr.Dropdown(choices=[\"English\", \"German\", \"French\"], label=\"Select language\", value=\"English\", interactive=True)\n", + " model_choice = gr.Dropdown(choices=[\"Llama\", \"OpenAI\"], label=\"Select model\", value=\"Llama\", interactive=True)\n", + " volume = gr.Textbox(label=\"Required number of sentences\", interactive=True)\n", + " with gr.Row():\n", + " typeInput = gr.Textbox(label=\"Short description of the kind of sentence you need\", interactive=True)\n", + " with gr.Row():\n", + " sentence_1 = gr.Textbox(label=\"Example sentence 1\", interactive=True)\n", + " instruction_1 = gr.Textbox(label=\"Description\", interactive=True)\n", + " with gr.Row():\n", + " sentence_2 = gr.Textbox(label=\"Example sentence 2\", interactive=True)\n", + " instruction_2 = gr.Textbox(label=\"Description\", interactive=True)\n", + " with gr.Row():\n", + " sentence_3 = gr.Textbox(label=\"Example sentence 3\", interactive=True)\n", + " instruction_3 = gr.Textbox(label=\"Description\", interactive=True)\n", + " with gr.Row():\n", + " liveSentences = gr.Markdown(\n", + " value='
      Your sentences will be displayed here โ€ฆ
      ',\n", + " label=\"Generated sentences:\",\n", + " min_height=60,\n", + " max_height=200\n", + " )\n", + " with gr.Row():\n", + " generate = gr.Button(value=\"Generate sentences\", elem_classes=\"blue-button\")\n", + " with gr.Row():\n", + " clear = gr.Button(value=\"Clear everything\", elem_classes=\"red-button\")\n", + " with gr.Row():\n", + " outputPath = gr.Textbox(label=\"Specify the desired name and location on your Google Drive for the sentences (plain text) to be saved\", interactive=True)\n", + " with gr.Row():\n", + " save = gr.Button(value=\"Save generated data\", elem_classes=\"blue-button\")\n", + "\n", + " def generateSentences(typeInput, s1, i1, s2, i2, s3, i3, volume, language, model):\n", + " global data\n", + " nature = \"\"\n", + " shots = {}\n", + " amount = int(volume) if re.search(\"^[0-9]+$\", volume) is not None else 10\n", + "\n", + " if typeInput != None:\n", + " nature = typeInput\n", + " else:\n", + " nature = \"Random sentences of mixed nature\"\n", + "\n", + " if s1 != None:\n", + " if i1 != None:\n", + " shots[i1] = s1\n", + " else:\n", + " shots[\"A medium-long random sentence about anything\"] = s1\n", + " else:\n", + " shots[\"A medium-long random sentence about anything\"] = \"Paul, waking up out of his half-drunken haze, clearly couldn't tell left from right and ran right into the door.\"\n", + "\n", + " if s2 != None:\n", + " if i2 != None:\n", + " shots[i2] = s2\n", + " else:\n", + " shots[\"A medium-long random sentence about anything\"] = s2\n", + "\n", + " if s3 != None:\n", + " if i3 != None:\n", + " shots[i3] = s3\n", + " else:\n", + " shots[\"A medium-long random sentence about anything\"] = s3\n", + "\n", + " sentences = dataset_generator(model, nature, shots, amount, language)\n", + " data = sentences\n", + "\n", + " return sentences\n", + "\n", + " def saveData(path):\n", + " global data\n", + " drive.mount(\"/content/drive\")\n", + "\n", + " dir_path = os.path.dirname(\"/content/drive/MyDrive/\" + path)\n", + "\n", + " if not os.path.exists(dir_path):\n", + " os.makedirs(dir_path)\n", + "\n", + " with open(\"/content/drive/MyDrive/\" + path, \"w\", encoding=\"utf-8\") as f:\n", + " f.write(data)\n", + "\n", + " generate.click(generateSentences, inputs=[typeInput, sentence_1, instruction_1, sentence_2, instruction_2, sentence_3, instruction_3, volume, language_choice, model_choice], outputs=liveSentences)\n", + " clear.click(\n", + " lambda: [\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"\"),\n", + " gr.update(value='
      Your sentences will be displayed here โ€ฆ
      '),\n", + " gr.update(value=\"\"),\n", + " gr.update(value=\"Save generated data\", elem_classes=\"blue-button\")],\n", + " None,\n", + " [volume, typeInput, sentence_1, instruction_1, sentence_2, instruction_2,\n", + " sentence_3, instruction_3, liveSentences, outputPath, save],\n", + " queue=False\n", + " )\n", + " save.click(saveData, inputs=outputPath, outputs=None).then(lambda: gr.update(value=\"Your data has been saved\", elem_classes=\"green-button\"), [], [save])\n", + "\n", + "view.launch(share=True) #, debug=True)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "authorship_tag": "ABX9TyPxJzufoQPtui+nhl1J1xiR", + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/week3/community-contributions/synthetic_data_generator.ipynb b/week3/community-contributions/synthetic_data_generator.ipynb index 50ea37d..ad0ebee 100644 --- a/week3/community-contributions/synthetic_data_generator.ipynb +++ b/week3/community-contributions/synthetic_data_generator.ipynb @@ -387,7 +387,7 @@ ], "metadata": { "kernelspec": { - "display_name": "llm_engineering-yg2xCEUG", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -401,9 +401,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.11.11" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/week5/community-contributions/day 5 - ollama_rag_1.ipynb b/week5/community-contributions/day 5 - ollama_rag_1.ipynb index 18f002f..6d8a387 100644 --- a/week5/community-contributions/day 5 - ollama_rag_1.ipynb +++ b/week5/community-contributions/day 5 - ollama_rag_1.ipynb @@ -202,7 +202,7 @@ ], "metadata": { "kernelspec": { - "display_name": "venv", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -216,9 +216,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.5" + "version": "3.11.11" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/week5/community-contributions/day3-gemini.ipynb b/week5/community-contributions/day3-gemini.ipynb index ef4808b..bb03b65 100644 --- a/week5/community-contributions/day3-gemini.ipynb +++ b/week5/community-contributions/day3-gemini.ipynb @@ -3389,7 +3389,7 @@ ], "metadata": { "kernelspec": { - "display_name": "llms", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -3407,5 +3407,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/week5/day2.ipynb b/week5/day2.ipynb index 9ca0cb4..d11c9b3 100644 --- a/week5/day2.ipynb +++ b/week5/day2.ipynb @@ -78,7 +78,7 @@ "# Read in documents using LangChain's loaders\n", "# Take everything in all the sub-folders of our knowledgebase\n", "\n", - "folders = glob.glob(\"knowledge-base/*\")\n", + "folders = glob.glob(\"knowledge-base/*/\")\n", "\n", "# With thanks to CG and Jon R, students on the course, for this fix needed for some users \n", "text_loader_kwargs = {'encoding': 'utf-8'}\n", diff --git a/week6/day1.ipynb b/week6/day1.ipynb index 804f1d0..8e18bc5 100644 --- a/week6/day1.ipynb +++ b/week6/day1.ipynb @@ -66,6 +66,22 @@ "login(hf_token, add_to_git_credential=True)" ] }, + { + "cell_type": "markdown", + "id": "e7cb2e20-7fac-44c1-8a4b-131dd37ee06e", + "metadata": {}, + "source": [ + "## One more import - the Item class\n", + "\n", + "If you get an error that you need to agree to Meta's terms when you run this, then follow the link it provides you and follow their instructions. You should get approved by Meta within minutes.\n", + "\n", + "See the last cell in [this colab](https://colab.research.google.com/drive/1deJO03YZTXUwcq2vzxWbiBhrRuI29Vo8?usp=sharing#scrollTo=FqyF5jZQkIl_) for steps to take if Meta doesn't approve.\n", + "\n", + "Any problems - message me or email me! \n", + "\n", + "With thanks to student Dr John S. for pointing out that this import needs to come after signing in to HF" + ] + }, { "cell_type": "code", "execution_count": null, @@ -73,12 +89,6 @@ "metadata": {}, "outputs": [], "source": [ - "# One more import - the Item class\n", - "# If you get an error that you need to agree to Meta's terms when you run this, then follow the link it provides you and follow their instructions\n", - "# You should get approved by Meta within minutes\n", - "# Any problems - message me or email me!\n", - "# With thanks to student Dr John S. for pointing out that this import needs to come after signing in to HF\n", - "\n", "from items import Item" ] }, diff --git a/week6/day2.ipynb b/week6/day2.ipynb index 55c1446..1f89c78 100644 --- a/week6/day2.ipynb +++ b/week6/day2.ipynb @@ -43,7 +43,6 @@ "from dotenv import load_dotenv\n", "from huggingface_hub import login\n", "from datasets import load_dataset, Dataset, DatasetDict\n", - "from items import Item\n", "from loaders import ItemLoader\n", "import matplotlib.pyplot as plt\n", "from collections import Counter, defaultdict\n", @@ -79,6 +78,18 @@ "login(hf_token, add_to_git_credential=True)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "6746144c-2e19-485a-8086-368c144722b4", + "metadata": {}, + "outputs": [], + "source": [ + "# One more import after HF login\n", + "\n", + "from items import Item" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/week6/day3.ipynb b/week6/day3.ipynb index 93e0928..0b30ebb 100644 --- a/week6/day3.ipynb +++ b/week6/day3.ipynb @@ -29,7 +29,6 @@ "import random\n", "from dotenv import load_dotenv\n", "from huggingface_hub import login\n", - "from items import Item\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pickle\n", @@ -137,6 +136,18 @@ "login(hf_token, add_to_git_credential=True)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff3942d8-b010-46b5-a665-15554eae9776", + "metadata": {}, + "outputs": [], + "source": [ + "# One more import after logging in\n", + "\n", + "from items import Item" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/week6/day4.ipynb b/week6/day4.ipynb index 6644ce2..cb7058f 100644 --- a/week6/day4.ipynb +++ b/week6/day4.ipynb @@ -38,7 +38,6 @@ "import random\n", "from dotenv import load_dotenv\n", "from huggingface_hub import login\n", - "from items import Item\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pickle\n", @@ -47,19 +46,6 @@ "from anthropic import Anthropic" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "21a3833e-4093-43b0-8f7b-839c50b911ea", - "metadata": {}, - "outputs": [], - "source": [ - "# moved our Tester into a separate package\n", - "# call it with Tester.test(function_name, test_dataset)\n", - "\n", - "from testing import Tester" - ] - }, { "cell_type": "code", "execution_count": null, @@ -88,6 +74,20 @@ "login(hf_token, add_to_git_credential=True)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "6985bdc7-fa45-49a3-ae97-84bdeb9b2083", + "metadata": {}, + "outputs": [], + "source": [ + "# moved our Tester into a separate package\n", + "# call it with Tester.test(function_name, test_dataset)\n", + "\n", + "from items import Item\n", + "from testing import Tester" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/week6/day5.ipynb b/week6/day5.ipynb index 4a733eb..e437dbc 100644 --- a/week6/day5.ipynb +++ b/week6/day5.ipynb @@ -30,7 +30,6 @@ "import random\n", "from dotenv import load_dotenv\n", "from huggingface_hub import login\n", - "from items import Item\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pickle\n", @@ -39,19 +38,6 @@ "from anthropic import Anthropic" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "21a3833e-4093-43b0-8f7b-839c50b911ea", - "metadata": {}, - "outputs": [], - "source": [ - "# moved our Tester into a separate package\n", - "# call it with Tester.test(function_name, test_dataset)\n", - "\n", - "from testing import Tester" - ] - }, { "cell_type": "code", "execution_count": null, @@ -80,6 +66,20 @@ "login(hf_token, add_to_git_credential=True)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "884a50bd-8cae-425e-8e56-f079fc3e65ce", + "metadata": {}, + "outputs": [], + "source": [ + "# moved our Tester into a separate package\n", + "# call it with Tester.test(function_name, test_dataset)\n", + "\n", + "from items import Item\n", + "from testing import Tester" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/week8/agents/ensemble_agent.py b/week8/agents/ensemble_agent.py index 438e32e..141a3e4 100644 --- a/week8/agents/ensemble_agent.py +++ b/week8/agents/ensemble_agent.py @@ -43,6 +43,6 @@ class EnsembleAgent(Agent): 'Min': [min(specialist, frontier, random_forest)], 'Max': [max(specialist, frontier, random_forest)], }) - y = self.model.predict(X)[0] + y = max(0, self.model.predict(X)[0]) self.log(f"Ensemble Agent complete - returning ${y:.2f}") return y \ No newline at end of file diff --git a/week8/day1.ipynb b/week8/day1.ipynb index 18159fd..5b539cd 100644 --- a/week8/day1.ipynb +++ b/week8/day1.ipynb @@ -68,9 +68,22 @@ "`modal token new` \n", "(Thank you Ed B. for that!)\n", "\n", + "Another Windows student Minh N. mentioned you may need to use this approach, from an activated environment in the command line: \n", + "`modal token set --token-id --token-secret `\n", + "\n", "Also, a student David S. mentioned the following: \n", "> In case anyone else using Windows hits this problem: Along with having to run `modal token new` from a command prompt, you have to move the generated token file. It will deploy the token file (.modal.toml) to your Windows profile folder. The virtual environment couldn't see that location (strangely, it couldn't even after I set environment variables for it and rebooted). I moved that token file to the folder I'm operating out of for the lab and it stopped throwing auth errors.\n", "\n", + "And another Windows student (Robert M. - thank you!!) added another possible step:\n", + "\n", + "\n", + "> I could not get modal to see my tokens (resulting in an 'auth error'), even after copying the \".modal.toml\" file to the \"week8\" folder and restarting JupyterLab. The fix was to manually set the environment variables (the standard way). This config method is explained by modal on their [web site](https://modal.com/docs/reference/modal.config) \n", + "```\n", + "import os\n", + "os.environ[\"MODAL_TOKEN_ID\"] = \"xxx\"\n", + "os.environ[\"MODAL_TOKEN_SECRET\"] = \"yyy\" \n", + "```\n", + "\n", "Finally: I've also heard that in some situations, you might need to restart the Kernel of this jupyter notebook after running this. (Kernel menu >> Restart Kernel and Clear Outputs of All Cells)." ] }, @@ -81,7 +94,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Remove the '# ' from the next line and run the cell\n", + "# Remove the '# ' from the next line and run the cell, or run this command without the excalamation mark from an activated command prompt\n", "# !modal setup" ] }, diff --git a/week8/day2.4.ipynb b/week8/day2.4.ipynb index 3f141ab..e80fcf6 100644 --- a/week8/day2.4.ipynb +++ b/week8/day2.4.ipynb @@ -348,7 +348,7 @@ "outputs": [], "source": [ "def ensemble_pricer(item):\n", - " return ensemble.price(description(item))" + " return max(0,ensemble.price(description(item)))" ] }, { diff --git a/week8/day5.ipynb b/week8/day5.ipynb index d9c0513..67be5c5 100644 --- a/week8/day5.ipynb +++ b/week8/day5.ipynb @@ -156,6 +156,28 @@ "!python price_is_right_final.py" ] }, + { + "cell_type": "markdown", + "id": "242d1243-fbec-4807-988b-8f70c8c9b806", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      But wait!! There's more..

      \n", + " If you're not fed up of product prices yet ๐Ÿ˜‚ I've built this out some more!
      \n", + " If you look in my repo tech2ai, in segment3/lab1 is a neural network implementation of the pricer in pure PyTorch. It does pretty well..
      \n", + " And in segment4/agents is this same Agent project taken further. There's a new version of the PlanningAgent called AutonomousPlanningAgent that uses multiple Tools, and a MessagingAgent that uses claude-3.7 to write texts.
      \n", + " You could experiment with similar ideas to build out this framework.\n", + "
      \n", + "
      " + ] + }, { "cell_type": "markdown", "id": "331a2044-566f-4866-be4d-7542b7dfdf3f", @@ -169,7 +191,7 @@ " \n", "

      CONGRATULATIONS AND THANK YOU!!!

      \n", " \n", - " It's so fabulous that you've made it to the end! My heartiest congratulations. Please stay in touch! I'm here on LinkedIn if we're not already connected and I'm on X at @edwarddonner. And my editor would be cross with me if I didn't mention one more time: it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others.

      Massive thanks again for putting up with me for 8 weeks and getting all the way to the final cell! I'm excited to hear all about your career as an LLM Engineer. You could not have picked a better time to be in this field.\n", + " It's so fabulous that you've made it to the very end! My heartiest congratulations. Please stay in touch! I'm here on LinkedIn if we're not already connected and I'm on X at @edwarddonner. And my editor would be cross with me if I didn't mention one more time: it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others.

      Massive thanks again for putting up with me for 8 weeks and getting all the way to the final cell! I'm excited to hear all about your career as an LLM Engineer. If you post on LinkedIn about completing the course and tag me, then I'll weigh in to amplify your achievement.
      You could not have picked a better time to be in this field.\n", "
      \n", " \n", " \n", From 8b4ad6d6b50da24ee512d96cf8e954b75ff22772 Mon Sep 17 00:00:00 2001 From: ariel1985 Date: Sun, 16 Mar 2025 00:12:28 +0200 Subject: [PATCH 26/43] Implement community contributions feature for turning stackoverflow issues to tutorials --- ...ckoverflow-to-tutorial-summarization.ipynb | 486 ++++++++++++++++++ 1 file changed, 486 insertions(+) create mode 100644 week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb diff --git a/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb b/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb new file mode 100644 index 0000000..882e967 --- /dev/null +++ b/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb @@ -0,0 +1,486 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# MY !FIRST LAB\n", + "\n", + "### Script will take a stackoverflow issue and summarize it as a technical tutorial. \n", + "\n", + "Example links to use: \n", + " \n", + "https://stackoverflow.com/questions/14220321/how-do-i-return-the-response-from-an-asynchronous-call \n", + "https://stackoverflow.com/questions/60174/how-can-i-prevent-sql-injection-in-php\n", + "https://stackoverflow.com/questions/1732348/regex-match-open-tags-except-xhtml-self-contained-tags\n", + "\n", + "*Note: Issues must be answered preferebly by a lot of users.*\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e2fd67f3-6441-4fee-b19c-7c91e6188348", + "metadata": {}, + "outputs": [], + "source": [ + "website = 'https://stackoverflow.com/questions/60174/how-can-i-prevent-sql-injection-in-php'" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\n" + ] + } + ], + "source": [ + "# Load environment variables in a file callwebsite_content .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermwebsite_contentiate Python\" notebook\n", + "\n", + "# Some websites newebsite_content you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mysql - How can I prevent SQL injection in PHP? - Stack Overflow\n", + "Skip to main content\n", + "Stack Overflow\n", + "About\n", + "Products\n", + "OverflowAI\n", + "Stack Overflow for Teams\n", + "Where developers & technologists share private knowledge with c\n" + ] + } + ], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "\n", + "website_content = Website(website)\n", + "print(website_content.title[:100])\n", + "print(website_content.text[:150])" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "268cb127-ec40-4016-9436-94a1ae10a1c6", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3da6e0e9-e8fe-4e94-9de8-c4a031631f3b", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = f\"\"\" \n", + "\n", + " You are looking at a website titled {website_content.title}\n", + "\n", + " Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n", + " \n", + " {website_content.text}\n", + "\n", + "\n", + " The tutorial should include an introduction, problem statement, solution steps, and conclusion.\n", + " Tutrial should be in markdown format.\n", + " \"\"\"\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += f\"\"\" \n", + "\n", + " You are looking at a website titled {website_content.title}\n", + "\n", + " Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n", + " \n", + " {website_content.text}\n", + "\n", + "\n", + " The tutorial should include an introduction, problem statement, solution steps, and conclusion.\n", + " Tutrial should be in markdown format.\n", + " \"\"\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are looking at a website titled mysql - How can I prevent SQL injection in PHP? - Stack Overflow \n", + "\n", + " You are looking at a website titled mysql - How can I prevent SQL injection in PHP? - Stack Overflow\n", + "\n", + " Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n", + "\n", + " Skip to main content\n", + "Stack Overflow\n", + "About\n", + "Products\n", + "OverflowAI\n", + "Stack Overflow for Teams\n", + "Where developers & technologists share private knowledge with coworkers\n", + "Advertising & Talent\n", + "Reach devs & t\n" + ] + } + ], + "source": [ + "print(user_prompt_for(website_content)[:500])" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "The API from OpenAI expects to receive messages in a particular structure.\n", + "Many of the other APIs share this structure:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for GPT-4o-mini, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for OpenAI is very simple!" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "0a6970cc-bed8-4759-a312-3b81236c2f4e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "```markdown\n", + "# How to Prevent SQL Injection in PHP\n", + "\n", + "## Introduction\n", + "SQL injection is a serious security vulnerability that can allow an attacker to interfere with the queries that your application makes to the database. By exploiting this vulnerability, an attacker can gain unauthorized access to sensitive data, manipulate data, and even execute administrative operations on the database. This tutorial will guide you on how to prevent SQL injection in your PHP applications through various best practices.\n", + "\n", + "## Problem Statement\n", + "Consider the following PHP code that is vulnerable to SQL injection:\n", + "\n", + "```php\n", + "$unsafe_variable = $_POST['user_input']; \n", + "mysql_query(\"INSERT INTO `table` (`column`) VALUES ('$unsafe_variable')\");\n", + "```\n", + "\n", + "If a user were to input something like `value'); DROP TABLE table;--`, the query would become:\n", + "\n", + "```sql\n", + "INSERT INTO `table` (`column`) VALUES('value'); DROP TABLE table;--');\n", + "```\n", + "\n", + "This inserts an unwanted SQL command leading to disastrous effects on the database.\n", + "\n", + "## Solution Steps\n", + "\n", + "### 1. Use Prepared Statements\n", + "The best method to prevent SQL injection is to use prepared statements with parameterized queries. This separates SQL logic from data, ensuring that user input is treated as data, not executable code.\n", + "\n", + "#### Using PDO\n", + "Here's how to use PDO in PHP:\n", + "\n", + "```php\n", + "$dsn = 'mysql:dbname=dbtest;host=127.0.0.1;charset=utf8mb4';\n", + "$dbConnection = new PDO($dsn, 'user', 'password');\n", + "$dbConnection->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);\n", + "$dbConnection->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);\n", + "\n", + "$stmt = $dbConnection->prepare('SELECT * FROM users WHERE name = :name');\n", + "$stmt->execute(['name' => $name]);\n", + "\n", + "foreach ($stmt as $row) {\n", + " // Process row\n", + "}\n", + "```\n", + "\n", + "#### Using MySQLi\n", + "If you're using MySQLi, the syntax is slightly different:\n", + "\n", + "```php\n", + "$dbConnection = new mysqli('127.0.0.1', 'username', 'password', 'test');\n", + "$dbConnection->set_charset('utf8mb4');\n", + "\n", + "$stmt = $dbConnection->prepare('SELECT * FROM users WHERE name = ?');\n", + "$stmt->bind_param('s', $name); // 's' stands for string\n", + "$stmt->execute();\n", + "$result = $stmt->get_result();\n", + "\n", + "while ($row = $result->fetch_assoc()) {\n", + " // Process row\n", + "}\n", + "```\n", + "\n", + "### 2. Properly Configure the Database Connection\n", + "When using PDO, ensure that emulated prepared statements are disabled. This is essential for real prepared statements to take effect.\n", + "\n", + "Example configuration:\n", + "```php\n", + "$dbConnection->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);\n", + "```\n", + "\n", + "### 3. Validate Input Data\n", + "In addition to using prepared statements, you should validate and sanitize user inputs. Implementing whitelist validation can help by ensuring only expected values are processed.\n", + "\n", + "For example, if you expect a sorting direction:\n", + "```php\n", + "$dir = !empty($_GET['dir']) && $_GET['dir'] === 'DESC' ? 'DESC' : 'ASC';\n", + "```\n", + "\n", + "### 4. Limit Database Permissions\n", + "Restrict database user permissions to the minimum required for their role. For example, a user who only needs to read data should not have permissions to delete or alter it.\n", + "\n", + "```sql\n", + "GRANT SELECT ON database TO 'username'@'localhost';\n", + "```\n", + "\n", + "### 5. Regularly Update Your Codebase\n", + "Keep libraries and the PHP version you are using up-to-date. Deprecated functions and libraries often contain vulnerabilities that can be exploited.\n", + "\n", + "## Conclusion\n", + "Preventing SQL injection in PHP applications requires a proactive approach. Using prepared statements ensures user input is handled securely, while validating data and limiting permissions fortifies your application against potential attacks. By implementing these best practices, you can significantly reduce the risk of SQL injection vulnerabilities in your applications.\n", + "\n", + "For more in-depth information on SQL injection prevention techniques, consult the [OWASP SQL Injection Prevention Cheat Sheet](https://owasp.org/www-community/attacks/SQL_Injection).\n", + "```" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_summary(website)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 27dd6af931615168cdb21610d67d384de6929583 Mon Sep 17 00:00:00 2001 From: SyedNaqi Hussain Date: Sat, 15 Mar 2025 18:00:40 -0500 Subject: [PATCH 27/43] Undo change of day1 file from week1 folder --- week1/day1.ipynb | 103 +--- week1/tweet-generate-from-alt-text.ipynb | 632 ----------------------- 2 files changed, 21 insertions(+), 714 deletions(-) delete mode 100644 week1/tweet-generate-from-alt-text.ipynb diff --git a/week1/day1.ipynb b/week1/day1.ipynb index af7aac4..bb8e5fa 100644 --- a/week1/day1.ipynb +++ b/week1/day1.ipynb @@ -159,8 +159,8 @@ "metadata": {}, "outputs": [], "source": [ - "import httpx\n", - "openai = OpenAI(http_client=httpx.Client(verify=False))\n", + "openai = OpenAI()\n", + "\n", "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" ] @@ -217,8 +217,7 @@ " Create this Website object from the given url using the BeautifulSoup library\n", " \"\"\"\n", " self.url = url\n", - " requests.packages.urllib3.disable_warnings()\n", - " response = requests.get(url, headers=headers, verify=False)\n", + " response = requests.get(url, headers=headers)\n", " soup = BeautifulSoup(response.content, 'html.parser')\n", " self.title = soup.title.string if soup.title else \"No title found\"\n", " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", @@ -234,7 +233,8 @@ "outputs": [], "source": [ "# Let's try one out. Change the website and add print statements to follow along.\n", - "ed = Website(\"http://edwarddonner.com\")\n", + "\n", + "ed = Website(\"https://edwarddonner.com\")\n", "print(ed.title)\n", "print(ed.text)" ] @@ -434,22 +434,10 @@ "cell_type": "code", "execution_count": null, "id": "3018853a-445f-41ff-9560-d925d1774b2f", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "display_summary(\"https://edwarddonner.com\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b", "metadata": {}, "outputs": [], "source": [ - "display_summary(\"https://openai.com\")" + "display_summary(\"https://edwarddonner.com\")" ] }, { @@ -482,9 +470,7 @@ "cell_type": "code", "execution_count": null, "id": "75e9fd40-b354-4341-991e-863ef2e59db7", - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ "display_summary(\"https://anthropic.com\")" @@ -524,77 +510,30 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "Here's a markdown layout featuring tables for each image with a funny tweet alongside it:\n", - "\n", - "```markdown\n", - "| Image | Funny Tweet |\n", - "|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------|\n", - "| ![Vintage Motorcycle](https://images.pexels.com/photos/30770767/pexels-photo-30770767/free-photo-of-classic-motorcycle-in-kerala-countryside.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When you want to feel like a rebel, but your bike is still in the shop. ๐Ÿ๏ธ๐Ÿ˜‚\" |\n", - "| ![Flock of Birds](https://images.pexels.com/photos/30810205/pexels-photo-30810205/free-photo-of-flock-of-birds-in-flight-against-clear-sky.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When the squad is finally ready to leave the party but you can't find your keys. ๐Ÿ•Š๏ธ๐Ÿคฃ\" |\n", - "| ![Playful Seals](https://images.pexels.com/photos/30824250/pexels-photo-30824250/free-photo-of-playful-seals-on-rocky-san-diego-shore.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500) | \"When youโ€™re trying to chill at the beach, but your buddy wonโ€™t stop splashing you. ๐Ÿฆญ๐Ÿ’ฆ\" |\n", - "```\n", - "\n", - "Feel free to use or modify the layout and the tweets as you see fit!" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# A small exercise to feed the llm with image alt text and return a funny tweet.\n", - "\n", + "outputs": [], + "source": [ "# Step 1: Create your prompts\n", - "import json\n", - "system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n", - "class website:\n", - " def __init__(self,url):\n", - " self.url = url\n", - " requests.packages.urllib3.disable_warnings()\n", - " response = requests.get(url, headers=headers, verify=False)\n", - " html_content = response.content\n", - " soup = BeautifulSoup(html_content, 'html.parser')\n", - " image_tags = soup.find_all('img')\n", - " self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n", - " self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n", - "\n", - " # Restricting to 3 images only.\n", - " if self.image_urls:\n", - " self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n", - " else:\n", - " self.images = {}\n", - " \n", - "\n", - "def user_prompt_for(website):\n", - " user_prompt = f\"Following are images with their alt-text:\"\n", - " user_prompt += json.dumps(website.images)\n", - " user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n", - " return user_prompt\n", "\n", + "system_prompt = \"something here\"\n", + "user_prompt = \"\"\"\n", + " Lots of text\n", + " Can be pasted here\n", + "\"\"\"\n", "\n", "# Step 2: Make the messages list\n", - "page = website(\"https://www.pexels.com/\")\n", - "user_prompt = user_prompt_for(page)\n", - "messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n", + "\n", + "messages = [] # fill this in\n", "\n", "# Step 3: Call OpenAI\n", - "response = openai.chat.completions.create(\n", - " model = \"gpt-4o-mini\",\n", - " messages = messages\n", - " )\n", + "\n", + "response =\n", "\n", "# Step 4: print the result\n", - "display(Markdown((response.choices[0].message.content)))" + "\n", + "print(" ] }, { diff --git a/week1/tweet-generate-from-alt-text.ipynb b/week1/tweet-generate-from-alt-text.ipynb deleted file mode 100644 index 9b7ba91..0000000 --- a/week1/tweet-generate-from-alt-text.ipynb +++ /dev/null @@ -1,632 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", - "metadata": {}, - "source": [ - "# YOUR FIRST LAB\n", - "## Please read this. This is super-critical to get you prepared; there's no fluff here!\n", - "\n", - "## Your first Frontier LLM Project\n", - "\n", - "Let's build a useful LLM solution - in a matter of minutes.\n", - "\n", - "By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", - "\n", - "Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", - "\n", - "Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", - "\n", - "## If you're new to Jupyter Lab\n", - "\n", - "Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", - "\n", - "I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", - "\n", - "## If you're new to the Command Line\n", - "\n", - "Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", - "Linux people, something tells me you could teach _me_ a thing or two about the command line!\n", - "\n", - "## If you'd prefer to work in IDEs\n", - "\n", - "If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n", - "If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", - "\n", - "## If you'd like to brush up your Python\n", - "\n", - "I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", - "`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", - "\n", - "## I am here to help\n", - "\n", - "If you have any problems at all, please do reach out. \n", - "I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n", - "And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done ๐Ÿ˜‚ \n", - "\n", - "## More troubleshooting\n", - "\n", - "Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", - "\n", - "## If this is old hat!\n", - "\n", - "If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
      \n", - " \n", - " \n", - "

      Please read - important note

      \n", - " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", - "
      \n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
      \n", - " \n", - " \n", - "

      Treat these labs as a resource

      \n", - " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", - " \n", - "
      \n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
      \n", - " \n", - " \n", - "

      Business value of these exercises

      \n", - " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", - "
      " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", - "metadata": {}, - "outputs": [], - "source": [ - "# imports\n", - "\n", - "import os\n", - "import requests\n", - "from dotenv import load_dotenv\n", - "from bs4 import BeautifulSoup\n", - "from IPython.display import Markdown, display\n", - "from openai import OpenAI\n", - "\n", - "# If you get an error running this cell, then please head over to the troubleshooting notebook!" - ] - }, - { - "cell_type": "markdown", - "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", - "metadata": {}, - "source": [ - "# Connecting to OpenAI\n", - "\n", - "The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", - "\n", - "## Troubleshooting if you have problems:\n", - "\n", - "Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", - "\n", - "If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", - "\n", - "Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", - "\n", - "Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", - "metadata": {}, - "outputs": [], - "source": [ - "# Load environment variables in a file called .env\n", - "\n", - "load_dotenv(override=True)\n", - "api_key = os.getenv('OPENAI_API_KEY')\n", - "\n", - "# Check the key\n", - "\n", - "if not api_key:\n", - " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", - "elif not api_key.startswith(\"sk-proj-\"):\n", - " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", - "elif api_key.strip() != api_key:\n", - " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", - "else:\n", - " print(\"API key found and looks good so far!\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", - "metadata": {}, - "outputs": [], - "source": [ - "import httpx\n", - "openai = OpenAI(http_client=httpx.Client(verify=False))\n", - "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", - "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" - ] - }, - { - "cell_type": "markdown", - "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", - "metadata": {}, - "source": [ - "# Let's make a quick call to a Frontier model to get started, as a preview!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", - "metadata": {}, - "outputs": [], - "source": [ - "# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", - "\n", - "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", - "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", - "print(response.choices[0].message.content)" - ] - }, - { - "cell_type": "markdown", - "id": "2aa190e5-cb31-456a-96cc-db109919cd78", - "metadata": {}, - "source": [ - "## OK onwards with our first project" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c5e793b2-6775-426a-a139-4848291d0463", - "metadata": {}, - "outputs": [], - "source": [ - "# A class to represent a Webpage\n", - "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", - "\n", - "# Some websites need you to use proper headers when fetching them:\n", - "headers = {\n", - " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", - "}\n", - "\n", - "class Website:\n", - "\n", - " def __init__(self, url):\n", - " \"\"\"\n", - " Create this Website object from the given url using the BeautifulSoup library\n", - " \"\"\"\n", - " self.url = url\n", - " requests.packages.urllib3.disable_warnings()\n", - " response = requests.get(url, headers=headers, verify=False)\n", - " soup = BeautifulSoup(response.content, 'html.parser')\n", - " self.title = soup.title.string if soup.title else \"No title found\"\n", - " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", - " irrelevant.decompose()\n", - " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", - "metadata": {}, - "outputs": [], - "source": [ - "# Let's try one out. Change the website and add print statements to follow along.\n", - "ed = Website(\"http://edwarddonner.com\")\n", - "print(ed.title)\n", - "print(ed.text)" - ] - }, - { - "cell_type": "markdown", - "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", - "metadata": {}, - "source": [ - "## Types of prompts\n", - "\n", - "You may know this already - but if not, you will get very familiar with it!\n", - "\n", - "Models like GPT4o have been trained to receive instructions in a particular way.\n", - "\n", - "They expect to receive:\n", - "\n", - "**A system prompt** that tells them what task they are performing and what tone they should use\n", - "\n", - "**A user prompt** -- the conversation starter that they should reply to" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", - "metadata": {}, - "outputs": [], - "source": [ - "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", - "\n", - "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", - "and provides a short summary, ignoring text that might be navigation related. \\\n", - "Respond in markdown.\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", - "metadata": {}, - "outputs": [], - "source": [ - "# A function that writes a User Prompt that asks for summaries of websites:\n", - "\n", - "def user_prompt_for(website):\n", - " user_prompt = f\"You are looking at a website titled {website.title}\"\n", - " user_prompt += \"\\nThe contents of this website is as follows; \\\n", - "please provide a short summary of this website in markdown. \\\n", - "If it includes news or announcements, then summarize these too.\\n\\n\"\n", - " user_prompt += website.text\n", - " return user_prompt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", - "metadata": {}, - "outputs": [], - "source": [ - "print(user_prompt_for(ed))" - ] - }, - { - "cell_type": "markdown", - "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", - "metadata": {}, - "source": [ - "## Messages\n", - "\n", - "The API from OpenAI expects to receive messages in a particular structure.\n", - "Many of the other APIs share this structure:\n", - "\n", - "```\n", - "[\n", - " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", - " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", - "]\n", - "\n", - "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", - "metadata": {}, - "outputs": [], - "source": [ - "messages = [\n", - " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", - " {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", - "metadata": {}, - "outputs": [], - "source": [ - "# To give you a preview -- calling OpenAI with system and user messages:\n", - "\n", - "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", - "print(response.choices[0].message.content)" - ] - }, - { - "cell_type": "markdown", - "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", - "metadata": {}, - "source": [ - "## And now let's build useful messages for GPT-4o-mini, using a function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", - "metadata": {}, - "outputs": [], - "source": [ - "# See how this function creates exactly the format above\n", - "\n", - "def messages_for(website):\n", - " return [\n", - " {\"role\": \"system\", \"content\": system_prompt},\n", - " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", - "metadata": {}, - "outputs": [], - "source": [ - "# Try this out, and then try for a few more websites\n", - "\n", - "messages_for(ed)" - ] - }, - { - "cell_type": "markdown", - "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", - "metadata": {}, - "source": [ - "## Time to bring it together - the API for OpenAI is very simple!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", - "metadata": {}, - "outputs": [], - "source": [ - "# And now: call the OpenAI API. You will get very familiar with this!\n", - "\n", - "def summarize(url):\n", - " website = Website(url)\n", - " response = openai.chat.completions.create(\n", - " model = \"gpt-4o-mini\",\n", - " messages = messages_for(website)\n", - " )\n", - " return response.choices[0].message.content" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", - "metadata": {}, - "outputs": [], - "source": [ - "summarize(\"https://edwarddonner.com\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3d926d59-450e-4609-92ba-2d6f244f1342", - "metadata": {}, - "outputs": [], - "source": [ - "# A function to display this nicely in the Jupyter output, using markdown\n", - "\n", - "def display_summary(url):\n", - " summary = summarize(url)\n", - " display(Markdown(summary))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3018853a-445f-41ff-9560-d925d1774b2f", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "display_summary(\"https://edwarddonner.com\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b", - "metadata": {}, - "outputs": [], - "source": [ - "display_summary(\"https://openai.com\")" - ] - }, - { - "cell_type": "markdown", - "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", - "metadata": {}, - "source": [ - "# Let's try more websites\n", - "\n", - "Note that this will only work on websites that can be scraped using this simplistic approach.\n", - "\n", - "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", - "\n", - "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", - "\n", - "But many websites will work just fine!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "45d83403-a24c-44b5-84ac-961449b4008f", - "metadata": {}, - "outputs": [], - "source": [ - "display_summary(\"https://cnn.com\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75e9fd40-b354-4341-991e-863ef2e59db7", - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "display_summary(\"https://anthropic.com\")" - ] - }, - { - "cell_type": "markdown", - "id": "c951be1a-7f1b-448f-af1f-845978e47e2c", - "metadata": {}, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
      \n", - " \n", - " \n", - "

      Business applications

      \n", - " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", - "\n", - "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", - "
      \n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
      \n", - " \n", - " \n", - "

      Before you continue - now try yourself

      \n", - " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", - "
      " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", - "metadata": {}, - "outputs": [], - "source": [ - "# A small exercise to feed the llm with image alt text and return a funny tweet.\n", - "\n", - "# Step 1: Create your prompts\n", - "import json\n", - "system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n", - "class website:\n", - " def __init__(self,url):\n", - " self.url = url\n", - " requests.packages.urllib3.disable_warnings()\n", - " response = requests.get(url, headers=headers, verify=False)\n", - " html_content = response.content\n", - " soup = BeautifulSoup(html_content, 'html.parser')\n", - " image_tags = soup.find_all('img')\n", - " self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n", - " self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n", - "\n", - " # Restricting to 3 images only.\n", - " if self.image_urls:\n", - " self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n", - " else:\n", - " self.images = {}\n", - " \n", - "\n", - "def user_prompt_for(website):\n", - " user_prompt = f\"Following are images with their alt-text:\"\n", - " user_prompt += json.dumps(website.images)\n", - " user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n", - " return user_prompt\n", - "\n", - "\n", - "# Step 2: Make the messages list\n", - "page = website(\"https://www.pexels.com/\")\n", - "user_prompt = user_prompt_for(page)\n", - "messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n", - "\n", - "# Step 3: Call OpenAI\n", - "response = openai.chat.completions.create(\n", - " model = \"gpt-4o-mini\",\n", - " messages = messages\n", - " )\n", - "\n", - "# Step 4: print the result\n", - "display(Markdown((response.choices[0].message.content)))" - ] - }, - { - "cell_type": "markdown", - "id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", - "metadata": {}, - "source": [ - "## An extra exercise for those who enjoy web scraping\n", - "\n", - "You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" - ] - }, - { - "cell_type": "markdown", - "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", - "metadata": {}, - "source": [ - "# Sharing your code\n", - "\n", - "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", - "\n", - "If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", - "\n", - "Here are good instructions courtesy of an AI friend: \n", - "https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f4484fcf-8b39-4c3f-9674-37970ed71988", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 74d9af7b7791111d53c8e818c201014a64f83543 Mon Sep 17 00:00:00 2001 From: SyedNaqi Hussain Date: Sat, 15 Mar 2025 18:13:45 -0500 Subject: [PATCH 28/43] Move notebook to community notes --- .../tweet-generate-from-alt-text.ipynb | 632 ++++++++++++++++++ 1 file changed, 632 insertions(+) create mode 100644 week1/community-contributions/tweet-generate-from-alt-text.ipynb diff --git a/week1/community-contributions/tweet-generate-from-alt-text.ipynb b/week1/community-contributions/tweet-generate-from-alt-text.ipynb new file mode 100644 index 0000000..9b7ba91 --- /dev/null +++ b/week1/community-contributions/tweet-generate-from-alt-text.ipynb @@ -0,0 +1,632 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# YOUR FIRST LAB\n", + "## Please read this. This is super-critical to get you prepared; there's no fluff here!\n", + "\n", + "## Your first Frontier LLM Project\n", + "\n", + "Let's build a useful LLM solution - in a matter of minutes.\n", + "\n", + "By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", + "\n", + "Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", + "\n", + "Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", + "\n", + "## If you're new to Jupyter Lab\n", + "\n", + "Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", + "\n", + "I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", + "\n", + "## If you're new to the Command Line\n", + "\n", + "Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", + "Linux people, something tells me you could teach _me_ a thing or two about the command line!\n", + "\n", + "## If you'd prefer to work in IDEs\n", + "\n", + "If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n", + "If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", + "\n", + "## If you'd like to brush up your Python\n", + "\n", + "I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", + "`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", + "\n", + "## I am here to help\n", + "\n", + "If you have any problems at all, please do reach out. \n", + "I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n", + "And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done ๐Ÿ˜‚ \n", + "\n", + "## More troubleshooting\n", + "\n", + "Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", + "\n", + "## If this is old hat!\n", + "\n", + "If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Please read - important note

      \n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + "
      \n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Treat these labs as a resource

      \n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + "
      \n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Business value of these exercises

      \n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + "
      " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "markdown", + "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", + "metadata": {}, + "source": [ + "# Connecting to OpenAI\n", + "\n", + "The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", + "\n", + "## Troubleshooting if you have problems:\n", + "\n", + "Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", + "\n", + "If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", + "\n", + "Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", + "\n", + "Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "import httpx\n", + "openai = OpenAI(http_client=httpx.Client(verify=False))\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" + ] + }, + { + "cell_type": "markdown", + "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", + "metadata": {}, + "source": [ + "# Let's make a quick call to a Frontier model to get started, as a preview!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", + "\n", + "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "2aa190e5-cb31-456a-96cc-db109919cd78", + "metadata": {}, + "source": [ + "## OK onwards with our first project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " requests.packages.urllib3.disable_warnings()\n", + " response = requests.get(url, headers=headers, verify=False)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "ed = Website(\"http://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. \\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [], + "source": [ + "print(user_prompt_for(ed))" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "The API from OpenAI expects to receive messages in a particular structure.\n", + "Many of the other APIs share this structure:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with system and user messages:\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for GPT-4o-mini, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", + "metadata": {}, + "outputs": [], + "source": [ + "# Try this out, and then try for a few more websites\n", + "\n", + "messages_for(ed)" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for OpenAI is very simple!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", + "metadata": {}, + "outputs": [], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3018853a-445f-41ff-9560-d925d1774b2f", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "display_summary(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://openai.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", + "metadata": {}, + "source": [ + "# Let's try more websites\n", + "\n", + "Note that this will only work on websites that can be scraped using this simplistic approach.\n", + "\n", + "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", + "\n", + "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", + "\n", + "But many websites will work just fine!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d83403-a24c-44b5-84ac-961449b4008f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://cnn.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75e9fd40-b354-4341-991e-863ef2e59db7", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "display_summary(\"https://anthropic.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "c951be1a-7f1b-448f-af1f-845978e47e2c", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Business applications

      \n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + "
      \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Before you continue - now try yourself

      \n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + "
      " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", + "metadata": {}, + "outputs": [], + "source": [ + "# A small exercise to feed the llm with image alt text and return a funny tweet.\n", + "\n", + "# Step 1: Create your prompts\n", + "import json\n", + "system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n", + "class website:\n", + " def __init__(self,url):\n", + " self.url = url\n", + " requests.packages.urllib3.disable_warnings()\n", + " response = requests.get(url, headers=headers, verify=False)\n", + " html_content = response.content\n", + " soup = BeautifulSoup(html_content, 'html.parser')\n", + " image_tags = soup.find_all('img')\n", + " self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n", + " self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n", + "\n", + " # Restricting to 3 images only.\n", + " if self.image_urls:\n", + " self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n", + " else:\n", + " self.images = {}\n", + " \n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"Following are images with their alt-text:\"\n", + " user_prompt += json.dumps(website.images)\n", + " user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n", + " return user_prompt\n", + "\n", + "\n", + "# Step 2: Make the messages list\n", + "page = website(\"https://www.pexels.com/\")\n", + "user_prompt = user_prompt_for(page)\n", + "messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n", + "\n", + "# Step 3: Call OpenAI\n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", + "\n", + "# Step 4: print the result\n", + "display(Markdown((response.choices[0].message.content)))" + ] + }, + { + "cell_type": "markdown", + "id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", + "metadata": {}, + "source": [ + "## An extra exercise for those who enjoy web scraping\n", + "\n", + "You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" + ] + }, + { + "cell_type": "markdown", + "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", + "metadata": {}, + "source": [ + "# Sharing your code\n", + "\n", + "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", + "\n", + "If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", + "\n", + "Here are good instructions courtesy of an AI friend: \n", + "https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4484fcf-8b39-4c3f-9674-37970ed71988", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 84244ed774f8c4c88ea7845ed2b9c20647028a0d Mon Sep 17 00:00:00 2001 From: Adil Mubashir Chaudhry <75829014+Heuscartist@users.noreply.github.com> Date: Mon, 17 Mar 2025 00:54:44 +0500 Subject: [PATCH 29/43] Added Master Chef to community-contributions - Adil --- .../day1-master-chef.ipynb | 611 ++++++++++++++++++ 1 file changed, 611 insertions(+) create mode 100644 week1/community-contributions/day1-master-chef.ipynb diff --git a/week1/community-contributions/day1-master-chef.ipynb b/week1/community-contributions/day1-master-chef.ipynb new file mode 100644 index 0000000..b4c2260 --- /dev/null +++ b/week1/community-contributions/day1-master-chef.ipynb @@ -0,0 +1,611 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# YOUR FIRST LAB\n", + "### Please read this section. This is valuable to get you prepared, even if it's a long read -- it's important stuff.\n", + "\n", + "## Your first Frontier LLM Project\n", + "\n", + "Let's build a useful LLM solution - in a matter of minutes.\n", + "\n", + "By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", + "\n", + "Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", + "\n", + "Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", + "\n", + "## If you're new to Jupyter Lab\n", + "\n", + "Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", + "\n", + "I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", + "\n", + "## If you're new to the Command Line\n", + "\n", + "Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", + "\n", + "## If you'd prefer to work in IDEs\n", + "\n", + "If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n", + "If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", + "\n", + "## If you'd like to brush up your Python\n", + "\n", + "I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", + "`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", + "\n", + "## I am here to help\n", + "\n", + "If you have any problems at all, please do reach out. \n", + "I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n", + "And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done ๐Ÿ˜‚ \n", + "\n", + "## More troubleshooting\n", + "\n", + "Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", + "\n", + "## If this is old hat!\n", + "\n", + "If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Please read - important note

      \n", + " The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, after watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...\n", + "
      \n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Treat these labs as a resource

      \n", + " I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", + " \n", + "
      \n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Business value of these exercises

      \n", + " A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.\n", + "
      " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "markdown", + "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", + "metadata": {}, + "source": [ + "# Connecting to OpenAI\n", + "\n", + "The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", + "\n", + "## Troubleshooting if you have problems:\n", + "\n", + "Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", + "\n", + "If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", + "\n", + "Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", + "\n", + "Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" + ] + }, + { + "cell_type": "markdown", + "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", + "metadata": {}, + "source": [ + "# Let's make a quick call to a Frontier model to get started, as a preview!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", + "\n", + "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "2aa190e5-cb31-456a-96cc-db109919cd78", + "metadata": {}, + "source": [ + "## OK onwards with our first project" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "\n", + "ed = Website(\"https://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. \\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [], + "source": [ + "print(user_prompt_for(ed))" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "The API from OpenAI expects to receive messages in a particular structure.\n", + "Many of the other APIs share this structure:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", + "metadata": {}, + "outputs": [], + "source": [ + "# To give you a preview -- calling OpenAI with system and user messages:\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for GPT-4o-mini, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", + "metadata": {}, + "outputs": [], + "source": [ + "# Try this out, and then try for a few more websites\n", + "\n", + "messages_for(ed)" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for OpenAI is very simple!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", + "metadata": {}, + "outputs": [], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3018853a-445f-41ff-9560-d925d1774b2f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", + "metadata": {}, + "source": [ + "# Let's try more websites\n", + "\n", + "Note that this will only work on websites that can be scraped using this simplistic approach.\n", + "\n", + "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", + "\n", + "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", + "\n", + "But many websites will work just fine!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d83403-a24c-44b5-84ac-961449b4008f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://cnn.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75e9fd40-b354-4341-991e-863ef2e59db7", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://anthropic.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "c951be1a-7f1b-448f-af1f-845978e47e2c", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Business applications

      \n", + " In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", + "\n", + "More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.\n", + "
      \n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
      \n", + " \n", + " \n", + "

      Before you continue - now try yourself

      \n", + " Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.\n", + "
      " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", + "metadata": {}, + "outputs": [], + "source": [ + "# Step 1: Create your prompts\n", + "\n", + "system_prompt = \"You are an head chef of a michelin star restaurant who has a diverse skillset \\\n", + "and loves to teach new and interesting recepies for homechefs. Given input of several ingredients \\\n", + "provide step by step instruction of what could be cooked for any cuisine of your choice. Respond in markdown.\"\n", + "\n", + "user_prompt = \"\"\"\n", + "You are a Michelin-starred head chef with a passion for teaching home chefs. \n", + "I have the following ingredients: \n", + "\n", + "**[Chicken breast, Bell peppers, cherry tomatoes, spinach, Basmati rice,\n", + "Garlic, basil, black pepper, smoked paprika]** \n", + "\n", + "Can you provide a step-by-step recipe using these ingredients? You can choose any cuisine that best fits them. \n", + "Please include cooking times, techniques, and any chef tips for enhancing flavors. \n", + "\"\"\"\n", + "\n", + "# Step 2: Make the messages list\n", + "\n", + "messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + "\n", + "# Step 3: Call OpenAI\n", + "\n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", + "\n", + "\n", + "\n", + "# Step 4: print the result\n", + "def display_summary(summary):\n", + " display(Markdown(summary))\n", + "display_summary(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", + "metadata": {}, + "source": [ + "## An extra exercise for those who enjoy web scraping\n", + "\n", + "You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" + ] + }, + { + "cell_type": "markdown", + "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", + "metadata": {}, + "source": [ + "# Sharing your code\n", + "\n", + "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", + "\n", + "If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", + "\n", + "Here are good instructions courtesy of an AI friend: \n", + "https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4484fcf-8b39-4c3f-9674-37970ed71988", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From a4e11e0022e3cdbb6c36920a70aa273dc31a0a67 Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Mon, 17 Mar 2025 02:54:11 +0500 Subject: [PATCH 30/43] Added Code Documentation Generator to community-contributions --- .../code_documentation_generator.ipynb | 433 ++++++++++++++++++ 1 file changed, 433 insertions(+) create mode 100644 week4/community-contributions/code_documentation_generator.ipynb diff --git a/week4/community-contributions/code_documentation_generator.ipynb b/week4/community-contributions/code_documentation_generator.ipynb new file mode 100644 index 0000000..9875fa6 --- /dev/null +++ b/week4/community-contributions/code_documentation_generator.ipynb @@ -0,0 +1,433 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "05432987-80bc-4aa5-8c05-277861e19307", + "metadata": {}, + "source": [ + "## Adds docstrings/comments to code and generates code summary" + ] + }, + { + "cell_type": "markdown", + "id": "e706f175-1e83-4d2c-8613-056b2e532624", + "metadata": {}, + "source": [ + "### Model Usage \n", + "\n", + "- **Open Source Models:**\n", + "\n", + " - Deployed via Endpoint: Hosted on a server and accessed remotely (Qwen 1.5-7)\n", + " - Run Locally on Machine: Executed directly on a local device (Ollama running Llama 3.2-1B)\n", + "\n", + "- **Closed Source Models:** \n", + " - Accessed through API key authentication: (OpenAI, Anthropic). \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ed667df-6660-4ba3-80c5-4c1c8f7e63f3", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import io\n", + "import sys\n", + "import json\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import google.generativeai\n", + "import anthropic\n", + "import ollama\n", + "from IPython.display import Markdown, display, update_display\n", + "import gradio as gr\n", + "from huggingface_hub import login, InferenceClient\n", + "from transformers import AutoTokenizer, pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c9dd4bf1-48cf-44dc-9d04-0ec6e8189a3c", + "metadata": {}, + "outputs": [], + "source": [ + "# environment\n", + "\n", + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY')\n", + "CODE_QWEN_URL = os.environ['CODE_QWEN_URL'] \n", + "BIGBIRD_PEGASUS_URL = os.environ['BIGBIRD_PEGASUS_URL']\n", + "HF_TOKEN = os.environ['HF_TOKEN']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71f671d6-50a7-43cf-9e04-52a159d67dab", + "metadata": {}, + "outputs": [], + "source": [ + "!ollama pull llama3.2:1b" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e6f8f35-477d-4014-8fe9-874b5aee0061", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ae34b79c-425a-4f04-821a-8f1d9868b146", + "metadata": {}, + "outputs": [], + "source": [ + "OPENAI_MODEL = \"gpt-4o-mini\"\n", + "CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n", + "LLAMA_MODEL = \"llama3.2:1b\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80e6d920-3c94-48c4-afd8-518f415ab777", + "metadata": {}, + "outputs": [], + "source": [ + "code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n", + "bigbird_pegasus = \"google/bigbird-pegasus-large-arxiv\"\n", + "login(HF_TOKEN, add_to_git_credential=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "314cd8e3-2c10-4149-9818-4e6b0c05b871", + "metadata": {}, + "outputs": [], + "source": [ + "# Uses Llama to Check Which Language the Code is Written In\n", + "system_message_comments = \"You are an assistant designed to add docstrings and helpful comments to code for documentation purposes.\"\n", + "system_message_comments += \"Respond back with properly formatted code, including docstrings and comments. Keep comments concise. \"\n", + "system_message_comments += \"Do not respond with greetings, or any such extra output\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66fa09e4-1b79-4f53-9bb7-904d515b2f26", + "metadata": {}, + "outputs": [], + "source": [ + "system_message_summary = \"You are an assistant designed to summarise code for documentation purposes. You are not to display code again.\"\n", + "system_message_summary += \"Respond back with a properly crafted summary, mentioning key details regarding to the code, such as workflow, code language.\"\n", + "system_message_summary += \"Do not respond with greetings, or any such extra output. Do not respond in Markdown. Be thorough, keep explanation level at undergraduate level.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea405820-f9d1-4cf1-b465-9ae5cd9016f6", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for(code):\n", + " user_prompt = \"Rewrite this code to include helpful comments and docstrings. \"\n", + " user_prompt += \"Respond only with code.\\n\"\n", + " user_prompt += code\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26c9be56-1d4f-43e5-9bc4-eb5b76da8071", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for_summary(code):\n", + " user_prompt = \"Return the summary of the code.\\n\"\n", + " user_prompt += code\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0ac22cb-dc96-4ae1-b00d-2747572f6945", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(code):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message_comments},\n", + " {\"role\":\"user\", \"content\" : user_prompt_for(code)}\n", + " ]\n", + " return messages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eae1a8b4-68a8-4cd5-849e-0ecabd166a0c", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for_summary(code):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message_summary},\n", + " {\"role\":\"user\", \"content\" : user_prompt_for_summary(code)}\n", + " ]\n", + " return messages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5eb726dd-e09e-4011-8eb6-4d20f2830ff5", + "metadata": {}, + "outputs": [], + "source": [ + "func = \"\"\"\n", + "import time\n", + "\n", + "def calculate(iterations, param1, param2):\n", + " result = 1.0\n", + " for i in range(1, iterations+1):\n", + " j = i * param1 - param2\n", + " result -= (1/j)\n", + " j = i * param1 + param2\n", + " result += (1/j)\n", + " return result\n", + "\n", + "start_time = time.time()\n", + "result = calculate(100_000_000, 4, 1) * 4\n", + "end_time = time.time()\n", + "\n", + "print(f\"Result: {result:.12f}\")\n", + "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f61943b2-c939-4910-a670-58abaf464bb6", + "metadata": {}, + "outputs": [], + "source": [ + "def call_llama(code):\n", + " # commented code\n", + " messages = messages_for(code)\n", + " response1 = ollama.chat(model=LLAMA_MODEL, messages=messages)\n", + "\n", + " # summary\n", + " messages = messages_for_summary(code)\n", + " response2 = ollama.chat(model=LLAMA_MODEL, messages=messages)\n", + " \n", + " return response1['message']['content'],response2['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "696fb97e-807e-40ed-b0e1-beb82d1108a6", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude(code):\n", + " # commented code\n", + " message1 = claude.messages.create(\n", + " model=CLAUDE_MODEL,\n", + " system=system_message_comments,\n", + " messages=([{\"role\": \"user\", \"content\":user_prompt_for(code)}]),\n", + " max_tokens=500\n", + " )\n", + "\n", + " # summary\n", + " message2 = claude.messages.create(\n", + " model=CLAUDE_MODEL,\n", + " system=system_message_summary,\n", + " messages=([{\"role\": \"user\", \"content\":user_prompt_for_summary(code)}]),\n", + " max_tokens=500\n", + " )\n", + " \n", + " return message1.content[0].text,message2.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4bf1db64-86fa-42a1-98dd-3df74607f8db", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt(code):\n", + " # commented code\n", + " completion1 = openai.chat.completions.create(\n", + " model=OPENAI_MODEL,\n", + " messages=messages_for(code),\n", + " )\n", + "\n", + " #summary\n", + " completion2 = openai.chat.completions.create(\n", + " model=OPENAI_MODEL,\n", + " messages=messages_for_summary(code),\n", + " )\n", + " \n", + " return completion1.choices[0].message.content,completion2.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6863dc42-cbcd-4a95-8b0a-cfbcbfed0764", + "metadata": {}, + "outputs": [], + "source": [ + "def call_codeqwen(code):\n", + " # commented code\n", + " tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", + " messages = messages_for(code)\n", + " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", + " client = InferenceClient(CODE_QWEN_URL, token=HF_TOKEN)\n", + " response1 = client.text_generation(text, details=True, max_new_tokens=1000)\n", + "\n", + " # summary\n", + " tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", + " messages = messages_for_summary(code)\n", + " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", + " client = InferenceClient(CODE_QWEN_URL, token=HF_TOKEN)\n", + " response2 = client.text_generation(text, details=True, max_new_tokens=1000)\n", + " \n", + " return response1.generated_text ,response2.generated_text " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "06d05c02-45e4-47da-b70b-cf433dfaca4c", + "metadata": {}, + "outputs": [], + "source": [ + "def create_docs(code,model):\n", + " if model == \"Llama\":\n", + " comments,summary = call_llama(code)\n", + " elif model == \"Claude\":\n", + " comments,summary = call_claude(code)\n", + " elif model == \"GPT\":\n", + " comments,summary = call_gpt(code)\n", + " elif model == \"CodeQwen\":\n", + " comments,summary = call_codeqwen(code)\n", + " else:\n", + " raise ValueError(\"Unknown Model\")\n", + " return comments,summary" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1b4ea289-5da9-4b0e-b4d4-f8f01e466839", + "metadata": {}, + "outputs": [], + "source": [ + "css = \"\"\"\n", + ".comments {background-color: #00599C;}\n", + ".summary {background-color: #008B8B;}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89ad7c7b-b881-45d3-aadc-d7206af578fb", + "metadata": {}, + "outputs": [], + "source": [ + "with gr.Blocks(css=css) as ui:\n", + " gr.Markdown(\"### Code Documentation and Formatting\")\n", + " with gr.Row():\n", + " code = gr.Textbox(label=\"Input Code: \", value=func, lines=10)\n", + " with gr.Row():\n", + " model = gr.Dropdown([\"GPT\",\"Claude\",\"Llama\",\"CodeQwen\"],label=\"Select model\",value=\"GPT\")\n", + " with gr.Row():\n", + " docs = gr.Button(\"Add Comments and Sumarise Code\")\n", + " with gr.Row():\n", + " commented_code = gr.Textbox(label= \"Formatted Code\", lines=10,elem_classes=[\"comments\"])\n", + " code_summary = gr.Textbox(label = \"Code Summary\", lines=10,elem_classes=[\"summary\"])\n", + " docs.click(create_docs,inputs=[code,model],outputs=[commented_code,code_summary])," + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a9e3b1c-bfe6-4b71-aac8-fa36a491c157", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac895aa9-e044-4598-b715-d96d1c158656", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a96877c-22b7-4ad5-b235-1cf8f8b200a1", + "metadata": {}, + "outputs": [], + "source": [ + "print(call_llama(func))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f11de1a2-52c0-41c7-ad88-01ef5f8bc628", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d6e70826394061f1c466ca84494cad20fabf37e1 Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Mon, 17 Mar 2025 02:56:24 +0500 Subject: [PATCH 31/43] Added Code Documentation Generator to community-contributions --- .../community-contributions/code_documentation_generator.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/week4/community-contributions/code_documentation_generator.ipynb b/week4/community-contributions/code_documentation_generator.ipynb index 9875fa6..362f187 100644 --- a/week4/community-contributions/code_documentation_generator.ipynb +++ b/week4/community-contributions/code_documentation_generator.ipynb @@ -35,7 +35,7 @@ "\n", "import os\n", "import io\n", - "import sys\n", + "import sys \n", "import json\n", "import requests\n", "from dotenv import load_dotenv\n", From 3e7ba931b4d05b3ca2ecf028e1b93f9f921cd051 Mon Sep 17 00:00:00 2001 From: "Palbha Kulkarni (Nazwale)" Date: Mon, 17 Mar 2025 21:51:51 -0400 Subject: [PATCH 32/43] Week 2 day 4 Budget trip planner Free Gemini API on Google colab --- ...ay4_budget_trip_planner_using_gemini.ipynb | 167 ++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 week2/community-contributions/week2day4_budget_trip_planner_using_gemini.ipynb diff --git a/week2/community-contributions/week2day4_budget_trip_planner_using_gemini.ipynb b/week2/community-contributions/week2day4_budget_trip_planner_using_gemini.ipynb new file mode 100644 index 0000000..f301817 --- /dev/null +++ b/week2/community-contributions/week2day4_budget_trip_planner_using_gemini.ipynb @@ -0,0 +1,167 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "Import libraries as needed & keep your gemini api key ready" + ], + "metadata": { + "id": "2UAcHYzT6ikw" + } + }, + { + "cell_type": "code", + "source": [ + "#!pip install gradio" + ], + "metadata": { + "id": "XW0IY4xK6JZ1" + }, + "execution_count": 14, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "dwoPNMMP4ZSh" + }, + "outputs": [], + "source": [ + "from google import genai\n", + "from google.genai import types\n", + "from google.colab import userdata\n", + "\n" + ] + }, + { + "cell_type": "code", + "source": [ + "def get_trip_itinerary(budget: int) -> str:\n", + " \"\"\"\n", + " Returns a trip itinerary based on the given budget.\n", + " \"\"\"\n", + " itinerary_dict: Dict[int, str] = {\n", + " 500: \"Paris: 3-day budget trip covering Eiffel Tower, Louvre, and Seine River Cruise.\",\n", + " 1000: \"Tokyo: 5-day adventure covering Shibuya, Akihabara, Mount Fuji day trip.\",\n", + " 2000: \"New York: 7-day luxury stay covering Times Square, Broadway show, and helicopter tour.\",\n", + " 3000: \"Dubai: 7-day ultra-luxury trip with Burj Khalifa VIP tour, desert safari, and yacht cruise.\",\n", + " }\n", + "\n", + " return itinerary_dict.get(budget, \"No itinerary found for this budget. Try another amount!\")\n" + ], + "metadata": { + "id": "cnYD07T24ueV" + }, + "execution_count": 3, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from google.genai import types\n", + "\n", + "config = types.GenerateContentConfig(tools=[get_trip_itinerary])\n", + "\n", + "from google import genai\n", + "\n", + "client = genai.Client(api_key=userdata.get('gemini_api'))\n", + "\n", + "response = client.models.generate_content(\n", + " model='gemini-2.0-flash',\n", + " config=config,\n", + " contents='Based on the user budget suggest trip itinerary'\n", + ")\n" + ], + "metadata": { + "id": "3WRUXvD45VFC" + }, + "execution_count": 7, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "import gradio as gr\n", + "\n", + "# Chat function using Gemini\n", + "chat = client.chats.create(model='gemini-2.0-flash', config=config)\n", + "\n", + "def chat_with_ai(user_input: str):\n", + " response = chat.send_message(user_input)\n", + " return response.text\n", + "\n", + "# Gradio Chat Interface\n", + "demo = gr.Interface(fn=chat_with_ai, inputs=\"text\", outputs=\"text\", title=\"AI Trip Planner\")\n", + "\n", + "demo.launch()\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 645 + }, + "id": "5fE700z96DHs", + "outputId": "3e35423c-8b2b-4868-8113-00d9d3a7a2ba" + }, + "execution_count": 13, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Running Gradio in a Colab notebook requires sharing enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n", + "\n", + "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n", + "* Running on public URL: https://079a23f363400da700.gradio.live\n", + "\n", + "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "
      " + ] + }, + "metadata": {} + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [] + }, + "metadata": {}, + "execution_count": 13 + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "XC9zzq8X5u8m" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file From a99435d6e5f513a54bf9b097216aa823b5ee3ad9 Mon Sep 17 00:00:00 2001 From: Adriana394 <158718290+Adriana394@users.noreply.github.com> Date: Tue, 18 Mar 2025 12:12:01 +0100 Subject: [PATCH 33/43] Create synthetic_dataset_generator_deepseek_qwen_llama.ipynb --- ...ataset_generator_deepseek_qwen_llama.ipynb | 402 ++++++++++++++++++ 1 file changed, 402 insertions(+) create mode 100644 week3/community-contributions/synthetic_dataset_generator_deepseek_qwen_llama.ipynb diff --git a/week3/community-contributions/synthetic_dataset_generator_deepseek_qwen_llama.ipynb b/week3/community-contributions/synthetic_dataset_generator_deepseek_qwen_llama.ipynb new file mode 100644 index 0000000..6009259 --- /dev/null +++ b/week3/community-contributions/synthetic_dataset_generator_deepseek_qwen_llama.ipynb @@ -0,0 +1,402 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "843542f7-220a-4408-9f8a-848696092434", + "metadata": { + "id": "843542f7-220a-4408-9f8a-848696092434" + }, + "source": [ + "# Build a Model to generate Synthetic Data" + ] + }, + { + "cell_type": "markdown", + "id": "a8816fc8-9517-46ff-af27-9fd0060840aa", + "metadata": {}, + "source": [ + "Code was written in Google Colab. " + ] + }, + { + "cell_type": "markdown", + "id": "08a8d539-950b-4b58-abf4-f17bd832c0af", + "metadata": { + "id": "08a8d539-950b-4b58-abf4-f17bd832c0af" + }, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "Ienu-NHTuUlT", + "metadata": { + "id": "Ienu-NHTuUlT" + }, + "outputs": [], + "source": [ + "!pip install -q gradio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5e737cd-27b0-4a2e-9a0c-dbb30ce5cdbf", + "metadata": { + "id": "c5e737cd-27b0-4a2e-9a0c-dbb30ce5cdbf" + }, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "import json\n", + "from google.colab import userdata\n", + "\n", + "from huggingface_hub import login\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", + "import torch\n", + "\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "khD9X5-V_txO", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "khD9X5-V_txO", + "outputId": "e2b8d8d0-0433-4b5f-c777-a675213a3f4c" + }, + "outputs": [], + "source": [ + "!pip install -U bitsandbytes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e47ead5f-b4e9-4e9f-acf9-be1ffb7fa6d7", + "metadata": { + "id": "e47ead5f-b4e9-4e9f-acf9-be1ffb7fa6d7" + }, + "outputs": [], + "source": [ + "hf_token = userdata.get('HF_TOKEN')" + ] + }, + { + "cell_type": "markdown", + "id": "ba104a9c-f298-4e90-9ceb-9d907e392d0d", + "metadata": { + "id": "ba104a9c-f298-4e90-9ceb-9d907e392d0d" + }, + "source": [ + "## Open Source Models from HF" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11b1eb65-8ef5-4e6d-9176-cf1f70d07fb6", + "metadata": { + "id": "11b1eb65-8ef5-4e6d-9176-cf1f70d07fb6" + }, + "outputs": [], + "source": [ + "deepseek_model = 'deepseek-ai/deepseek-llm-7b-chat'\n", + "llama_model = 'meta-llama/Meta-Llama-3.1-8B-Instruct'\n", + "qwen2 = 'Qwen/Qwen2-7B-Instruct'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90fb1d2e-5d25-4d73-b629-8273ab71503c", + "metadata": { + "id": "90fb1d2e-5d25-4d73-b629-8273ab71503c" + }, + "outputs": [], + "source": [ + "login(hf_token, add_to_git_credential=True)" + ] + }, + { + "cell_type": "markdown", + "id": "52948c01-8dc6-404b-a2c1-c87f9f6dbd64", + "metadata": { + "id": "52948c01-8dc6-404b-a2c1-c87f9f6dbd64" + }, + "source": [ + "## Creating Prompts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79374337-34fe-4002-b173-ac9b132a54d8", + "metadata": { + "id": "79374337-34fe-4002-b173-ac9b132a54d8" + }, + "outputs": [], + "source": [ + "system_prompt = \"You are an expert in generating synthetic datasets. Your goal is to generate realistic datasets \\\n", + "based on a given business and its requirements from the user. You will also be given the desired datset format.\"\n", + "system_prompt += \"Do not repeat the instructions.\"\n", + "\n", + "user_prompt = (\"Please provide me a dataset for the following business.\"\n", + "\"For example:\\n\"\n", + "\"The Business: A retail store selling luxury watches.\\n\"\n", + "\"The Data Format: CSV.\\n\"\n", + "\"Output:\\n\"\n", + "\"Item,Price,Quantity,Brand,Sale Date\\n\"\n", + "\"Superocean II, 20.000$, 3, Breitling, 2025-04-08 \\n\"\n", + "\"If I don't provide you the necessary columns, please create the columns based on your knowledge about the given business\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dcd90b5e-a7d2-4cdc-81ff-17974c5ff1fe", + "metadata": { + "id": "dcd90b5e-a7d2-4cdc-81ff-17974c5ff1fe" + }, + "outputs": [], + "source": [ + "def dataset_format(data_format, num_records):\n", + " format_message = ''\n", + " if data_format == 'CSV':\n", + " format_message = 'Please provide the dataset in a CSV format.'\n", + " elif data_format == 'JSON':\n", + " format_message = 'Please provide the dataset in a JSON format'\n", + " elif data_format == 'Tabular':\n", + " format_message = 'Please provide the dataset in a Tabular format'\n", + "\n", + " return format_message + f'Please generate {num_records} records'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "39243edb-3eba-46fd-a610-e474ed421b01", + "metadata": { + "id": "39243edb-3eba-46fd-a610-e474ed421b01" + }, + "outputs": [], + "source": [ + "def complete_user_prompt(user_input, data_format, num_records):\n", + " messages = [\n", + " {'role': 'system', 'content': system_prompt},\n", + " {'role': 'user', 'content': user_input + user_prompt + dataset_format(data_format, num_records)}\n", + " ]\n", + "\n", + " return messages" + ] + }, + { + "cell_type": "markdown", + "id": "1ac81127-b9cc-424b-8b38-8a8b09bcc226", + "metadata": { + "id": "1ac81127-b9cc-424b-8b38-8a8b09bcc226" + }, + "source": [ + "## Accessing the Models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc4aaab5-bde1-463b-b873-e8bd1a231dc1", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "cc4aaab5-bde1-463b-b873-e8bd1a231dc1", + "outputId": "16c9420d-2c4a-4e57-f281-7c531b5145db" + }, + "outputs": [], + "source": [ + "print(\"CUDA available:\", torch.cuda.is_available())\n", + "if torch.cuda.is_available():\n", + " print(\"GPU-Device:\", torch.cuda.get_device_name(torch.cuda.current_device()))\n", + "else:\n", + " print(\"No GPU found.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b8e648d-747f-4684-a20b-b8da550efc23", + "metadata": { + "id": "6b8e648d-747f-4684-a20b-b8da550efc23" + }, + "outputs": [], + "source": [ + "quant_config = BitsAndBytesConfig(\n", + " load_in_4bit = True,\n", + " bnb_4bit_use_double_quant = False,\n", + " bnb_4bit_compute_dtype= torch.bfloat16,\n", + " bnb_4bit_quant_type= 'nf4'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b3ae602f-0abf-420d-8c7b-1938cba92528", + "metadata": { + "id": "b3ae602f-0abf-420d-8c7b-1938cba92528" + }, + "outputs": [], + "source": [ + "def generate_model(model_id, messages):\n", + " try:\n", + " tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code = True)\n", + " inputs = tokenizer.apply_chat_template(messages, return_tensors = 'pt').to('cuda')\n", + " streamer = TextStreamer(tokenizer)\n", + " model = AutoModelForCausalLM.from_pretrained(model_id, device_map = 'auto', quantization_config = quant_config)\n", + " outputs = model.generate(inputs, max_new_tokens = 2000, streamer = streamer)\n", + " generated_text = tokenizer.decode(outputs[0], skip_special_tokens = True)\n", + " del tokenizer, streamer, model, inputs, outputs\n", + " return generated_text\n", + "\n", + " except Exception as e:\n", + " return f'Error during generation: {str(e)}'" + ] + }, + { + "cell_type": "markdown", + "id": "7c575c9e-4674-4eee-a9b9-c8d14ceed474", + "metadata": { + "id": "7c575c9e-4674-4eee-a9b9-c8d14ceed474" + }, + "source": [ + "## Generate Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d9c5963e-9f4e-4990-b744-b9ead03e623a", + "metadata": { + "id": "d9c5963e-9f4e-4990-b744-b9ead03e623a" + }, + "outputs": [], + "source": [ + "def generate_dataset(user_input, target_format, model_choice, num_records):\n", + " if model_choice == 'DeepSeek':\n", + " model_id = deepseek_model\n", + " elif model_choice == 'Llama-3.1-8B':\n", + " model_id = llama_model\n", + " elif model_choice == 'Qwen2':\n", + " model_id = qwen2\n", + "\n", + " messages = complete_user_prompt(user_input, target_format, num_records)\n", + " return generate_model(model_id, messages)" + ] + }, + { + "cell_type": "markdown", + "id": "ff574cfe-567f-4c6d-b944-fb756bf7ebca", + "metadata": { + "id": "ff574cfe-567f-4c6d-b944-fb756bf7ebca" + }, + "source": [ + "## Creating Gradio UI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61d2b056-0d00-4b73-b083-024a8f374fef", + "metadata": { + "id": "61d2b056-0d00-4b73-b083-024a8f374fef" + }, + "outputs": [], + "source": [ + "with gr.Blocks(title = 'Synthetic Data Generator') as ui:\n", + " gr.Markdown('# Synthetic Data Generator')\n", + "\n", + " with gr.Row():\n", + " with gr.Column(min_width=600):\n", + " user_inputs = gr.Textbox(label = 'Enter your Business details and data requirements',\n", + " placeholder = 'Type here...', lines = 15)\n", + "\n", + " model_choice = gr.Dropdown(\n", + " ['DeepSeek', 'Llama-3.1-8B', 'Qwen2'],\n", + " label = 'Choose your Model',\n", + " value = 'DeepSeek'\n", + " )\n", + "\n", + " target_format = gr.Dropdown(\n", + " ['CSV', 'JSON', 'Tabular'],\n", + " label = 'Choose your Format',\n", + " value = 'CSV'\n", + " )\n", + " num_records = gr.Dropdown(\n", + " [50, 100, 150, 200],\n", + " label = 'Number of Records',\n", + " value = 50\n", + " )\n", + "\n", + " generate_button = gr.Button('Generate')\n", + "\n", + " with gr.Column():\n", + " output = gr.Textbox(label = 'Generated Synthetic Data',\n", + " lines = 30)\n", + "\n", + " generate_button.click(fn = generate_dataset, inputs = [user_inputs, target_format, model_choice, num_records],\n", + " outputs = output\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "958d9cbf-50ff-4c50-a305-18df6d5f5eda", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 626 + }, + "id": "958d9cbf-50ff-4c50-a305-18df6d5f5eda", + "outputId": "a6736641-85c3-4b6a-a28d-02ac5caf4562", + "scrolled": true + }, + "outputs": [], + "source": [ + "ui.launch(inbrowser = True)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 750f9a60621465425a9ed61b3ca1db9960eb60f0 Mon Sep 17 00:00:00 2001 From: Edward Donner Date: Tue, 18 Mar 2025 22:25:20 -0400 Subject: [PATCH 34/43] Fixed bug with path check --- week5/day2.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/week5/day2.ipynb b/week5/day2.ipynb index d11c9b3..4ae52ad 100644 --- a/week5/day2.ipynb +++ b/week5/day2.ipynb @@ -77,8 +77,9 @@ "source": [ "# Read in documents using LangChain's loaders\n", "# Take everything in all the sub-folders of our knowledgebase\n", + "# Thank you Mark D. and Zoya H. for fixing a bug here..\n", "\n", - "folders = glob.glob(\"knowledge-base/*/\")\n", + "folders = glob.glob(\"knowledge-base/*\")\n", "\n", "# With thanks to CG and Jon R, students on the course, for this fix needed for some users \n", "text_loader_kwargs = {'encoding': 'utf-8'}\n", From 9ba4350a41ace4dc6e17e1aae359513d496cb762 Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Wed, 19 Mar 2025 12:49:08 +0500 Subject: [PATCH 35/43] Added contribution to community-contributions --- ...ay3_vector_embeddings_from_text_file.ipynb | 235 ++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100644 week5/community-contributions/day3_vector_embeddings_from_text_file.ipynb diff --git a/week5/community-contributions/day3_vector_embeddings_from_text_file.ipynb b/week5/community-contributions/day3_vector_embeddings_from_text_file.ipynb new file mode 100644 index 0000000..8519256 --- /dev/null +++ b/week5/community-contributions/day3_vector_embeddings_from_text_file.ipynb @@ -0,0 +1,235 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fad6ee3f-45b8-4ac3-aa39-4a44dac91994", + "metadata": {}, + "source": [ + "## Creating Text Embeddings From a Text File\n", + "- Loading data using TextLoader\n", + "- Splitting into chunks using CharacterTextSplitter\n", + "- Converting chunks into vector embeddings and creating a vectorstore\n", + "- Retreiving, reducing dimensions to 2D and displaying text embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33b79f0d-7bd5-4e82-9295-2cc5cfa9495b", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "391d12b3-ea25-4c66-93ba-71ef7c590be3", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.document_loaders import DirectoryLoader, TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.schema import Document\n", + "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", + "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_chroma import Chroma\n", + "import numpy as np\n", + "from sklearn.manifold import TSNE\n", + "import plotly.graph_objects as go" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "365d4346-bcf7-48b3-be13-b492f1877fab", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = \"gpt-4o-mini\"\n", + "db_name = \"my_vector_db\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93887c1e-fb5e-4f9a-95f6-91a284e49695", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86289eb8-25d8-405f-b1bb-3d9d9fed8671", + "metadata": {}, + "outputs": [], + "source": [ + "loader = TextLoader(\"data.txt\", encoding=\"utf-8\")\n", + "data = loader.load()\n", + "\n", + "documents = []\n", + "for text in data:\n", + " documents.append(text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32320fff-2321-40ea-9b7d-294dc2dfba3a", + "metadata": {}, + "outputs": [], + "source": [ + "text_splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=5)\n", + "chunks = text_splitter.split_documents(documents)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fce762a5-4c78-4102-ab55-f95ee0c97286", + "metadata": {}, + "outputs": [], + "source": [ + "len(chunks)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ddb5bc12-af30-476d-bbbb-f91a3ae8af2f", + "metadata": {}, + "outputs": [], + "source": [ + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75ba81ec-9178-4ce4-83e2-82f937c85902", + "metadata": {}, + "outputs": [], + "source": [ + "if os.path.exists(db_name):\n", + " Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3ca2632-a8b3-4e7e-8370-d91579d31c23", + "metadata": {}, + "outputs": [], + "source": [ + "vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", + "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0de67066-73f5-446f-9033-a00d45b0cdc1", + "metadata": {}, + "outputs": [], + "source": [ + "# Get one vector and find how many dimensions it has\n", + "\n", + "collection = vectorstore._collection\n", + "sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0] # represents a single vector\n", + "dimensions = len(sample_embedding)\n", + "print(f\"The vectors have {dimensions:,} dimensions\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e50d972c-d740-4f0a-8bc2-e55ebe462a41", + "metadata": {}, + "outputs": [], + "source": [ + "sample_embedding" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa96105d-b882-48d9-b088-6aab5db7b1e9", + "metadata": {}, + "outputs": [], + "source": [ + "result = collection.get(include=['embeddings','documents'])\n", + "vectors = np.array(result['embeddings']) \n", + "documents = result['documents']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "213b4cf2-db0a-4610-8d8f-97607996ed17", + "metadata": {}, + "outputs": [], + "source": [ + "# Reduce dimensionality to 2D using t-SNE\n", + "tsne = TSNE(n_components=2,perplexity=5, random_state=42)\n", + "reduced_vectors = tsne.fit_transform(vectors)\n", + "\n", + "# Create the 2D scatter plot\n", + "fig = go.Figure(data=[go.Scatter(\n", + " x=reduced_vectors[:, 0],\n", + " y=reduced_vectors[:, 1],\n", + " mode='markers',\n", + " marker=dict(size=5, opacity=0.8),\n", + " text=[f\"Text: {d[:200]}...\" for d in documents],\n", + " hoverinfo='text'\n", + ")])\n", + "\n", + "fig.update_layout(\n", + " title='2D Chroma Vector Store Visualization',\n", + " scene=dict(xaxis_title='x',yaxis_title='y'),\n", + " width=800,\n", + " height=600,\n", + " margin=dict(r=20, b=10, l=10, t=40)\n", + ")\n", + "\n", + "fig.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d13aa60-da3e-4c61-af69-1ba9087e0181", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From b6a6b31e7bc321c4b826f4f2203c9a8a3aed7ac1 Mon Sep 17 00:00:00 2001 From: ariel1985 Date: Wed, 19 Mar 2025 20:46:04 +0200 Subject: [PATCH 36/43] Refine system prompt for tutorial summarization from Stack Overflow content --- ...ckoverflow-to-tutorial-summarization.ipynb | 28 ++----------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb b/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb index 882e967..f7ecf37 100644 --- a/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb +++ b/week1/community-contributions/week1-day1-stackoverflow-to-tutorial-summarization.ipynb @@ -171,40 +171,18 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 2, "id": "268cb127-ec40-4016-9436-94a1ae10a1c6", "metadata": {}, "outputs": [], "source": [ "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", "\n", - "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", - "and provides a short summary, ignoring text that might be navigation related. \\\n", + "system_prompt = \"You are a technical writer that analyzes the contents of a stackoverflow website issue containing a question and answer \\\n", + "and provides a summary in the form of a technical tutorial , ignoring text that might be navigation related. \\\n", "Respond in markdown.\"" ] }, - { - "cell_type": "code", - "execution_count": 8, - "id": "3da6e0e9-e8fe-4e94-9de8-c4a031631f3b", - "metadata": {}, - "outputs": [], - "source": [ - "system_prompt = f\"\"\" \n", - "\n", - " You are looking at a website titled {website_content.title}\n", - "\n", - " Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n", - " \n", - " {website_content.text}\n", - "\n", - "\n", - " The tutorial should include an introduction, problem statement, solution steps, and conclusion.\n", - " Tutrial should be in markdown format.\n", - " \"\"\"\n", - " " - ] - }, { "cell_type": "code", "execution_count": 9, From 0cc83c89a4ae06c6d6a65eade3a7e422f1b8c892 Mon Sep 17 00:00:00 2001 From: Simon Steinberg Date: Fri, 21 Mar 2025 16:54:34 +0100 Subject: [PATCH 37/43] The Airline AI Assistant can now compare prices, e.g., 'Is it cheaper to go to Berlin or to go to London?' --- .../day4_compare_prices.ipynb | 275 ++++++++++++++++++ 1 file changed, 275 insertions(+) create mode 100644 week2/community-contributions/day4_compare_prices.ipynb diff --git a/week2/community-contributions/day4_compare_prices.ipynb b/week2/community-contributions/day4_compare_prices.ipynb new file mode 100644 index 0000000..0626bc4 --- /dev/null +++ b/week2/community-contributions/day4_compare_prices.ipynb @@ -0,0 +1,275 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", + "metadata": {}, + "source": [ + "# Project - Airline AI Assistant\n", + "\n", + "We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and be\\\\gins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "MODEL = \"gpt-4o-mini\"\n", + "openai = OpenAI()\n", + "\n", + "# As an alternative, if you'd like to use Ollama instead of OpenAI\n", + "# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n", + "# MODEL = \"llama3.2\"\n", + "# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a521d84-d07c-49ab-a0df-d6451499ed97", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", + "metadata": {}, + "outputs": [], + "source": [ + "# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", + "\n", + "def chat(message, history):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message}\n", + " ] + history + [\n", + " {\"role\": \"user\", \"content\": message}\n", + " ]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " return response.choices[0].message.content\n", + "\n", + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Tools are an incredibly powerful feature provided by the frontier LLMs.\n", + "\n", + "With tools, you can write a function, and have the LLM call that function as part of its response.\n", + "\n", + "Sounds almost spooky.. we're giving it the power to run code on our machine?\n", + "\n", + "Well, kinda." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's start by making a useful function\n", + "\n", + "ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", + "\n", + "def get_ticket_price(destination_city):\n", + " print(f\"Tool get_ticket_price called for {destination_city}\")\n", + " city = destination_city.lower()\n", + " return ticket_prices.get(city, \"Unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", + "metadata": {}, + "outputs": [], + "source": [ + "get_ticket_price(\"Berlin\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4afceded-7178-4c05-8fa6-9f2085e6a344", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "price_function = {\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "\n", + "tools = [{\"type\": \"function\", \"function\": price_function}]" + ] + }, + { + "cell_type": "markdown", + "id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340", + "metadata": {}, + "source": [ + "## Getting OpenAI to use our Tool\n", + "\n", + "There's some fiddly stuff to allow OpenAI \"to call our tool\"\n", + "\n", + "What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n", + "\n", + "Here's how the new chat function looks:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ad32321f-083a-4462-a6d6-7bb3b0f5d10a", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def handle_tool_call(message): \n", + " responses = []\n", + " for tool_call in message.tool_calls: \n", + " if tool_call.function.name == \"get_ticket_price\":\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " city = arguments.get('destination_city')\n", + " price = get_ticket_price(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " responses.append(response)\n", + " return responses" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message}\n", + " ] + history + [\n", + " {\"role\": \"user\", \"content\": message}\n", + " ]\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + "\n", + " # Tool usage\n", + " if response.choices[0].finish_reason==\"tool_calls\":\n", + " message = response.choices[0].message\n", + " responses = handle_tool_call(message)\n", + " messages.append(message) # That's the assistant asking us to run a tool\n", + " for response in responses:\n", + " messages.append(response) # That's the result of the tool calls\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8dc18486-4d6b-4cbf-a6b8-16d08d7c4f54", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From e5916b7c169da92caf9a1cdfa3d452a8bb6fcc44 Mon Sep 17 00:00:00 2001 From: Zoya Hammad Date: Sun, 23 Mar 2025 16:58:17 +0500 Subject: [PATCH 38/43] Added Knowledge Worker to community-contributions --- .../markdown_knowledge_worker.ipynb | 359 ++++++++++++++++++ .../ui_markdown_knowledge_worker.ipynb | 353 +++++++++++++++++ 2 files changed, 712 insertions(+) create mode 100644 week5/community-contributions/markdown_knowledge_worker.ipynb create mode 100644 week5/community-contributions/ui_markdown_knowledge_worker.ipynb diff --git a/week5/community-contributions/markdown_knowledge_worker.ipynb b/week5/community-contributions/markdown_knowledge_worker.ipynb new file mode 100644 index 0000000..51597f5 --- /dev/null +++ b/week5/community-contributions/markdown_knowledge_worker.ipynb @@ -0,0 +1,359 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c25c6e94-f3de-4367-b2bf-269ba7160977", + "metadata": {}, + "source": [ + "## An Expert Knowledge Worker Question-Answering Agent using RAG" + ] + }, + { + "cell_type": "markdown", + "id": "15169580-cf11-4dee-8ec7-3a4ef59b19ee", + "metadata": {}, + "source": [ + "Aims\n", + "- Reads README.md files and loads data using TextLoader\n", + "- Splits into chunks using CharacterTextSplitter\n", + "- Converts chunks into vector embeddings and creates a datastore\n", + "- 2D and 3D visualisations\n", + "- Langchain to set up a conversation retrieval chain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "051cf881-357d-406b-8eae-1610651e40f1", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import glob\n", + "from dotenv import load_dotenv\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ccfd403a-5bdb-4a8c-b3fd-d47ae79e43f7", + "metadata": {}, + "outputs": [], + "source": [ + "# imports for langchain, plotly and Chroma\n", + "\n", + "from langchain.document_loaders import DirectoryLoader, TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.schema import Document\n", + "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", + "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_chroma import Chroma\n", + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.chains import ConversationalRetrievalChain\n", + "import numpy as np\n", + "from sklearn.manifold import TSNE\n", + "import plotly.graph_objects as go\n", + "import plotly.express as px\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2d853868-d2f6-43e1-b27c-b8e91d06b724", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = \"gpt-4o-mini\"\n", + "db_name = \"vector_db\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f152fc3b-0bf4-4d51-948f-95da1ebc030a", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24e621ac-df06-4af6-a60d-a9ed7adb884a", + "metadata": {}, + "outputs": [], + "source": [ + "# Read in documents using LangChain's loaders\n", + "\n", + "folder = \"my-knowledge-base/\"\n", + "text_loader_kwargs={'autodetect_encoding': True}\n", + "\n", + "loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", + "folder_docs = loader.load()\n", + "\n", + "for doc in folder_docs:\n", + " filename_md = os.path.basename(doc.metadata[\"source\"]) \n", + " filename, _ = os.path.splitext(filename_md) \n", + " doc.metadata[\"filename\"] = filename\n", + "\n", + "documents = folder_docs \n", + "\n", + "text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)\n", + "chunks = text_splitter.split_documents(documents)\n", + "\n", + "print(f\"Total number of chunks: {len(chunks)}\")\n", + "print(f\"Files found: {set(doc.metadata['filename'] for doc in documents)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f02f08ee-5ade-4f79-a500-045a8f1a532f", + "metadata": {}, + "outputs": [], + "source": [ + "# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n", + "\n", + "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n", + "\n", + "# Delete if already exists\n", + "\n", + "if os.path.exists(db_name):\n", + " Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n", + "\n", + "# Create vectorstore\n", + "\n", + "vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", + "print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f665f4d-ccb1-43fb-b901-040117925732", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's investigate the vectors\n", + "\n", + "collection = vectorstore._collection\n", + "count = collection.count()\n", + "\n", + "sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n", + "dimensions = len(sample_embedding)\n", + "print(f\"There are {count:,} vectors with {dimensions:,} dimensions in the vector store\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6208a971-e8b7-48bc-be7a-6dcb82967fd2", + "metadata": {}, + "outputs": [], + "source": [ + "# pre work\n", + "\n", + "result = collection.get(include=['embeddings','documents','metadatas'])\n", + "vectors = np.array(result['embeddings']) \n", + "documents = result['documents']\n", + "metadatas = result['metadatas']\n", + "filenames = [metadata['filename'] for metadata in metadatas]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb27bc8a-453b-4b19-84b4-dc495bb0e544", + "metadata": {}, + "outputs": [], + "source": [ + "import random\n", + "def random_color():\n", + " return f\"rgb({random.randint(0,255)},{random.randint(0,255)},{random.randint(0,255)})\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78db67e5-ef10-4581-b8ac-3e0281ceba45", + "metadata": {}, + "outputs": [], + "source": [ + "def show_embeddings_2d(result):\n", + " vectors = np.array(result['embeddings']) \n", + " documents = result['documents']\n", + " metadatas = result['metadatas']\n", + " filenames = [metadata['filename'] for metadata in metadatas]\n", + " filenames_unique = sorted(set(filenames))\n", + "\n", + " # color assignment\n", + " color_map = {name: random_color() for name in filenames_unique}\n", + " colors = [color_map[name] for name in filenames]\n", + "\n", + " tsne = TSNE(n_components=2, random_state=42,perplexity=4)\n", + " reduced_vectors = tsne.fit_transform(vectors)\n", + "\n", + " # Create the 2D scatter plot\n", + " fig = go.Figure(data=[go.Scatter(\n", + " x=reduced_vectors[:, 0],\n", + " y=reduced_vectors[:, 1],\n", + " mode='markers',\n", + " marker=dict(size=5,color=colors, opacity=0.8),\n", + " text=[f\"Type: {t}
      Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", + " hoverinfo='text'\n", + " )])\n", + "\n", + " fig.update_layout(\n", + " title='2D Chroma Vector Store Visualization',\n", + " scene=dict(xaxis_title='x',yaxis_title='y'),\n", + " width=800,\n", + " height=600,\n", + " margin=dict(r=20, b=10, l=10, t=40)\n", + " )\n", + "\n", + " fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c250166-cb5b-4a75-8981-fae2d6dfe509", + "metadata": {}, + "outputs": [], + "source": [ + "show_embeddings_2d(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b290e38-0800-4453-b664-7a7622ff5ed2", + "metadata": {}, + "outputs": [], + "source": [ + "def show_embeddings_3d(result):\n", + " vectors = np.array(result['embeddings']) \n", + " documents = result['documents']\n", + " metadatas = result['metadatas']\n", + " filenames = [metadata['filename'] for metadata in metadatas]\n", + " filenames_unique = sorted(set(filenames))\n", + "\n", + " # color assignment\n", + " color_map = {name: random_color() for name in filenames_unique}\n", + " colors = [color_map[name] for name in filenames]\n", + "\n", + " tsne = TSNE(n_components=3, random_state=42)\n", + " reduced_vectors = tsne.fit_transform(vectors)\n", + "\n", + " fig = go.Figure(data=[go.Scatter3d(\n", + " x=reduced_vectors[:, 0],\n", + " y=reduced_vectors[:, 1],\n", + " z=reduced_vectors[:, 2],\n", + " mode='markers',\n", + " marker=dict(size=5, color=colors, opacity=0.8),\n", + " text=[f\"Type: {t}
      Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", + " hoverinfo='text'\n", + " )])\n", + "\n", + " fig.update_layout(\n", + " title='3D Chroma Vector Store Visualization',\n", + " scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n", + " width=900,\n", + " height=700,\n", + " margin=dict(r=20, b=10, l=10, t=40)\n", + " )\n", + "\n", + " fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d1d034-2503-4176-b1e4-f248e31c4770", + "metadata": {}, + "outputs": [], + "source": [ + "show_embeddings_3d(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e79946a1-f93a-4b3a-8d19-deef40dec223", + "metadata": {}, + "outputs": [], + "source": [ + "# create a new Chat with OpenAI\n", + "llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", + "\n", + "# set up the conversation memory for the chat\n", + "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", + "\n", + "# the retriever is an abstraction over the VectorStore that will be used during RAG\n", + "retriever = vectorstore.as_retriever(search_kwargs={\"k\": 50})\n", + "\n", + "# putting it together: set up the conversation chain with the GPT 3.5 LLM, the vector store and memory\n", + "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59f90c85-c113-4482-8574-8a728ef25459", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(question, history):\n", + " result = conversation_chain.invoke({\"question\": question})\n", + " return result[\"answer\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0520a8ff-01a4-4fa6-9dc8-57da87272edc", + "metadata": {}, + "outputs": [], + "source": [ + "view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b4949b17-cd9c-4bff-bd5b-0f80df72e7dc", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week5/community-contributions/ui_markdown_knowledge_worker.ipynb b/week5/community-contributions/ui_markdown_knowledge_worker.ipynb new file mode 100644 index 0000000..5bf6f56 --- /dev/null +++ b/week5/community-contributions/ui_markdown_knowledge_worker.ipynb @@ -0,0 +1,353 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d13be0fd-db15-4ab1-860a-b00257051339", + "metadata": {}, + "source": [ + "## Gradio UI for Markdown-Based Q&A with Visualization" + ] + }, + { + "cell_type": "markdown", + "id": "bc63fbdb-66a9-4c10-8dbd-11476b5e2d21", + "metadata": {}, + "source": [ + "This interface enables users to:\n", + "- Upload Markdown files for processing\n", + "- Visualize similarity between document chunks in 2D and 3D using embeddings\n", + "- Ask questions and receive RAG enabled responses\n", + "- Mantain conversation context for better question answering\n", + "- Clear chat history when required for fresh sessions\n", + "- Store and retrieve embeddings using ChromaDB\n", + "\n", + "Integrates LangChain, ChromaDB, and OpenAI to process, store, and retrieve information efficiently." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91da28d8-8e29-44b7-a62a-a3a109753727", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e47f670a-e2cb-4700-95d0-e59e440677a1", + "metadata": {}, + "outputs": [], + "source": [ + "# imports for langchain, plotly and Chroma\n", + "\n", + "from langchain.document_loaders import DirectoryLoader, TextLoader\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.schema import Document\n", + "from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", + "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_chroma import Chroma\n", + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.chains import ConversationalRetrievalChain\n", + "import numpy as np\n", + "from sklearn.manifold import TSNE\n", + "import plotly.graph_objects as go\n", + "import plotly.express as px\n", + "import matplotlib.pyplot as plt\n", + "from random import randint\n", + "import shutil" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "362d4976-2553-4ed8-8fbb-49806145cad1", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install --upgrade gradio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "968b6e96-557e-439f-b2f1-942c05168641", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = \"gpt-4o-mini\"\n", + "db_name = \"vector_db\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "537f66de-6abf-4b34-8e05-6b9a9df8ae82", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "246c1c1b-fcfa-4f4c-b99c-024598751361", + "metadata": {}, + "outputs": [], + "source": [ + "folder = \"my-knowledge-base/\"\n", + "db_name = \"vectorstore_db\"\n", + "\n", + "def process_files(files):\n", + " os.makedirs(folder, exist_ok=True)\n", + "\n", + " processed_files = []\n", + " for file in files:\n", + " file_path = os.path.join(folder, os.path.basename(file)) # Get filename\n", + " shutil.copy(file, file_path)\n", + " processed_files.append(os.path.basename(file))\n", + "\n", + " # Load documents using LangChain's DirectoryLoader\n", + " text_loader_kwargs = {'autodetect_encoding': True}\n", + " loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", + " folder_docs = loader.load()\n", + "\n", + " # Assign filenames as metadata\n", + " for doc in folder_docs:\n", + " filename_md = os.path.basename(doc.metadata[\"source\"])\n", + " filename, _ = os.path.splitext(filename_md)\n", + " doc.metadata[\"filename\"] = filename\n", + "\n", + " documents = folder_docs \n", + "\n", + " # Split documents into chunks\n", + " text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)\n", + " chunks = text_splitter.split_documents(documents)\n", + "\n", + " # Initialize embeddings\n", + " embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n", + "\n", + " # Delete previous vectorstore\n", + " if os.path.exists(db_name):\n", + " Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n", + "\n", + " # Store in ChromaDB\n", + " vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", + "\n", + " # Retrieve results\n", + " collection = vectorstore._collection\n", + " result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n", + "\n", + " llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", + " memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", + " retriever = vectorstore.as_retriever(search_kwargs={\"k\": 35})\n", + " global conversation_chain\n", + " conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)\n", + "\n", + " processed_text = \"**Processed Files:**\\n\\n\" + \"\\n\".join(f\"- {file}\" for file in processed_files)\n", + " return result, processed_text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "48678d3a-0ab2-4aa4-aa9e-4160c6a9cb24", + "metadata": {}, + "outputs": [], + "source": [ + "def random_color():\n", + " return f\"rgb({randint(0,255)},{randint(0,255)},{randint(0,255)})\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6caed889-9bb4-42ad-b1c2-da051aefc802", + "metadata": {}, + "outputs": [], + "source": [ + "def show_embeddings_2d(result):\n", + " vectors = np.array(result['embeddings']) \n", + " documents = result['documents']\n", + " metadatas = result['metadatas']\n", + " filenames = [metadata['filename'] for metadata in metadatas]\n", + " filenames_unique = sorted(set(filenames))\n", + "\n", + " # color assignment\n", + " color_map = {name: random_color() for name in filenames_unique}\n", + " colors = [color_map[name] for name in filenames]\n", + "\n", + " tsne = TSNE(n_components=2, random_state=42,perplexity=4)\n", + " reduced_vectors = tsne.fit_transform(vectors)\n", + "\n", + " # Create the 2D scatter plot\n", + " fig = go.Figure(data=[go.Scatter(\n", + " x=reduced_vectors[:, 0],\n", + " y=reduced_vectors[:, 1],\n", + " mode='markers',\n", + " marker=dict(size=5,color=colors, opacity=0.8),\n", + " text=[f\"Type: {t}
      Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", + " hoverinfo='text'\n", + " )])\n", + "\n", + " fig.update_layout(\n", + " title='2D Chroma Vector Store Visualization',\n", + " scene=dict(xaxis_title='x',yaxis_title='y'),\n", + " width=800,\n", + " height=600,\n", + " margin=dict(r=20, b=10, l=10, t=40)\n", + " )\n", + "\n", + " return fig" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "de993495-c8cd-4313-a6bb-7d27494ecc13", + "metadata": {}, + "outputs": [], + "source": [ + "def show_embeddings_3d(result):\n", + " vectors = np.array(result['embeddings']) \n", + " documents = result['documents']\n", + " metadatas = result['metadatas']\n", + " filenames = [metadata['filename'] for metadata in metadatas]\n", + " filenames_unique = sorted(set(filenames))\n", + "\n", + " # color assignment\n", + " color_map = {name: random_color() for name in filenames_unique}\n", + " colors = [color_map[name] for name in filenames]\n", + "\n", + " tsne = TSNE(n_components=3, random_state=42)\n", + " reduced_vectors = tsne.fit_transform(vectors)\n", + "\n", + " fig = go.Figure(data=[go.Scatter3d(\n", + " x=reduced_vectors[:, 0],\n", + " y=reduced_vectors[:, 1],\n", + " z=reduced_vectors[:, 2],\n", + " mode='markers',\n", + " marker=dict(size=5, color=colors, opacity=0.8),\n", + " text=[f\"Type: {t}
      Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", + " hoverinfo='text'\n", + " )])\n", + "\n", + " fig.update_layout(\n", + " title='3D Chroma Vector Store Visualization',\n", + " scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n", + " width=900,\n", + " height=700,\n", + " margin=dict(r=20, b=10, l=10, t=40)\n", + " )\n", + "\n", + " return fig" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7b7bf62b-c559-4e97-8135-48cd8d97a40e", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(question, history):\n", + " result = conversation_chain.invoke({\"question\": question})\n", + " return result[\"answer\"]\n", + "\n", + "def visualise_data(result):\n", + " fig_2d = show_embeddings_2d(result)\n", + " fig_3d = show_embeddings_3d(result)\n", + " return fig_2d,fig_3d" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99217109-fbee-4269-81c7-001e6f768a72", + "metadata": {}, + "outputs": [], + "source": [ + "css = \"\"\"\n", + ".btn {background-color: #1d53d1;}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1429ea1-1d9f-4be6-b270-01997864c642", + "metadata": {}, + "outputs": [], + "source": [ + "with gr.Blocks(css=css) as ui:\n", + " gr.Markdown(\"# Markdown-Based Q&A with Visualization\")\n", + " with gr.Row():\n", + " file_input = gr.Files(file_types=[\".md\"], label=\"Upload Markdown Files\")\n", + " with gr.Column(scale=1):\n", + " processed_output = gr.Markdown(\"Progress\")\n", + " with gr.Row():\n", + " process_btn = gr.Button(\"Process Files\",elem_classes=[\"btn\"])\n", + " with gr.Row():\n", + " question = gr.Textbox(label=\"Chat \", lines=10)\n", + " answer = gr.Markdown(label= \"Response\")\n", + " with gr.Row():\n", + " question_btn = gr.Button(\"Ask a Question\",elem_classes=[\"btn\"])\n", + " clear_btn = gr.Button(\"Clear Output\",elem_classes=[\"btn\"])\n", + " with gr.Row():\n", + " plot_2d = gr.Plot(label=\"2D Visualization\")\n", + " plot_3d = gr.Plot(label=\"3D Visualization\")\n", + " with gr.Row():\n", + " visualise_btn = gr.Button(\"Visualise Data\",elem_classes=[\"btn\"])\n", + "\n", + " result = gr.State([])\n", + " # Action: When button is clicked, process files and update visualization\n", + " clear_btn.click(fn=lambda:(\"\", \"\"), inputs=[],outputs=[question, answer])\n", + " process_btn.click(process_files, inputs=[file_input], outputs=[result,processed_output])\n", + " question_btn.click(chat, inputs=[question], outputs= [answer])\n", + " visualise_btn.click(visualise_data, inputs=[result], outputs=[plot_2d,plot_3d])\n", + "\n", + "# Launch Gradio app\n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3686048-ac29-4df1-b816-e58996913ef1", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 212b6da1862e05cba9f559fb3d203df52a6d62dd Mon Sep 17 00:00:00 2001 From: Petri Alapiessa Date: Tue, 25 Mar 2025 11:47:01 +0200 Subject: [PATCH 39/43] vectorstore with openAI --- .../day5_vectorstore_openai.ipynb | 283 ++++++++++++++++++ 1 file changed, 283 insertions(+) create mode 100644 week5/community-contributions/day5_vectorstore_openai.ipynb diff --git a/week5/community-contributions/day5_vectorstore_openai.ipynb b/week5/community-contributions/day5_vectorstore_openai.ipynb new file mode 100644 index 0000000..a1aa575 --- /dev/null +++ b/week5/community-contributions/day5_vectorstore_openai.ipynb @@ -0,0 +1,283 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Import documents exported from Evernote to a vectorstore\n", + "### Use OpenAI file search with responses API\n", + "#### Prerequisite steps\n", + "* exported notes from your Evernote notebook as html \n", + "* converted the notes further to md-files and remove broken image links (use python/AI)\n", + "* the files are named with note titles\n", + "\n", + "Files are in one folder.\n", + "\n", + "\n", + "##### Query ChromaDB vectorstore\n", + "I tried to accomplish this task with RAG like the example by https://github.com/ed-donner/llm_engineering/commits?author=dinorrusso.\n", + "\n", + "I thought this to be a trivial task, but it was not ๐Ÿ˜ƒ That example uses Ollama running locally.\n", + "Even though the retriever had the information required, it was dropped from the answer.\n", + "\n", + "I tried then to use Chroma + OpenAI. After several attemps succeeded to create a vectorstore and query it. That's it for this time.\n", + "\n", + "##### Openai vectorstore, see bottom of the notebook\n", + "One attempt was to use OpenAI's fileSearch-tool which seemed pretty straightforward.\n", + "The con: loading files was not working always. Code is left though as reference." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Imports\n", + "from dotenv import load_dotenv\n", + "import gradio as gr\n", + "import openai\n", + "import chromadb\n", + "from chromadb.config import Settings\n", + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Load files to vectorstore" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "load_dotenv(override=True)\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "openai.api_key = os.environ['OPENAI_API_KEY']\n", + "\n", + "def chunk_text(text, max_tokens=2000):\n", + " words = text.split()\n", + " chunks = []\n", + " current_chunk = []\n", + " current_length = 0\n", + "\n", + " for word in words:\n", + " current_length += len(word) + 1 # +1 for the space\n", + " if current_length > max_tokens:\n", + " chunks.append(\" \".join(current_chunk))\n", + " current_chunk = [word]\n", + " current_length = len(word) + 1\n", + " else:\n", + " current_chunk.append(word)\n", + "\n", + " if current_chunk:\n", + " chunks.append(\" \".join(current_chunk))\n", + "\n", + " return chunks\n", + "\n", + "\n", + "# # Set up OpenAI API key\n", + "# openai.api_key = \"your_openai_api_key\" # Replace with your API key\n", + "chroma_client = chromadb.Client()\n", + "\n", + "# Create or get the existing collection\n", + "collection_name = \"EverNotes\"\n", + "\n", + "try:\n", + " existing_collection = chroma_client.get_collection(name=collection_name)\n", + " if existing_collection.count() > 0:\n", + " chroma_client.delete_collection(name=collection_name)\n", + "except:\n", + " print(f\"Collection {collection_name} does not exist. Creating a new one.\")\n", + "\n", + "# Create a collection in ChromaDB\n", + "collection = chroma_client.get_or_create_collection(name=collection_name)\n", + "\n", + "# Define your data\n", + "# it should be like this\n", + "# documents = [\"OpenAI is revolutionizing AI.\", \"ChromaDB makes embedding storage easy.\"]\n", + "# metadata = [{\"id\": 1}, {\"id\": 2}]\n", + "\n", + "folder_path = os.getenv('EVERNOTE_EXPORT')\n", + "documents = []\n", + "\n", + "for root, dirs, files in os.walk(folder_path):\n", + " for file in files:\n", + " if file.endswith('.md'): # Change this to the file extension you need\n", + " with open(os.path.join(root, file), 'r') as f:\n", + " documents.append(f.read())\n", + "\n", + "metadata = [{\"id\": i + 1} for i in range(len(documents))]\n", + "\n", + "# Generate embeddings using OpenAI\n", + "def get_embedding(text, model=\"text-embedding-ada-002\"):\n", + " response = openai.embeddings.create(input=text, model=model)\n", + " return response.data[0].embedding\n", + "\n", + "# Add documents and embeddings to ChromaDB in chunks\n", + "for doc, meta in zip(documents, metadata):\n", + " chunks = chunk_text(doc)\n", + " for chunk in chunks:\n", + " embedding = get_embedding(chunk)\n", + " collection.add(\n", + " documents=[chunk],\n", + " embeddings=[embedding],\n", + " metadatas=[meta],\n", + " ids=[str(meta[\"id\"])]\n", + " )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Query ChromaDB" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# \n", + "query_text = \"Is there a video for Fitting the Shimano speed hub 7\"\n", + "query_embedding = get_embedding(query_text)\n", + "\n", + "results = collection.query(\n", + " query_embeddings=[query_embedding],\n", + " n_results=2\n", + ")\n", + "\n", + "print(\"Query Results:\", results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Gradio interface" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Function to query ChromaDB\n", + "def query_chromadb(query_text):\n", + " query_embedding = get_embedding(query_text)\n", + " results = collection.query(\n", + " query_embeddings=[query_embedding],\n", + " n_results=2\n", + " )\n", + " return results\n", + "\n", + "# Gradio interface\n", + "def gradio_interface(query_text):\n", + " results = query_chromadb(query_text)\n", + " return results\n", + "\n", + "# Create Gradio app\n", + "iface = gr.Interface(\n", + " fn=gradio_interface,\n", + " inputs=\"text\",\n", + " outputs=\"text\",\n", + " title=\"ChromaDB Query Interface\",\n", + " description=\"Enter your query to search the ChromaDB collection.\"\n", + ")\n", + "\n", + "iface.launch()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Below OpenAI filesearch variant which had some failures in file uploads." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import glob\n", + "folder_path = os.environ['EVERNOTE_EXPORT'] \n", + "# Filter out other except .md-files\n", + "md_files = glob.glob(os.path.join(folder_path, '*.md'))\n", + "file_paths = [os.path.join(folder_path, file) for file in md_files]\n", + "file_streams = [open(path, 'rb') for path in file_paths]\n", + "\n", + "# Create vector store\n", + "vector_store = openai.vector_stores.create(\n", + " name=\"Evernote notes\",\n", + ")\n", + "\n", + "# Batch Upload Limit: You can upload up to 100 files in a single batch\n", + "# https://community.openai.com/t/max-100-files-in-vector-store/729876/4\n", + "batch_size = 90\n", + "for i in range(0, len(file_streams), batch_size):\n", + " batch = file_streams[i:i + batch_size]\n", + " file_batch = openai.vector_stores.file_batches.upload_and_poll(\n", + " vector_store_id=vector_store.id,\n", + " files=batch\n", + " )\n", + " print(file_batch.status)\n", + " print(file_batch.file_counts)\n", + "\n", + "# There can be some fails in file counts:\n", + "# \"FileCounts(cancelled=0, completed=89, failed=1, in_progress=0, total=90)\"\"\n", + "# Usually 1 % fails. Did not find solution for improving that yet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "response = openai.responses.create(\n", + " model=\"gpt-4o-mini\",\n", + " input=\"Is there a video for Fitting the Shimano speed hub 7?\",\n", + " tools=[{\n", + " \"type\": \"file_search\",\n", + " \"vector_store_ids\": [vector_store.id]\n", + " }],\n", + " include=None\n", + ")\n", + "print(response)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From e0c179729497dfd93c8afbebeb42febb4213b79b Mon Sep 17 00:00:00 2001 From: Youssef Khalil Youssef Date: Tue, 25 Mar 2025 15:44:35 +0400 Subject: [PATCH 40/43] Added my contribution to community-contributions in week 2 --- .../brochure_links_tone.ipynb | 567 ++++++++++++++++++ 1 file changed, 567 insertions(+) create mode 100644 week2/community-contributions/brochure_links_tone.ipynb diff --git a/week2/community-contributions/brochure_links_tone.ipynb b/week2/community-contributions/brochure_links_tone.ipynb new file mode 100644 index 0000000..12cb9a2 --- /dev/null +++ b/week2/community-contributions/brochure_links_tone.ipynb @@ -0,0 +1,567 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "c79dc33e-1a3b-4601-a8f2-219b7a9b6d88", + "metadata": {}, + "source": [ + "# Company Brochure - Relevant Links and Custom Tone\n", + "\n", + "Using GPT to generate a company brochure with the relevant links functionality and the ability to choose the desired tone." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e32f4aa7-6fc4-4dc9-8058-58e6a7f329c5", + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "\n", + "import os\n", + "import requests\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "d1d65a21-bbba-44ff-a2be-85bf2055a493", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key set and good to go.\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(\"OpenAI API Key set and good to go.\")\n", + "else:\n", + " print(\"OpenAI API Key not set. :(\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "c5db63fe-5da8-496e-9b37-139598d600a7", + "metadata": {}, + "outputs": [], + "source": [ + "# Setting up the OpenAI object\n", + "\n", + "openai = OpenAI()\n", + "gpt_model = 'gpt-4o-mini'" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "535da52f-b280-48ce-aa8b-f82f9f9805d9", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8d5757c4-95f4-4038-8ed4-8c81da5112b0", + "metadata": {}, + "outputs": [], + "source": [ + "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", + "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d5fd31ac-7c81-454a-a1dc-4c58bd3db246", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links_user_prompt(website):\n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", + "Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "e8b67492-1ba4-4aad-a588-39116128fa18", + "metadata": {}, + "outputs": [], + "source": [ + "def gpt_get_links(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model= gpt_model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "e8846e7a-ace2-487e-a0a8-fccb389f2eb9", + "metadata": {}, + "outputs": [], + "source": [ + "# This function provides uses the get_contents method in the Website Class as well as GPT to find relevant links.\n", + "\n", + "def get_all_details(url):\n", + " result = \"Landing page:\\n\"\n", + " result += Website(url).get_contents()\n", + " links = gpt_get_links(url)\n", + " print(\"Found links:\", links)\n", + " for link in links[\"links\"]:\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += Website(link[\"url\"]).get_contents()\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "18b42319-8342-4b9c-bef6-8b72acf92ab3", + "metadata": {}, + "outputs": [], + "source": [ + "def get_brochure_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; \\\n", + " use this information to build a short brochure of the company in markdown.\\n\"\n", + " \n", + " user_prompt += get_all_details(url)\n", + " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "d7748293-a616-41de-93cb-89f65cc5c73d", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's create a call that streams back results\n", + "# If you'd like a refresher on Generators (the \"yield\" keyword),\n", + "# Please take a look at the Intermediate Python notebook in week1 folder.\n", + "\n", + "def stream_brochure(company_name, url, tone):\n", + "\n", + " system_message = f\"You are an assistant that analyzes the content of several relevant pages from a company website \\\n", + " and creates a short brochure about the company for prospective customers, investors, and recruits. \\\n", + " Include details of company culture, customers and careers/jobs if you have the information. \\\n", + " Respond in markdown, and use a {tone.lower()} tone throughout the brochure.\"\n", + "\n", + " \n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ]\n", + " stream = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages,\n", + " stream=True\n", + " )\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " yield result" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "15222832-06e0-4452-a8e1-59b9b1755488", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
      " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'company page', 'url': 'https://www.snowflake.com/en/company/overview/about-snowflake/'}, {'type': 'company leadership page', 'url': 'https://www.snowflake.com/en/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'company ESG page', 'url': 'https://www.snowflake.com/en/company/overview/esg/'}, {'type': 'company ventures page', 'url': 'https://www.snowflake.com/en/company/overview/snowflake-ventures/'}, {'type': 'end data disparity page', 'url': 'https://www.snowflake.com/en/company/overview/end-data-disparity/'}]}\n", + "Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'about page', 'url': 'https://www.snowflake.com/company/overview/about-snowflake/'}, {'type': 'leadership page', 'url': 'https://www.snowflake.com/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'investor relations', 'url': 'https://investors.snowflake.com/overview/default.aspx'}, {'type': 'ESG page', 'url': 'https://www.snowflake.com/company/overview/esg/'}, {'type': 'snowflake ventures', 'url': 'https://www.snowflake.com/company/overview/snowflake-ventures/'}, {'type': 'end data disparity', 'url': 'https://www.snowflake.com/company/overview/end-data-disparity/'}]}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 464, in _make_request\n", + " self._validate_conn(conn)\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1093, in _validate_conn\n", + " conn.connect()\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 741, in connect\n", + " sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 920, in _ssl_wrap_socket_and_match_hostname\n", + " ssl_sock = ssl_wrap_socket(\n", + " ^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 460, in ssl_wrap_socket\n", + " ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 504, in _ssl_wrap_socket_impl\n", + " return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 517, in wrap_socket\n", + " return self.sslsocket_class._create(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1104, in _create\n", + " self.do_handshake()\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1382, in do_handshake\n", + " self._sslobj.do_handshake()\n", + "ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n", + " response = self._make_request(\n", + " ^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 488, in _make_request\n", + " raise new_e\n", + "urllib3.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", + "\n", + "The above exception was the direct cause of the following exception:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 667, in send\n", + " resp = conn.urlopen(\n", + " ^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 841, in urlopen\n", + " retries = retries.increment(\n", + " ^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/retry.py\", line 519, in increment\n", + " raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + "urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n", + " response = await route_utils.call_process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n", + " output = await app.get_blocks().process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2103, in process_api\n", + " result = await self.call_function(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1662, in call_function\n", + " prediction = await utils.async_iteration(iterator)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 735, in async_iteration\n", + " return await anext(iterator)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 729, in __anext__\n", + " return await anyio.to_thread.run_sync(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n", + " return await get_async_backend().run_sync_in_worker_thread(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n", + " return await future\n", + " ^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n", + " result = context.run(func, *args)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 712, in run_sync_iterator_async\n", + " return next(iterator)\n", + " ^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 873, in gen_wrapper\n", + " response = next(iterator)\n", + " ^^^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/601932735.py\", line 15, in stream_brochure\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/3764629295.py\", line 6, in get_brochure_user_prompt\n", + " user_prompt += get_all_details(url)\n", + " ^^^^^^^^^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/2913862724.py\", line 5, in get_all_details\n", + " result += Website(url).get_contents()\n", + " ^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/1579423502.py\", line 15, in __init__\n", + " response = requests.get(url, headers=headers)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 73, in get\n", + " return request(\"get\", url, params=params, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 59, in request\n", + " return session.request(method=method, url=url, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n", + " resp = self.send(prep, **send_kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n", + " r = adapter.send(request, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 698, in send\n", + " raise SSLError(e, request=request)\n", + "requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 464, in _make_request\n", + " self._validate_conn(conn)\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1093, in _validate_conn\n", + " conn.connect()\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 741, in connect\n", + " sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 920, in _ssl_wrap_socket_and_match_hostname\n", + " ssl_sock = ssl_wrap_socket(\n", + " ^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 460, in ssl_wrap_socket\n", + " ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 504, in _ssl_wrap_socket_impl\n", + " return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 517, in wrap_socket\n", + " return self.sslsocket_class._create(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1104, in _create\n", + " self.do_handshake()\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1382, in do_handshake\n", + " self._sslobj.do_handshake()\n", + "ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n", + " response = self._make_request(\n", + " ^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 488, in _make_request\n", + " raise new_e\n", + "urllib3.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", + "\n", + "The above exception was the direct cause of the following exception:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 667, in send\n", + " resp = conn.urlopen(\n", + " ^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 841, in urlopen\n", + " retries = retries.increment(\n", + " ^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/retry.py\", line 519, in increment\n", + " raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + "urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n", + "\n", + "During handling of the above exception, another exception occurred:\n", + "\n", + "Traceback (most recent call last):\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n", + " response = await route_utils.call_process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n", + " output = await app.get_blocks().process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2103, in process_api\n", + " result = await self.call_function(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1662, in call_function\n", + " prediction = await utils.async_iteration(iterator)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 735, in async_iteration\n", + " return await anext(iterator)\n", + " ^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 729, in __anext__\n", + " return await anyio.to_thread.run_sync(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n", + " return await get_async_backend().run_sync_in_worker_thread(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n", + " return await future\n", + " ^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n", + " result = context.run(func, *args)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 712, in run_sync_iterator_async\n", + " return next(iterator)\n", + " ^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 873, in gen_wrapper\n", + " response = next(iterator)\n", + " ^^^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/601932735.py\", line 15, in stream_brochure\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/3764629295.py\", line 6, in get_brochure_user_prompt\n", + " user_prompt += get_all_details(url)\n", + " ^^^^^^^^^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/2913862724.py\", line 5, in get_all_details\n", + " result += Website(url).get_contents()\n", + " ^^^^^^^^^^^^\n", + " File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/1579423502.py\", line 15, in __init__\n", + " response = requests.get(url, headers=headers)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 73, in get\n", + " return request(\"get\", url, params=params, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 59, in request\n", + " return session.request(method=method, url=url, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n", + " resp = self.send(prep, **send_kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n", + " r = adapter.send(request, **kwargs)\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 698, in send\n", + " raise SSLError(e, request=request)\n", + "requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://www.petrofac.com/who-we-are/'}, {'type': 'what we do page', 'url': 'https://www.petrofac.com/who-we-are/what-we-do/'}, {'type': 'careers page', 'url': 'https://www.petrofac.com/careers/'}, {'type': 'our structure page', 'url': 'https://www.petrofac.com/who-we-are/our-structure/'}, {'type': 'energy transition page', 'url': 'https://www.petrofac.com/who-we-are/energy-transition/'}, {'type': 'sustainability and ESG page', 'url': 'https://www.petrofac.com/who-we-are/sustainability-and-esg/'}, {'type': 'investor relations page', 'url': 'https://www.petrofac.com/investors/'}, {'type': 'services page', 'url': 'https://www.petrofac.com/services/'}, {'type': 'where we operate page', 'url': 'https://www.petrofac.com/where-we-operate/'}]}\n" + ] + } + ], + "source": [ + "view = gr.Interface(\n", + " fn=stream_brochure,\n", + " inputs=[\n", + " gr.Textbox(label=\"Company name:\"),\n", + " gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", + " gr.Textbox(label=\"Tone:\")],\n", + " outputs=[gr.Markdown(label=\"Brochure:\")],\n", + " flagging_mode=\"never\"\n", + ")\n", + "view.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70d6398c-21dd-44f8-ba7d-0204414dffa0", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d27413664af340cf33997d22a61e92db0cee3553 Mon Sep 17 00:00:00 2001 From: "Palbha Kulkarni (Nazwale)" Date: Tue, 25 Mar 2025 16:46:16 -0400 Subject: [PATCH 41/43] Create day5_openai_whisper_llamainstruct --- .../day5_openai_whisper_llamainstruct | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 week3/community-contributions/day5_openai_whisper_llamainstruct diff --git a/week3/community-contributions/day5_openai_whisper_llamainstruct b/week3/community-contributions/day5_openai_whisper_llamainstruct new file mode 100644 index 0000000..c11e2b1 --- /dev/null +++ b/week3/community-contributions/day5_openai_whisper_llamainstruct @@ -0,0 +1,78 @@ +import gradio as gr +import torch +from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer, AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline +from huggingface_hub import login +import os + +# Use the secret stored in the Hugging Face space +token = os.getenv("HF_TOKEN") +login(token=token) + +# Whisper Model Optimization +model = "openai/whisper-tiny" +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + +processor = AutoProcessor.from_pretrained(model) + + +transcriber = pipeline( + "automatic-speech-recognition", + model=model, + tokenizer=processor.tokenizer, + feature_extractor=processor.feature_extractor, + device=0 if torch.cuda.is_available() else "cpu", +) + + + +# Function to Transcribe & Generate Minutes +def process_audio(audio_file): + if audio_file is None: + return "Error: No audio provided!" + + # Transcribe audio + transcript = transcriber(audio_file)["text"] + del transcriber + del processor + # LLaMA Model Optimization + LLAMA = "meta-llama/Llama-3.2-3B-Instruct" + llama_quant_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_use_double_quant=True, + bnb_4bit_compute_dtype=torch.bfloat16, + bnb_4bit_quant_type="nf4" + ) + + tokenizer = AutoTokenizer.from_pretrained(LLAMA) + tokenizer.pad_token = tokenizer.eos_token + model = AutoModelForCausalLM.from_pretrained( + LLAMA, + torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, + device_map="auto" + ) + # Generate meeting minutes + system_message = "You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown." + user_prompt = f"Below is an extract transcript of a Denver council meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\n{transcript}" + + messages = [ + {"role": "system", "content": system_message}, + {"role": "user", "content": user_prompt} + ] + + inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(DEVICE) + streamer = TextStreamer(tokenizer) + outputs = model.generate(inputs, max_new_tokens=2000, streamer=streamer) + + return tokenizer.decode(outputs[0], skip_special_tokens=True) + +# Gradio Interface +interface = gr.Interface( + fn=process_audio, + inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"), + outputs="text", + title="Meeting Minutes Generator", + description="Upload or record an audio file to get structured meeting minutes in Markdown.", +) + +# Launch App +interface.launch() From 4e29a2a8d49015d8c6ab479c72141ba8bb80cbd1 Mon Sep 17 00:00:00 2001 From: Nejc Firbas Date: Wed, 26 Mar 2025 00:52:29 +0100 Subject: [PATCH 42/43] Michelin-star cooking assistant week1 --- .../day1_michelin_start_cook.ipynb | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 week1/community-contributions/day1_michelin_start_cook.ipynb diff --git a/week1/community-contributions/day1_michelin_start_cook.ipynb b/week1/community-contributions/day1_michelin_start_cook.ipynb new file mode 100644 index 0000000..3cee7ed --- /dev/null +++ b/week1/community-contributions/day1_michelin_start_cook.ipynb @@ -0,0 +1,87 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "44aba2a0-c6eb-4fc1-a5cc-0a8f8679dbb8", + "metadata": {}, + "source": [ + "## Michelin-star cook..." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d4d58124-5e9a-4f5a-9e0a-ff74f43896a8", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "67dc3099-2ccc-4ee8-8ff2-0dbbe4ae2fcb", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are a professional chef in a Michelin-star restaurant. You will help me cook restaurant-style dishes using the ingredients I have left in my refrigerator.\\\n", + "You will provide detailed instructions with precise times and measurements in grams and include calorie information for raw ingredients, not cooked ones.\\\n", + "Add the caloric information at the end. Your responses should be formatted in Markdown.\"\n", + "\n", + "user_prompt = \"\"\"\n", + "Help me with a recipe using the ingredients I have left in the refrigerator. I have spinach, eggs, pasta, rice, chicken, beef, carrots, potatoes, butter, milk, cheese, tomatoes, red peppers, and all spices in the pantry.\\n\\n\n", + "\"\"\"\n", + "\n", + "messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + "]\n", + " \n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", + "\n", + "# Step 4: print the result in markdown format\n", + "pretty_response = Markdown(response.choices[0].message.content)\n", + "display(pretty_response)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 718988286f75f7a43318a05caf6d5f2e4b130dc8 Mon Sep 17 00:00:00 2001 From: Eli Waltuch Date: Wed, 26 Mar 2025 13:28:15 +0200 Subject: [PATCH 43/43] Community Contribution: Week 1, Days 1 and 2 A python program that generates a bedtie story based on the command line arguments you give it. Currently supports OpenAI and ollama. example openai usage: python ./week1-day1_2-bedtime-storyteller.py openai Linux Where Linux is the subject you want the story to be about. example ollama usage: python ./week1-day1_2-bedtime-storyteller.py ollama --model deepseek-r1:1.5b Bears Enjoy! Signed-off-by: Eli Waltuch --- .../week1-day1_2-bedtime-storyteller.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 week1/community-contributions/week1-day1_2-bedtime-storyteller.py diff --git a/week1/community-contributions/week1-day1_2-bedtime-storyteller.py b/week1/community-contributions/week1-day1_2-bedtime-storyteller.py new file mode 100644 index 0000000..f6fc6ef --- /dev/null +++ b/week1/community-contributions/week1-day1_2-bedtime-storyteller.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +import os +import argparse +from dotenv import load_dotenv +from openai import OpenAI + +def load_openai_key(): + # Load environment variables in a file called .env + load_dotenv(override=True) + api_key = os.getenv('OPENAI_API_KEY') + + # Check the key + if not api_key: + return "Error: No API key was found!" + elif not api_key.startswith("sk-proj-"): + return "Error: An API key was found, but it doesn't start sk-proj-; please check you're using the right key" + elif api_key.strip() != api_key: + return "Error: An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them!" + else: + return "API key found and looks good so far!" + +def ask_llm(client, model, user_prompt): + system_prompt = """ + you are a writing assistant with an expertise in children's stories. + Write a bedtime story inspired by the subject below. + The story should have a begining, middle, and end. + The story shoukd be appropriate for children ages 5-8 and have a positive message. + I should be able to read the entire story in about 3 minutes + """ + response = client.chat.completions.create( + model = model, + messages = [ {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}] + ) + return response.choices[0].message.content + +def main(): + parser = argparse.ArgumentParser(description="AI Bedtime Storyteller") + parser.add_argument("provider", choices=["openai", "ollama"], help="AI provider to use") + parser.add_argument("--model", help="Model to use for Ollama (required if provider is 'ollama')", required="ollama" in parser.parse_known_args()[0].provider) + parser.add_argument("subject", help="What do you want the story to be about?") + + args = parser.parse_args() + + if args.provider == "openai": + load_openai_key() + client = OpenAI() + model = "gpt-4o-mini" + elif args.provider == "ollama": + client = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama') + model = args.model + else: + return "Error: invalid provider!" + + user_prompt = args.subject + + result = ask_llm(client, model, user_prompt) + print("AI Response:", result) + +if __name__ == "__main__": + main() +