
Merge branch 'main' of github.com:ed-donner/llm_engineering

pull/79/head
Edward Donner 4 months ago
commit f0c3318904
  1. 226
      week1/community-contributions/day-1-Stock-data-analysis.ipynb
  2. 213
      week1/community-contributions/day1-generate-social-media-posts.ipynb
  3. 276
      week1/community-contributions/day1-youtube-video-summarization.ipynb
  4. 130
      week2/community-contributions/Gemini-api.ipynb
  5. 393
      week2/community-contributions/day2-gemini.ipynb
  6. 310
      week2/community-contributions/day3-gemini.ipynb
  7. 762
      week2/community-contributions/day5.ipynb
  8. 444
      week2/community-contributions/day5_llama3.1_tools_usecase.ipynb
  9. 2
      week2/day5.ipynb
  10. 302
      week3/community-contributions/day5_with_Gradio.ipynb
  11. 211
      week5/community-contributions/day5 - generating answers with citations.ipynb

226
week1/community-contributions/day-1-Stock-data-analysis.ipynb

@@ -0,0 +1,226 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "306f1a67-4f1c-4aed-8f80-2a8458a1bce5",
"metadata": {},
"source": [
"# Stock data analysis"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "markdown",
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
"metadata": {},
"source": [
"# Connecting to OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "51d42a08-188e-4c56-9578-47cd549bd1d8",
"metadata": {},
"outputs": [],
"source": [
"from urllib.parse import urlencode\n",
"import datetime\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "682eff74-55c4-4d4b-b267-703edbc293c7",
"metadata": {},
"outputs": [],
"source": [
"class YahooFinanceWebsite:\n",
" def __init__(self, stock_symbol):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.stock_symbol = stock_symbol.upper()\n",
"\n",
" def __build_url(self, params):\n",
" base_url = f\"https://finance.yahoo.com/quote/{self.stock_symbol}/history/\"\n",
" query_string = urlencode(params)\n",
" return f\"{base_url}?{query_string}\"\n",
"\n",
" def get_stock_data(self):\n",
" datetime_now = datetime.datetime.now()\n",
" datetime_year_ago = datetime_now - datetime.timedelta(days=365)\n",
" params = {\"frequency\": \"1wk\", \"period1\": datetime_year_ago.timestamp(), \"period2\": datetime_now.timestamp()}\n",
" url = self.__build_url(params)\n",
" response = requests.get(url, headers=headers)\n",
"\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" \n",
" title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
"\n",
" html_table_data = soup.find(\"table\")\n",
"\n",
" return title, html_table_data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70b8d7e7-51e7-4392-9b85-9ac9f67a907c",
"metadata": {},
"outputs": [],
"source": [
"def build_stock_analysis_prompt(stock_symbol, title, stock_table_data):\n",
" sys_prompt = r\"\"\"You are an assistant that analyzes the contents of HTML formated table that contains data on a specific stock.\n",
" The HTML table contains the date, open price, close price, low and highs aggregated for every week over one year timeframe.\n",
" Ignoring text, tags or html attributes that might be navigation related. \n",
" Respond in Markdown format\"\"\"\n",
" \n",
" user_prompt = f\"The data provided below in the HTML table format for {stock_symbol} from the Yahoo Finances.\\\n",
" Make the explaination easy enough for a newbie to understand. \\\n",
" Analyze and Summarize the trends on this stock:\\n{stock_table_data}\\n\\n\\\n",
" Also, calculate the total returns in percentage one could have expected over this period.\"\n",
" \n",
" return [\n",
" {\"role\": \"system\", \"content\": sys_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de514421-4cc8-4881-85b4-97f03e94c589",
"metadata": {},
"outputs": [],
"source": [
"def analyze_stock_trends(stock_symbol):\n",
" stock_data_page = YahooFinanceWebsite(stock_symbol)\n",
" title, stock_table_data = stock_data_page.get_stock_data()\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = build_stock_analysis_prompt(stock_symbol, title, stock_table_data)\n",
" )\n",
" return response.choices[0].message.content\n",
"\n",
"def display_analysis(stock_symbol):\n",
" display(Markdown(analyze_stock_trends(stock_symbol)))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "41acc36f-484a-4257-a240-cf27520e7396",
"metadata": {},
"outputs": [],
"source": [
"display_analysis(\"GOOG\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7e09541f-bbc4-4cf3-a1ef-9ed5e1b718e4",
"metadata": {},
"outputs": [],
"source": [
"display_analysis(\"PFE\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e6af9395-0c5c-4265-a309-baba786bfa71",
"metadata": {},
"outputs": [],
"source": [
"display_analysis(\"AAPL\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "afe4f6d1-a6ea-44b5-81ae-8e756cfc0d84",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
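
A quick way to sanity-check the total-return figure the model reports is to parse the scraped history table with pandas and compute it directly. This is a minimal sketch, assuming pandas (plus lxml for `read_html`) is installed and that the Yahoo table keeps a "Close"-style column; the exact column names are an assumption and may differ.

```python
# Sketch: compute total return locally from the scraped Yahoo Finance table
from io import StringIO
import pandas as pd

def total_return_from_table(html_table_data) -> float:
    # Parse the bs4 <table> tag into a DataFrame
    df = pd.read_html(StringIO(str(html_table_data)))[0]
    # Pick the first column whose name mentions "Close" (assumed column naming)
    close_col = [c for c in df.columns if "Close" in str(c)][0]
    # Drop non-numeric rows such as dividend entries
    closes = pd.to_numeric(df[close_col], errors="coerce").dropna()
    oldest, latest = closes.iloc[-1], closes.iloc[0]  # Yahoo lists newest rows first
    return (latest - oldest) / oldest * 100

# Example usage:
# title, table = YahooFinanceWebsite("GOOG").get_stock_data()
# print(f"Total return: {total_return_from_table(table):.1f}%")
```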

213
week1/community-contributions/day1-generate-social-media-posts.ipynb

@@ -0,0 +1,213 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9ae10427-6ca2-4ac0-b6a0-e9206dd3cb52",
"metadata": {},
"source": [
"### Using OpenAI gpt-4o-mini model to generate social media posts for events"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "477fe060-a11f-424f-bac4-34c5121cf437",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "61f012e5-cdba-48cb-ae74-df9659c23d90",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "19c79615-57aa-40e0-a83b-891f43df4f65",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "68ad05f8-dfcc-47b1-ba16-b35bedeff48b",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "acff6c95-77a5-40f0-bf9f-7d47cec987fc",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
"\n",
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
" \n",
"# Generate a summary of content fetched by scraping the website\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "b43f8cda-8a61-4773-83b2-bb8fe55a0cb2",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Twitter Post:** \n",
"🚀 Join us online for #StartupMastery on Jan 7, 6-9 PM GMT! Explore Lean Startup, Agile, & Design Thinking methodologies. Gain practical skills, access resources, and earn a certificate! Tickets from €74.98. Don't miss out! 🎟🌟\n",
"\n",
"**Instagram Post:** \n",
"🌟 Ready to boost your startup skills? Join us for **Startup Mastery**! 💡 On January 7 from 6 PM to 9 PM GMT, dive into Lean Startup, Agile, and Design Thinking with top-notch experts. Access recorded sessions, worksheets, and get certified! 🎟 Tickets from €74.98. See you online! 🚀✨ #StartupMastery #LeanStartup #Agile #DesignThinking\n",
"\n",
"**Facebook Post:** \n",
"🗓 Exciting opportunity for entrepreneurs and startup enthusiasts! Attend our **Startup Mastery** online workshop on January 7, from 6 PM to 9 PM GMT. Learn about Lean Startup, Agile, and Design Thinking methodologies to enhance your startup journey. Enjoy a transformative experience with insights on MVP development, rapid prototyping, and feedback loops. Plus, you'll get access to recorded sessions and can earn a certificate! Limited tickets available from €74.98. Organizers: Lean Agile Zone. Don’t miss out! 🚀 #StartupMastery #Entrepreneurship"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Step 1: Create your prompts\n",
"WEBSITE_LINK = \"https://www.eventbrite.ie/e/startup-mastery-leveraging-lean-startup-agile-and-design-thinking-tickets-920474252267?aff=ebdssbcategorybrowse&keep_tld=1\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of an event \\\n",
"and provides short summaries for a Twitter post, an instagram post and a facebook post.\\\n",
"Ensure the summaries abide by the platform rules for each of the platforms.\"\n",
"\n",
"website_summary = summarize(WEBSITE_LINK)\n",
"user_prompt = f\"The events details are as follows: {website_summary}. Please summarize the above. Capture details like time and location, please capture them as well.\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt},\n",
"]\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
"\n",
"# Step 4: print the result\n",
"\n",
"display(Markdown(response.choices[0].message.content))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
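
To reuse this flow for other events, the steps above could be wrapped into a single helper. This is a minimal sketch, assuming the `summarize`, `system_prompt` and `openai` objects defined earlier in the notebook; the helper's name and structure are illustrative.

```python
# Sketch: one reusable function that summarizes an event page and writes the posts
def generate_social_posts(event_url):
    event_summary = summarize(event_url)  # step 1: summarize the event page
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"The event details are as follows: {event_summary}. "
                                     "Please write the posts, capturing time and location."},
    ]
    response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content

# display(Markdown(generate_social_posts(WEBSITE_LINK)))
```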

276
week1/community-contributions/day1-youtube-video-summarization.ipynb

@@ -0,0 +1,276 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1b6fe0c1-931e-4194-bcfe-0716d8f75b50",
"metadata": {},
"source": [
"# Youtube Video Summarization\n",
"\n",
"## My First Frontier LLM Project!\n",
"\n",
"Welcome to my first LLM-based project! The goal of this project is to leverage large language models (LLMs) to summarize YouTube videos. Currently, it only supports English transcriptions, so instead of watching the entire video, you can simply read the summary!\n",
"\n",
"## Important Note\n",
"Be mindful when testing with longer videos, as they may consume significant resources and could lead to high costs on your ChatGPT bill.\n",
"You can switch to Ollama for free usage if you're looking to reduce costs.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"!pip install youtube-transcript-api openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a082ddaf-abf5-4e6c-8112-74846c768301",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"\n",
"from openai import OpenAI\n",
"from youtube_transcript_api import YouTubeTranscriptApi\n",
"import re\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"class YoutubeVideoID:\n",
" def __init__(self, url):\n",
" self.url = url\n",
" self.video_id = self.extract_video_id(url)\n",
"\n",
" def extract_video_id(self, url):\n",
" \"\"\"\n",
" Extracts the YouTube video ID from a given URL.\n",
" Supports both regular and shortened URLs.\n",
" \"\"\"\n",
" # Regular expression to match YouTube video URL and extract the video ID\n",
" regex = r\"(?:https?:\\/\\/)?(?:www\\.)?(?:youtube\\.com\\/(?:[^\\/\\n\\s]+\\/\\S+\\/|\\S*\\?v=)|(?:youtu\\.be\\/))([a-zA-Z0-9_-]{11})\"\n",
" match = re.match(regex, url)\n",
" \n",
" if match:\n",
" return match.group(1)\n",
" else:\n",
" raise ValueError(\"Invalid YouTube URL\")\n",
"\n",
" def __str__(self):\n",
" return f\"Video ID: {self.video_id}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"source": [
"# Example usage\n",
"video_url = \"https://www.youtube.com/watch?v=kqaMIFEz15s\"\n",
"\n",
"yt_video = YoutubeVideoID(video_url)\n",
"print(yt_video)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f724be3c-bdeb-4079-b4be-f12608144484",
"metadata": {},
"outputs": [],
"source": [
"def get_transcript(video_id, language='en'):\n",
" try:\n",
" # Try to get the transcript in the desired language (Indonesian by default)\n",
" transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[language])\n",
" # Join all the 'text' fields into a single string\n",
" return \" \".join([item['text'] for item in transcript])\n",
" except Exception as e:\n",
" print(f\"Error fetching transcript: {e}\")\n",
" return None\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "12e302fa-f564-4ec6-a08f-b3b3ce549396",
"metadata": {},
"outputs": [],
"source": [
"# Fetch transcript using the video ID\n",
"transcript_text = get_transcript(yt_video.video_id)\n",
"print(len(transcript_text))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a0750be-88a1-4e65-9cb8-a0a2f11eecdf",
"metadata": {},
"outputs": [],
"source": [
"# Function to summarize text using ChatGPT\n",
"def summarize_text(text):\n",
" try:\n",
" system_prompts = \"\"\"\n",
" You are a helpful assistant who provides concise and accurate summaries of text. Your task is to:\n",
" \n",
" - Capture the key points of the content.\n",
" - Keep the summary brief and easy to understand.\n",
" - Avoid summarizing overly lengthy texts or breaking them into excessively short summaries.\n",
" - Use bullet points where appropriate to enhance clarity and structure.\n",
" \"\"\"\n",
" response = openai.chat.completions.create(\n",
" model=\"gpt-4o-mini\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompts},\n",
" {\"role\": \"user\", \"content\": f\"Summarize the following text:\\n{text}\"}\n",
" ],\n",
" max_tokens=200\n",
" )\n",
" return response.choices[0].message.content\n",
" except Exception as e:\n",
" print(f\"Error summarizing text: {e}\")\n",
" return None"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad646bc4-a11a-4c44-b941-54befdbf9bc6",
"metadata": {},
"outputs": [],
"source": [
"def split_text(text, chunk_size=3000):\n",
" \"\"\"\n",
" Splits large text into smaller chunks based on the given chunk size.\n",
" Ensures that chunks end with a full stop where possible to maintain sentence integrity.\n",
" \n",
" :param text: str, the text to be split\n",
" :param chunk_size: int, maximum size of each chunk (default 3000 characters)\n",
" :return: list of str, where each str is a chunk of text\n",
" \"\"\"\n",
" chunks = []\n",
" while len(text) > chunk_size:\n",
" # Find the last full stop within or at the chunk size\n",
" split_point = text.rfind('.', 0, chunk_size + 1) # +1 to include the period itself if it's at chunk_size\n",
" if split_point == -1: # No period found within the chunk size\n",
" split_point = chunk_size\n",
" \n",
" # Append the chunk, ensuring we don't strip spaces that might be part of the sentence structure\n",
" chunks.append(text[:split_point + 1] if split_point != chunk_size else text[:chunk_size])\n",
" text = text[split_point + 1:] if split_point != chunk_size else text[chunk_size:]\n",
" \n",
" # Add the remaining text as the final chunk, only strip if there's content\n",
" if text:\n",
" chunks.append(text.strip())\n",
" \n",
" return chunks\n",
"\n",
"transcript_chunks = split_text(transcript_text)\n",
"\n",
"# Now you can summarize each chunk individually\n",
"summaries = []\n",
"for chunk in transcript_chunks:\n",
" summary = summarize_text(chunk)\n",
" summaries.append(summary)\n",
"\n",
"\n",
"# Combine the individual summaries into one\n",
"full_summary = \" \".join(summaries)\n",
"display(Markdown(full_summary))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b266fdc-da31-4d79-8982-be77f03be59f",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "792c814d-73f8-4c1e-a0bb-b654b40e4d8b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
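
Simply joining the per-chunk summaries can read a little disjointed for long videos. One optional refinement is a final summary-of-summaries pass; here is a minimal sketch reusing `summarize_text` from the notebook above.

```python
# Optional final pass: summarize the concatenated chunk summaries into one coherent summary
combined = " ".join(s for s in summaries if s)
final_summary = summarize_text(combined)
if final_summary:
    display(Markdown(final_summary))
```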

130
week2/community-contributions/Gemini-api.ipynb

@@ -0,0 +1,130 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 13,
"id": "147ce61d-b10e-478e-8300-2fb3101f617c",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "2dab29c1-3a8d-45bc-9f45-407419449ba9",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5fb5b749-d84b-4f8c-bfb9-2f5c4e8a2daa",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv()\n",
"# openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"# anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "d34ee171-2647-47cf-9336-2d016480656f",
"metadata": {},
"outputs": [],
"source": [
"google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "856212a1-d07a-400b-9cef-a198e22f26ac",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that is great at telling jokes\"\n",
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "47289056-cc2b-4d2d-9e18-ecd65c0f3232",
"metadata": {},
"outputs": [],
"source": [
"prompts = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d331aaf-162b-499e-af7e-5e097e84f1bd",
"metadata": {},
"outputs": [],
"source": [
"# The API for Gemini has a slightly different structure.\n",
"# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
"# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
"\n",
"gemini = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=system_message\n",
")\n",
"response = gemini.generate_content(user_prompt)\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b727ee91-92b8-4d62-9a03-1b85a76b905c",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
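
The notebook mentions an alternative approach for machines where the `google.generativeai` cell crashes the kernel, but leaves that cell empty. One possible alternative is to call Gemini through its OpenAI-compatible endpoint using the OpenAI client already imported above. This is a hedged sketch: the base URL reflects Google's published OpenAI-compatibility documentation, and the model name is the same one used earlier in the notebook.

```python
# Alternative approach (sketch): Gemini via its OpenAI-compatible endpoint,
# reusing the OpenAI client and the prompts list defined above
gemini_via_openai = OpenAI(
    api_key=google_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
response = gemini_via_openai.chat.completions.create(
    model="gemini-1.5-flash",
    messages=prompts,
)
print(response.choices[0].message.content)
```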

393
week2/community-contributions/day2-gemini.ipynb

@@ -0,0 +1,393 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "1b89f103-fc49-487e-930e-14abff8bfab1",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1a40e64b-14c6-4589-a671-6817f9cb09f0",
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "c0990b15-313d-4cf8-bc5b-fc14d263ba27",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "66a38e1f-db7e-4697-aa9c-a303f9828531",
"metadata": {},
"outputs": [],
"source": [
"google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "beb9606e-9be9-4f2e-adfe-4e41fb99566e",
"metadata": {},
"outputs": [],
"source": [
"# A generic system message - no more snarky adversarial AIs!\n",
"\n",
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "19ab23bc-59cf-48a3-8651-f7b1c52874db",
"metadata": {},
"outputs": [],
"source": [
"def message_gemini(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" gemini = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=system_message\n",
")\n",
" response = gemini.generate_content(prompt)\n",
" return response.text\n",
"\n",
"\n",
"# gemini = google.generativeai.GenerativeModel(\n",
"# model_name='gemini-1.5-flash',\n",
"# system_instruction=system_message\n",
"# )\n",
"# response = gemini.generate_content(user_prompt)\n",
"# print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8fe3c66c-d25d-4627-a401-d84c7d6613e7",
"metadata": {},
"outputs": [],
"source": [
"message_gemini(\"What is today's date?\")\n",
"# message_gemini(\"tell me a funny machine learning joke\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b27027ed-4bff-493c-a41e-8318003e0387",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai as genai\n",
"for model in genai.list_models():\n",
" print(model.name)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "2f82d61b-a7cd-4bee-994d-2e83d0a01bfc",
"metadata": {},
"outputs": [],
"source": [
"# here's a simple function\n",
"\n",
"def shout(text):\n",
" print(f\"Shout has been called with input {text}\")\n",
" return text.upper()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5941fe3f-aab9-47ba-b29f-d99aa3b40aed",
"metadata": {},
"outputs": [],
"source": [
"shout(\"hello\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6470847-1cce-4bf0-8364-199504a5335f",
"metadata": {},
"outputs": [],
"source": [
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n",
"\n",
"force_dark_mode = \"\"\"\n",
"function refresh() {\n",
" const url = new URL(window.location);\n",
" if (url.searchParams.get('__theme') !== 'dark') {\n",
" url.searchParams.set('__theme', 'dark');\n",
" window.location.href = url.href;\n",
" }\n",
"}\n",
"\"\"\"\n",
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\", js=force_dark_mode).launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "69715604-cc64-4563-967f-b5720462ac69",
"metadata": {},
"outputs": [],
"source": [
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", js=force_dark_mode).launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dede1d8c-fb7a-456a-923b-e221eaa30bd9",
"metadata": {},
"outputs": [],
"source": [
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\").launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55ae11b9-e7af-449f-b737-48dd7dc1a5b2",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=shout,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n",
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cba667cf-d270-426e-b940-a01083352ecb",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=message_gemini,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n",
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8bb7885-740f-41f0-95e3-dabe864cea14",
"metadata": {},
"outputs": [],
"source": [
"# Let's use Markdown\n",
"# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n",
"# I'm taking advantage of system_message being a global variable, used back in the message_gpt function (go take a look)\n",
"# Not a great software engineering practice, but quite sommon during Jupyter Lab R&D!\n",
"\n",
"system_message = \"You are a helpful assistant that responds in markdown\"\n",
"\n",
"view = gr.Interface(\n",
" fn=message_gemini,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "43d17b00-f4bc-45ad-a679-3112a170f5fb",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai as genai\n",
"\n",
"def stream_gemini(prompt):\n",
" gemini = genai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" safety_settings=None,\n",
" system_instruction=system_message\n",
" )\n",
"\n",
" response = gemini.generate_content(prompt, safety_settings=[\n",
" {\"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_HATE_SPEECH\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_HARASSMENT\", \"threshold\": \"BLOCK_NONE\"}], stream=True)\n",
" \n",
" result = \"\"\n",
" for chunk in response:\n",
" result += chunk.text\n",
" yield result\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "840f3d11-e66b-4b6b-9b98-70e0f02be9e6",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_gemini,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "markdown",
"id": "ea8a0081-8d2e-4960-b479-7c1ef346f524",
"metadata": {},
"source": [
"# Building a company brochure generator\n",
"\n",
"Now you know how - it's simple!"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "2d43360a-515e-4008-9eef-7a3c4e47cfba",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"class Website:\n",
" url: str\n",
" title: str\n",
" text: str\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url)\n",
" self.body = response.content\n",
" soup = BeautifulSoup(self.body, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "08a07e55-b05d-4360-8e05-61dd39cc019b",
"metadata": {},
"outputs": [],
"source": [
"def stream_brochure(company_name, url, model, response_tone):\n",
" prompt = f\"Please generate a {response_tone} company brochure for {company_name}. Here is their landing page:\\n\"\n",
" prompt += Website(url).get_contents()\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Gemini\":\n",
" result = stream_gemini(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d9554211-c832-4558-90c8-fceab95fd23c",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\"),\n",
" gr.Dropdown([\"Informational\", \"Promotional\", \"Humorous\"], label=\"Select tone\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d4e6efd-66e8-4388-bfc3-782bde4babfb",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
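
Note that `stream_brochure` above calls `stream_gpt` and `stream_claude`, which are not defined anywhere in this notebook, so only the Gemini option works as-is. Below is a minimal sketch of what `stream_gpt` could look like, assuming `OPENAI_API_KEY` is set in your `.env`; `stream_claude` would follow the same pattern with the `anthropic` client.

```python
# Sketch only: a streaming GPT generator so the "GPT" dropdown option works
openai_client = OpenAI()

def stream_gpt(prompt):
    stream = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        stream=True,
    )
    result = ""
    for chunk in stream:
        result += chunk.choices[0].delta.content or ""
        yield result
```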

310
week2/community-contributions/day3-gemini.ipynb

@@ -0,0 +1,310 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "75e2ef28-594f-4c18-9d22-c6b8cd40ead2",
"metadata": {},
"source": [
"# Day 3 - Conversational AI - aka Chatbot!"
]
},
{
"cell_type": "code",
"execution_count": 40,
"id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"# import anthropic\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "231605aa-fccb-447e-89cf-8b187444536a",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
"metadata": {},
"outputs": [],
"source": [
"google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e16839b5-c03b-4d9d-add6-87a0f6f37575",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba2123e7-77ed-43b4-8c37-03658fb42b78",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that is great at telling jokes\"\n",
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"\n",
"\n",
"prompts = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]\n",
"\n",
"# The API for Gemini has a slightly different structure.\n",
"# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
"# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
"\n",
"gemini = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=system_message\n",
")\n",
"response = gemini.generate_content(user_prompt)\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "7b933ff3",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai as genai\n",
"\n",
"model = genai.GenerativeModel('gemini-1.5-flash')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "91578b16",
"metadata": {},
"outputs": [],
"source": [
"chat = model.start_chat(history=[])\n",
"response = chat.send_message('Hello! My name is Shardul.')\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c4bc38f",
"metadata": {},
"outputs": [],
"source": [
"response = chat.send_message('Can you tell something interesting about star wars?')\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "337bee91",
"metadata": {},
"outputs": [],
"source": [
"response = chat.send_message('Do you remember what my name is?')\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcaf4d95",
"metadata": {},
"outputs": [],
"source": [
"chat.history"
]
},
{
"cell_type": "markdown",
"id": "98e97227-f162-4d1a-a0b2-345ff248cbe7",
"metadata": {},
"source": [
"# Please read this! A change from the video:\n",
"\n",
"In the video, I explain how we now need to write a function called:\n",
"\n",
"`chat(message, history)`\n",
"\n",
"Which expects to receive `history` in a particular format, which we need to map to the OpenAI format before we call OpenAI:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
" {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
" {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
" {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
"]\n",
"```\n",
"\n",
"But Gradio has been upgraded! Now it will pass in `history` in the exact OpenAI format, perfect for us to send straight to OpenAI.\n",
"\n",
"So our work just got easier!\n",
"\n",
"We will write a function `chat(message, history)` where: \n",
"**message** is the prompt to use \n",
"**history** is the past conversation, in OpenAI format \n",
"\n",
"We will combine the system message, history and latest message, then call OpenAI."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" relevant_system_message = system_message\n",
" if 'belt' in message:\n",
" relevant_system_message += \" The store does not sell belts; if you are asked for belts, be sure to point out other items on sale.\"\n",
" \n",
" messages = [{\"role\": \"system\", \"content\": relevant_system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
"\n",
" stream = gemini.generate_content(message, safety_settings=[\n",
" {\"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_HATE_SPEECH\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_HARASSMENT\", \"threshold\": \"BLOCK_NONE\"}], stream=True)\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" print(chunk) # Print the chunk to understand its structure\n",
" # Adjust the following line based on the actual structure of the chunk\n",
" response += chunk.get('content', '') or ''\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6e745e1",
"metadata": {},
"outputs": [],
"source": [
"chat_model = genai.GenerativeModel('gemini-1.5-flash')\n",
"chat = chat_model.start_chat()\n",
"\n",
"msg = \"what is gen ai\"\n",
"stream = chat.send_message(msg, stream=True)\n",
"# print(\"Response:\", stream.text)\n",
"for chunk in stream:\n",
" print(chunk.text)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dce941ee",
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"chat = model.start_chat(history=[])\n",
"\n",
"# Transform Gradio history to Gemini format\n",
"def transform_history(history):\n",
" new_history = []\n",
" for chat in history:\n",
" new_history.append({\"parts\": [{\"text\": chat[0]}], \"role\": \"user\"})\n",
" new_history.append({\"parts\": [{\"text\": chat[1]}], \"role\": \"model\"})\n",
" return new_history\n",
"\n",
"def response(message, history):\n",
" global chat\n",
" # The history will be the same as in Gradio, the 'Undo' and 'Clear' buttons will work correctly.\n",
" chat.history = transform_history(history)\n",
" response = chat.send_message(message)\n",
" response.resolve()\n",
"\n",
" # Each character of the answer is displayed\n",
" for i in range(len(response.text)):\n",
" time.sleep(0.01)\n",
" yield response.text[: i+1]\n",
"\n",
"gr.ChatInterface(response,\n",
" textbox=gr.Textbox(placeholder=\"Question to Gemini\")).launch(debug=True)"
]
},
{
"cell_type": "markdown",
"id": "82a57ee0-b945-48a7-a024-01b56a5d4b3e",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business Applications</h2>\n",
" <span style=\"color:#181;\">Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n",
"<br/><br/>\n",
"Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6dfb9e21-df67-4c2b-b952-5e7e7961b03d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "llms",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
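
Pulling the pieces of this notebook together, here is one way a single Gradio-compatible `chat(message, history)` for Gemini could look: it accepts history in the OpenAI format described above, maps it to Gemini's format, and streams the reply. This is a sketch under assumptions; the role mapping and the use of `start_chat` are one reasonable approach, not the only one.

```python
# Sketch: Gradio-style chat function for Gemini, streaming the reply
def gemini_chat(message, history):
    # Map OpenAI-format history ({"role", "content"}) to Gemini's {"role", "parts"} format
    gemini_history = []
    for item in history:
        role = "user" if item["role"] == "user" else "model"
        gemini_history.append({"role": role, "parts": [{"text": item["content"]}]})
    session = model.start_chat(history=gemini_history)
    stream = session.send_message(message, stream=True)
    reply = ""
    for chunk in stream:
        reply += chunk.text
        yield reply

# gr.ChatInterface(gemini_chat, type="messages").launch()
```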

762
week2/community-contributions/day5.ipynb

@@ -0,0 +1,762 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec",
"metadata": {},
"source": [
"# Project - Airline AI Assistant\n",
"\n",
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv()\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {},
"outputs": [],
"source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n",
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" return response.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {},
"outputs": [],
"source": [
"# Let's start by making a useful function\n",
"\n",
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n",
"\n",
"def get_ticket_price(destination_city):\n",
" print(f\"Tool get_ticket_price called for {destination_city}\")\n",
" city = destination_city.lower()\n",
" return ticket_prices.get(city, \"Unknown\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {},
"outputs": [],
"source": [
"get_ticket_price(\"London\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"\n",
"price_function = {\n",
" \"name\": \"get_ticket_price\",\n",
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": price_function}]"
]
},
{
"cell_type": "markdown",
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340",
"metadata": {},
"source": [
"## Getting OpenAI to use our Tool\n",
"\n",
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n",
"\n",
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n",
"\n",
"Here's how the new chat function looks:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" response, city = handle_tool_call(message)\n",
" messages.append(message)\n",
" messages.append(response)\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" \n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0992986-ea09-4912-a076-8e5603ee631f",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"\n",
"def handle_tool_call(message):\n",
"\n",
" available_functions = {\n",
" \"get_ticket_price\": get_ticket_price,\n",
" }\n",
"\n",
" tool_call = message.tool_calls[0]\n",
" function_to_call = available_functions.get(tool_call.function.name)\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" city = arguments.get('destination_city')\n",
" price = function_to_call(city)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" return response, city"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "markdown",
"id": "473e5b39-da8f-4db1-83ae-dbaca2e9531e",
"metadata": {},
"source": [
"# Let's go multi-modal!!\n",
"\n",
"We can use DALL-E-3, the image generation model behind GPT-4o, to make us some images\n",
"\n",
"Let's put this in a function called artist.\n",
"\n",
"### Price alert: each time I generate an image it costs about 4 cents - don't go crazy with images!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c",
"metadata": {},
"outputs": [],
"source": [
"# Some imports for handling images\n",
"\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f",
"metadata": {},
"outputs": [],
"source": [
"def artist(city):\n",
" image_response = openai.images.generate(\n",
" model=\"dall-e-3\",\n",
" prompt=f\"An image representing a vacation in {city}, showing tourist spots and everything unique about {city}, in a vibrant pop-art style\",\n",
" size=\"1024x1024\",\n",
" n=1,\n",
" response_format=\"b64_json\",\n",
" )\n",
" image_base64 = image_response.data[0].b64_json\n",
" image_data = base64.b64decode(image_base64)\n",
" return Image.open(BytesIO(image_data))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d877c453-e7fb-482a-88aa-1a03f976b9e9",
"metadata": {},
"outputs": [],
"source": [
"image = artist(\"New York City\")\n",
"display(image)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "728a12c5-adc3-415d-bb05-82beb73b079b",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "f4975b87-19e9-4ade-a232-9b809ec75c9a",
"metadata": {},
"source": [
"## Audio (NOTE - Audio is optional for this course - feel free to skip Audio if it causes trouble!)\n",
"\n",
"And let's make a function talker that uses OpenAI's speech model to generate Audio\n",
"\n",
"### Troubleshooting Audio issues\n",
"\n",
"If you have any problems running this code below (like a FileNotFound error, or a warning of a missing package), you may need to install FFmpeg, a very popular audio utility.\n",
"\n",
"**For PC Users**\n",
"\n",
"Detailed instructions are [here](https://chatgpt.com/share/6724efee-6b0c-8012-ac5e-72e2e3885905) and summary instructions:\n",
"\n",
"1. Download FFmpeg from the official website: https://ffmpeg.org/download.html\n",
"\n",
"2. Extract the downloaded files to a location on your computer (e.g., `C:\\ffmpeg`)\n",
"\n",
"3. Add the FFmpeg bin folder to your system PATH:\n",
"- Right-click on 'This PC' or 'My Computer' and select 'Properties'\n",
"- Click on 'Advanced system settings'\n",
"- Click on 'Environment Variables'\n",
"- Under 'System variables', find and edit 'Path'\n",
"- Add a new entry with the path to your FFmpeg bin folder (e.g., `C:\\ffmpeg\\bin`)\n",
"- Restart your command prompt, and within Jupyter Lab do Kernel -> Restart kernel, to pick up the changes\n",
"\n",
"4. Open a new command prompt and run this to make sure it's installed OK\n",
"`ffmpeg -version`\n",
"\n",
"**For Mac Users**\n",
"\n",
"1. Install homebrew if you don't have it already by running this in a Terminal window and following any instructions: \n",
"`/bin/bash -c \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"`\n",
"\n",
"2. Then install FFmpeg with `brew install ffmpeg`\n",
"\n",
"3. Verify your installation with `ffmpeg -version` and if everything is good, within Jupyter Lab do Kernel -> Restart kernel to pick up the changes\n",
"\n",
"Message me or email me at ed@edwarddonner.com with any problems!"
]
},
{
"cell_type": "markdown",
"id": "4cc90e80-c96e-4dd4-b9d6-386fe2b7e797",
"metadata": {},
"source": [
"## To check you now have ffmpeg and can access it here\n",
"\n",
"Excecute the next cell to see if you get a version number. (Putting an exclamation mark before something in Jupyter Lab tells it to run it as a terminal command rather than python code).\n",
"\n",
"If this doesn't work, you may need to actually save and close down your Jupyter lab, and start it again from a new Terminal window (Mac) or Anaconda prompt (PC), remembering to activate the llms environment. This ensures you pick up ffmpeg.\n",
"\n",
"And if that doesn't work, please contact me!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b3be0fb-1d34-4693-ab6f-dbff190afcd7",
"metadata": {},
"outputs": [],
"source": [
"!ffmpeg -version\n",
"!ffprobe -version\n",
"!ffplay -version"
]
},
{
"cell_type": "markdown",
"id": "d91d3f8f-e505-4e3c-a87c-9e42ed823db6",
"metadata": {},
"source": [
"# For Mac users - and possibly many PC users too\n",
"\n",
"This version should work fine for you. It might work for Windows users too, but you might get a Permissions error writing to a temp file. If so, see the next section!\n",
"\n",
"As always, if you have problems, please contact me! (You could also comment out the audio talker() in the later code if you're less interested in audio generation)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ffbfe93b-5e86-4e68-ba71-b301cd5230db",
"metadata": {},
"outputs": [],
"source": [
"from pydub import AudioSegment\n",
"from pydub.playback import play\n",
"\n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\", # Also, try replacing onyx with alloy\n",
" input=message\n",
" )\n",
" \n",
" audio_stream = BytesIO(response.content)\n",
" audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n",
" play(audio)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b88d775d-d357-4292-a1ad-5dc5ed567281",
"metadata": {},
"outputs": [],
"source": [
"talker(\"Well, hi there\")"
]
},
{
"cell_type": "markdown",
"id": "ad89a9bd-bb1e-4bbb-a49a-83af5f500c24",
"metadata": {},
"source": [
"# For Windows users (or any Mac users with problems above)\n",
"\n",
"## First try the Mac version above, but if you get a permissions error writing to a temp file, then this code should work instead.\n",
"\n",
"A collaboration between students Mark M. and Patrick H. and Claude got this resolved!\n",
"\n",
"Below are 4 variations - hopefully one of them will work on your PC. If not, message me please!\n",
"\n",
"And for Mac people - all 3 of the below work on my Mac too - please try these if the Mac version gave you problems.\n",
"\n",
"## PC Variation 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d104b96a-02ca-4159-82fe-88e0452aa479",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image\n",
"from IPython.display import Audio, display\n",
"\n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\",\n",
" input=message)\n",
"\n",
" audio_stream = BytesIO(response.content)\n",
" output_filename = \"output_audio.mp3\"\n",
" with open(output_filename, \"wb\") as f:\n",
" f.write(audio_stream.read())\n",
"\n",
" # Play the generated audio\n",
" display(Audio(output_filename, autoplay=True))\n",
"\n",
"talker(\"Well, hi there\")"
]
},
{
"cell_type": "markdown",
"id": "3a5d11f4-bbd3-43a1-904d-f684eb5f3e3a",
"metadata": {},
"source": [
"## PC Variation 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d59c8ebd-79c5-498a-bdf2-3a1c50d91aa0",
"metadata": {},
"outputs": [],
"source": [
"import tempfile\n",
"import subprocess\n",
"from io import BytesIO\n",
"from pydub import AudioSegment\n",
"import time\n",
"\n",
"def play_audio(audio_segment):\n",
" temp_dir = tempfile.gettempdir()\n",
" temp_path = os.path.join(temp_dir, \"temp_audio.wav\")\n",
" try:\n",
" audio_segment.export(temp_path, format=\"wav\")\n",
" time.sleep(3) # Student Dominic found that this was needed. You could also try commenting out to see if not needed on your PC\n",
" subprocess.call([\n",
" \"ffplay\",\n",
" \"-nodisp\",\n",
" \"-autoexit\",\n",
" \"-hide_banner\",\n",
" temp_path\n",
" ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n",
" finally:\n",
" try:\n",
" os.remove(temp_path)\n",
" except Exception:\n",
" pass\n",
" \n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\", # Also, try replacing onyx with alloy\n",
" input=message\n",
" )\n",
" audio_stream = BytesIO(response.content)\n",
" audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n",
" play_audio(audio)\n",
"\n",
"talker(\"Well hi there\")"
]
},
{
"cell_type": "markdown",
"id": "96f90e35-f71e-468e-afea-07b98f74dbcf",
"metadata": {},
"source": [
"## PC Variation 3"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8597c7f8-7b50-44ad-9b31-db12375cd57b",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from pydub import AudioSegment\n",
"from pydub.playback import play\n",
"from io import BytesIO\n",
"\n",
"def talker(message):\n",
" # Set a custom directory for temporary files on Windows\n",
" custom_temp_dir = os.path.expanduser(\"~/Documents/temp_audio\")\n",
" os.environ['TEMP'] = custom_temp_dir # You can also use 'TMP' if necessary\n",
" \n",
" # Create the folder if it doesn't exist\n",
" if not os.path.exists(custom_temp_dir):\n",
" os.makedirs(custom_temp_dir)\n",
" \n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\", # Also, try replacing onyx with alloy\n",
" input=message\n",
" )\n",
" \n",
" audio_stream = BytesIO(response.content)\n",
" audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n",
"\n",
" play(audio)\n",
"\n",
"talker(\"Well hi there\")"
]
},
{
"cell_type": "markdown",
"id": "e821224c-b069-4f9b-9535-c15fdb0e411c",
"metadata": {},
"source": [
"## PC Variation 4\n",
"\n",
"### Let's try a completely different sound library\n",
"\n",
"First run the next cell to install a new library, then try the cell below it."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "69d3c0d9-afcc-49e3-b829-9c9869d8b472",
"metadata": {},
"outputs": [],
"source": [
"!pip install simpleaudio"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "28f9cc99-36b7-4554-b3f4-f2012f614a13",
"metadata": {},
"outputs": [],
"source": [
"from pydub import AudioSegment\n",
"from io import BytesIO\n",
"import tempfile\n",
"import os\n",
"import simpleaudio as sa\n",
"\n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\", # Also, try replacing onyx with alloy\n",
" input=message\n",
" )\n",
" \n",
" audio_stream = BytesIO(response.content)\n",
" audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n",
"\n",
" # Create a temporary file in a folder where you have write permissions\n",
" with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False, dir=os.path.expanduser(\"~/Documents\")) as temp_audio_file:\n",
" temp_file_name = temp_audio_file.name\n",
" audio.export(temp_file_name, format=\"wav\")\n",
" \n",
" # Load and play audio using simpleaudio\n",
" wave_obj = sa.WaveObject.from_wave_file(temp_file_name)\n",
" play_obj = wave_obj.play()\n",
" play_obj.wait_done() # Wait for playback to finish\n",
"\n",
" # Clean up the temporary file afterward\n",
" os.remove(temp_file_name)\n",
" \n",
"talker(\"Well hi there\")"
]
},
{
"cell_type": "markdown",
"id": "7986176b-cd04-495f-a47f-e057b0e462ed",
"metadata": {},
"source": [
"## PC Users - if none of those 4 variations worked!\n",
"\n",
"Please get in touch with me. I'm sorry this is causing problems! We'll figure it out.\n",
"\n",
"Alternatively: playing audio from your PC isn't super-critical for this course, and you can feel free to focus on image generation and skip audio for now, or come back to it later."
]
},
{
"cell_type": "markdown",
"id": "1d48876d-c4fa-46a8-a04f-f9fadf61fb0d",
"metadata": {},
"source": [
"# Our Agent Framework\n",
"\n",
"The term 'Agentic AI' and Agentization is an umbrella term that refers to a number of techniques, such as:\n",
"\n",
"1. Breaking a complex problem into smaller steps, with multiple LLMs carrying out specialized tasks\n",
"2. The ability for LLMs to use Tools to give them additional capabilities\n",
"3. The 'Agent Environment' which allows Agents to collaborate\n",
"4. An LLM can act as the Planner, dividing bigger tasks into smaller ones for the specialists\n",
"5. The concept of an Agent having autonomy / agency, beyond just responding to a prompt - such as Memory\n",
"\n",
"We're showing 1 and 2 here, and to a lesser extent 3 and 5. In week 8 we will do the lot!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba820c95-02f5-499e-8f3c-8727ee0a6c0c",
"metadata": {},
"outputs": [],
"source": [
"def chat(history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
" image = None\n",
" \n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" response, city = handle_tool_call(message)\n",
" messages.append(message)\n",
" messages.append(response)\n",
" image = artist(city)\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" \n",
" reply = response.choices[0].message.content\n",
" history += [{\"role\":\"assistant\", \"content\":reply}]\n",
"\n",
" # Comment out or delete the next line if you'd rather skip Audio for now..\n",
" talker(reply)\n",
" \n",
" return history, image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f38d0d27-33bf-4992-a2e5-5dbed973cde7",
"metadata": {},
"outputs": [],
"source": [
"# More involved Gradio code as we're not using the preset Chat interface!\n",
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n",
"\n",
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
" image_output = gr.Image(height=500)\n",
" with gr.Row():\n",
" entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n",
" with gr.Row():\n",
" clear = gr.Button(\"Clear\")\n",
"\n",
" def do_entry(message, history):\n",
" history += [{\"role\":\"user\", \"content\":message}]\n",
" return \"\", history\n",
"\n",
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
" chat, inputs=chatbot, outputs=[chatbot, image_output]\n",
" )\n",
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "markdown",
"id": "226643d2-73e4-4252-935d-86b8019e278a",
"metadata": {},
"source": [
"# Exercises and Business Applications\n",
"\n",
"Add in more tools - perhaps to simulate actually booking a flight. A student has done this and provided their example in the community contributions folder.\n",
"\n",
"Next: take this and apply it to your business. Make a multi-modal AI assistant with tools that could carry out an activity for your work. A customer support assistant? New employee onboarding assistant? So many possibilities! Also, see the week2 end of week Exercise in the separate Notebook."
]
},
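  {
   "cell_type": "markdown",
   "id": "booking-tool-sketch-md",
   "metadata": {},
   "source": [
    "### A possible starting point for the booking-tool exercise\n",
    "\n",
    "The next cell is a hedged sketch, not a definitive solution. It assumes the tool-calling pattern used earlier in this notebook: a plain Python function, a matching JSON schema appended to the `tools` list, and a `handle_tool_call` that dispatches on the function name. The `book_flight` function, its `book_function` schema and the in-memory `bookings` list are hypothetical placeholders - swap in your own logic."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "booking-tool-sketch-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical sketch for the exercise - adapt the names and wiring to your own notebook\n",
    "\n",
    "bookings = []  # stand-in for a real booking system\n",
    "\n",
    "def book_flight(destination_city, passenger_name):\n",
    "    \"\"\"Record a pretend booking and return a confirmation reference.\"\"\"\n",
    "    reference = f\"BOOK-{len(bookings)+1:04d}\"\n",
    "    bookings.append({\"city\": destination_city, \"name\": passenger_name, \"reference\": reference})\n",
    "    return f\"Flight to {destination_city} booked for {passenger_name}, reference {reference}\"\n",
    "\n",
    "book_function = {\n",
    "    \"name\": \"book_flight\",\n",
    "    \"description\": \"Book a flight to the given destination city for the named passenger and return a confirmation reference.\",\n",
    "    \"parameters\": {\n",
    "        \"type\": \"object\",\n",
    "        \"properties\": {\n",
    "            \"destination_city\": {\"type\": \"string\", \"description\": \"The city the passenger wants to fly to\"},\n",
    "            \"passenger_name\": {\"type\": \"string\", \"description\": \"The passenger's full name\"}\n",
    "        },\n",
    "        \"required\": [\"destination_city\", \"passenger_name\"]\n",
    "    }\n",
    "}\n",
    "\n",
    "# To wire this in, you would add it to the tools list and extend handle_tool_call, for example:\n",
    "# tools.append({\"type\": \"function\", \"function\": book_function})"
   ]
  },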
{
"cell_type": "markdown",
"id": "7e795560-1867-42db-a256-a23b844e6fbe",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../thankyou.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#090;\">I have a special request for you</h2>\n",
" <span style=\"color:#090;\">\n",
" My editor tells me that it makes a HUGE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n",
" </span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

444
week2/community-contributions/day5_llama3.1_tools_usecase.ipynb

@ -0,0 +1,444 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Code tested in google colab with T4 GPU"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "bK-WkZmI_L4S"
},
"outputs": [],
"source": [
"!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Anm0jUBC_dQF"
},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"from google.colab import drive\n",
"from huggingface_hub import login\n",
"from google.colab import userdata\n",
"from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n",
"import torch\n",
"import json"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "GsKyLRam_hpi"
},
"outputs": [],
"source": [
"LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "AWo-zjJJAIIF"
},
"outputs": [],
"source": [
"hf_token = userdata.get('HF_TOKEN')\n",
"login(hf_token, add_to_git_credential=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "TSeMKC4S7Ip6"
},
"outputs": [],
"source": [
"# Utility Functions ::\n",
"\n",
"def get_tokenizer(model_name):\n",
" \"\"\"\n",
" Args:\n",
" model_name: LLM model name\n",
"\n",
" Returns:\n",
" Tokenizer\n",
" \"\"\"\n",
" tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
" tokenizer.pad_token = tokenizer.eos_token\n",
"\n",
" return tokenizer\n",
"\n",
"\n",
"def get_model(model_name):\n",
" \"\"\"\n",
" Args:\n",
" model_name: LLM model name\n",
"\n",
" Returns:\n",
" Model\n",
" \"\"\"\n",
" quant_config = BitsAndBytesConfig(\n",
" load_in_4bit=True,\n",
" bnb_4bit_use_double_quant=True,\n",
" bnb_4bit_compute_dtype=torch.bfloat16,\n",
" bnb_4bit_quant_type=\"nf4\"\n",
" )\n",
"\n",
" model = AutoModelForCausalLM.from_pretrained(model_name, device_map=\"auto\", quantization_config=quant_config)\n",
"\n",
" return model\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "O_mUtibSJFWt"
},
"outputs": [],
"source": [
"tokenizer = get_tokenizer(LLAMA)\n",
"model = get_model(LLAMA)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Mc0RE4XM_2hW"
},
"outputs": [],
"source": [
"\n",
"def parse_response(inputs, outputs):\n",
" \"\"\"\n",
" Args:\n",
" inputs: Input Tokens, pt tensors\n",
" outputs: Model generated output token\n",
"\n",
" Returns:\n",
" response\n",
" \"\"\"\n",
" full_input =(tokenizer.batch_decode(inputs, skip_special_tokens=True)[0])\n",
" full_outputs=tokenizer.batch_decode(outputs, skip_special_tokens=True)\n",
" response = full_outputs[0][len(full_input):]\n",
"\n",
" return response\n",
"\n",
"# Tools Functions:\n",
"\n",
"def get_tools_response(tools_params):\n",
" function_name = tools_params['name']\n",
" arguments = tools_params['arguments']\n",
" location = arguments['location']\n",
"\n",
" if function_name == 'get_current_temperature':\n",
" response = get_current_temperature(location)\n",
" return response\n",
" elif function_name == 'get_current_wind_speed':\n",
" response = get_current_wind_speed(location)\n",
" return response\n",
"\n",
"\n",
"def get_current_temperature(location: str) -> float:\n",
" \"\"\"\n",
" Get the current temperature at a location.\n",
"\n",
" Args:\n",
" location: The location to get the temperature for, in the format \"City, Country\"\n",
" Returns:\n",
" The current temperature at the specified location in the specified units, as a float.\n",
" \"\"\"\n",
" if location == \"Bangalore, India\": \n",
" return 22 # for testing purpose, please replace with your logic\n",
"\n",
"\n",
"def get_current_wind_speed(location: str) -> str:\n",
" \"\"\"\n",
" Get the current wind speed at a given location.\n",
"\n",
" Args:\n",
" location: The location to get the wind speed for, in the format \"City, Country\"\n",
" Returns:\n",
" The current wind speed at the given location in the specified units, as a string.\n",
" \"\"\"\n",
" return 6 # for testing purpose, please replace with your logic\n",
"\n",
"\n",
"def get_llm_response(message):\n",
"\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": \"\"\"\n",
" You are an helpful assistant that responds to weather and wind queries. Please provide accurate answers.\n",
" Follow instructions:\n",
" 1. Please provide courteous answers.\n",
" 2. If you do not know the answer say so.\n",
" 3. Do not provide any explanations or suggestions.\n",
" 4. Provide the response in no more than 1 sentence.\n",
" \"\"\"\n",
" },\n",
" ]\n",
" message = {\"role\": \"user\", \"content\": message}\n",
"\n",
" messages.append(message)\n",
"\n",
"\n",
" inputs = tokenizer.apply_chat_template(messages, tools=[get_current_temperature, get_current_wind_speed], add_generation_prompt=True, return_tensors=\"pt\").to(\"cuda\")\n",
"\n",
" #streamer = TextStreamer(tokenizer) # include to check the output of model, only for debug\n",
" outputs = model.generate(inputs, max_new_tokens=2000 )#, streamer=streamer ) # , include to check the output of model, only for debug\n",
"\n",
" model_response = parse_response(inputs, outputs)\n",
"\n",
" tool_call = json.loads(model_response.replace(\"parameters\", \"arguments\"))\n",
" messages.append({\"role\": \"assistant\", \"tool_calls\": [{\"type\": \"function\", \"function\": tool_call}]})\n",
"\n",
" response = get_tools_response(tool_call)\n",
"\n",
" messages.append({\"role\": \"tool\", \"name\": tool_call[\"name\"], \"content\": response})\n",
"\n",
" inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=\"pt\").to(\"cuda\")\n",
" outputs = model.generate(inputs, max_new_tokens=200)\n",
"\n",
" response = parse_response(inputs, outputs)\n",
"\n",
" return response\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "AKGLvmnsCrnk",
"outputId": "f3e5448a-76a5-47a6-a5e8-491ef25b27c2"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n",
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"name\": \"get_current_wind_speed\", \"parameters\": {\"location\": \"Bangalore, India\"}}\n",
"6\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"The current wind speed in Bangalore is approximately 6 km/h.\n",
"####################################################################################################\n",
"Query: what is the wind speed in Bangalore?\n",
"Response: The current wind speed in Bangalore is approximately 6 km/h.\n",
"\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"name\": \"get_current_temperature\", \"parameters\": {\"location\": \"Bangalore, India\"}}\n",
"22\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I don't have real-time information, but Bangalore's average temperature is around 22 degrees Celsius.\n",
"####################################################################################################\n",
"Query: what is the temperature of Bangalore?\n",
"Response: I don't have real-time information, but Bangalore's average temperature is around 22 degrees Celsius.\n",
"\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"name\": \"get_current_temperature\", \"parameters\": {\"location\": \"Delhi, India\"}}\n",
"None\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I don't have the current temperature in Delhi.\n",
"####################################################################################################\n",
"Query: temperature in Delhi?\n",
"Response: I don't have the current temperature in Delhi.\n",
"\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"name\": \"get_current_wind_speed\", \"parameters\": {\"location\": \"Goa\"}}\n",
"6\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"The wind speed in Goa is approximately 6 km/h.\n",
"####################################################################################################\n",
"Query: wind speed in Goa?\n",
"Response: The wind speed in Goa is approximately 6 km/h.\n",
"\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
"Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\"name\": \"get_current_temperature\", \"parameters\": {\"location\": \"Chennai, India\"}}\n",
"None\n",
"I don't have the current temperature of Chennai.\n",
"####################################################################################################\n",
"Query: Chennai's temperature?\n",
"Response: I don't have the current temperature of Chennai.\n",
"\n",
"\n"
]
}
],
"source": [
"\n",
"\n",
"data = [\n",
" \"what is the wind speed in Bangalore?\",\n",
" \"what is the temperature of Bangalore?\",\n",
" \"temperature in Delhi?\",\n",
" \"wind speed in Goa?\",\n",
" \"Chennai's temperature?\",\n",
"]\n",
"\n",
"for query in data:\n",
" response = get_llm_response(query)\n",
" print(\"#\"*100)\n",
" print(f\"Query: {query}\\nResponse: {response}\\n\\n\")\n",
"\n"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "T4",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

2
week2/day5.ipynb

@ -235,7 +235,7 @@
"\n",
"Let's put this in a function called artist.\n",
"\n",
"### Price alert: each time I generate an image it costs about 4c - don't go crazy with images!"
"### Price alert: each time I generate an image it costs about 4 cents - don't go crazy with images!"
]
},
{

302
week3/community-contributions/day5_with_Gradio.ipynb

@ -0,0 +1,302 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# Create meeting minutes from an Audio file\n",
"\n",
"I downloaded some Denver City Council meeting minutes and selected a portion of the meeting for us to transcribe. You can download it here: \n",
"https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n",
"\n",
"If you'd rather work with the original data, the HuggingFace dataset is [here](https://huggingface.co/datasets/huuuyeah/meetingbank) and the audio can be downloaded [here](https://huggingface.co/datasets/huuuyeah/MeetingBank_Audio/tree/main).\n",
"\n",
"The goal of this product is to use the Audio to generate meeting minutes, including actions.\n",
"\n",
"For this project, you can either use the Denver meeting minutes, or you can record something of your own!\n",
"\n",
"## Please note:\n",
"\n",
"When you run the pip installs in the first cell below, you might get this error - it can be safely ignored - it sounds quite severe, but it doesn't seem to affect anything else in this project!\n",
"\n",
"\n",
"> ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"gcsfs 2024.10.0 requires fsspec==2024.10.0, but you have fsspec 2024.9.0 which is incompatible.\n",
"\n"
],
"metadata": {
"id": "It89APiAtTUF"
}
},
{
"cell_type": "code",
"source": [
"!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio"
],
"metadata": {
"id": "f2vvgnFpHpID"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FW8nl3XRFrz0"
},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from openai import OpenAI\n",
"from google.colab import drive\n",
"from huggingface_hub import login\n",
"from google.colab import userdata\n",
"from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n",
"import torch\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"source": [
"# Constants\n",
"\n",
"AUDIO_MODEL = \"whisper-1\"\n",
"LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\""
],
"metadata": {
"id": "q3D1_T0uG_Qh"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# New capability - connect this Colab to my Google Drive\n",
"# See immediately below this for instructions to obtain denver_extract.mp3\n",
"\n",
"drive.mount(\"/content/drive\")\n",
"audio_filename = \"/content/drive/MyDrive/llms/denver_extract.mp3\""
],
"metadata": {
"id": "Es9GkQ0FGCMt"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Download denver_extract.mp3\n",
"\n",
"You can either use the same file as me, the extract from Denver city council minutes, or you can try your own..\n",
"\n",
"If you want to use the same as me, then please download my extract here, and put this on your Google Drive: \n",
"https://drive.google.com/file/d/1N_kpSojRR5RYzupz6nqM8hMSoEF_R7pU/view?usp=sharing\n"
],
"metadata": {
"id": "HTl3mcjyzIEE"
}
},
{
"cell_type": "code",
"source": [
"# Sign in to HuggingFace Hub\n",
"\n",
"hf_token = userdata.get('HF_TOKEN')\n",
"login(hf_token, add_to_git_credential=True)"
],
"metadata": {
"id": "xYW8kQYtF-3L"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Sign in to OpenAI using Secrets in Colab\n",
"\n",
"openai_api_key = userdata.get('OPENAI_API_KEY')\n",
"openai = OpenAI(api_key=openai_api_key)"
],
"metadata": {
"id": "qP6OB2OeGC2C"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Initialize Llama model and tokenizer\n",
"\n",
"quant_config = BitsAndBytesConfig(\n",
" load_in_4bit=True,\n",
" bnb_4bit_use_double_quant=True,\n",
" bnb_4bit_compute_dtype=torch.bfloat16,\n",
" bnb_4bit_quant_type=\"nf4\"\n",
")\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n",
"tokenizer.pad_token = tokenizer.eos_token\n",
"\n",
"model = AutoModelForCausalLM.from_pretrained(\n",
" LLAMA,\n",
" device_map=\"auto\",\n",
" quantization_config=quant_config\n",
")"
],
"metadata": {
"id": "hgQBeIYUyaqj"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Generate meeting minutes\n",
"\n",
"def generate_minutes(transcription, model, tokenizer, progress=gr.Progress()):\n",
" progress(0.6, desc=\"Generating meeting minutes from transcript...\")\n",
"\n",
" system_message = \"You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown.\"\n",
" user_prompt = f\"Below is an extract transcript of a meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\\n{transcription}\"\n",
"\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]\n",
"\n",
" inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n",
" outputs = model.generate(inputs, max_new_tokens=2000)\n",
" response = tokenizer.decode(outputs[0])\n",
"\n",
" # Clean up the response, keep only the minutes\n",
" progress(0.9, desc=\"Cleaning and formatting minutes...\")\n",
" response = response.split(\"<|end_header_id|>\")[-1].strip().replace(\"<|eot_id|>\",\"\")\n",
"\n",
" return response"
],
"metadata": {
"id": "u9aFA7tjy3Ri"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Transcribe the uploaded audio file using OpenAI's Whisper model\n",
"\n",
"def transcribe_audio(audio_path, progress=gr.Progress()):\n",
" progress(0.3, desc=\"Creating transcript from audio...\")\n",
"\n",
" try:\n",
" with open(audio_path, \"rb\") as audio_file:\n",
" transcription = openai.audio.transcriptions.create(\n",
" model=AUDIO_MODEL,\n",
" file=audio_file,\n",
" response_format=\"text\"\n",
" )\n",
" return transcription\n",
" except Exception as e:\n",
" return f\"Error during transcription: {str(e)}\""
],
"metadata": {
"id": "OEuqR90Vy4AZ"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Process the uploaded audio file, transcribe it, and generate meeting minutes\n",
"\n",
"def process_upload(audio_file, progress=gr.Progress()):\n",
" progress(0.1, desc=\"Starting process...\")\n",
"\n",
" if audio_file is None:\n",
" return \"Please upload an audio file.\"\n",
"\n",
" try:\n",
" # Check file format\n",
" if not str(audio_file).lower().endswith('.mp3'):\n",
" return \"Please upload an MP3 file.\"\n",
"\n",
" # Get transcription\n",
" transcription = transcribe_audio(audio_file)\n",
" if transcription.startswith(\"Error\"):\n",
" return transcription\n",
"\n",
" # Generate minutes\n",
" minutes = generate_minutes(transcription, model, tokenizer)\n",
" progress(1.0, desc=\"Process complete!\")\n",
" return minutes\n",
"\n",
" except Exception as e:\n",
" return f\"Error processing file: {str(e)}\""
],
"metadata": {
"id": "lmdsy2iDy5d7"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Create Gradio interface\n",
"\n",
"interface = gr.Interface(\n",
" fn=process_upload,\n",
" inputs=gr.Audio(type=\"filepath\", label=\"Upload MP3 File\", format=\"mp3\"),\n",
" outputs=gr.Markdown(label=\"Meeting Minutes\", min_height=60),\n",
" title=\"Meeting Minutes Generator\",\n",
" description=\"Upload an MP3 recording of your meeting to get AI-generated meeting minutes. This process may take a few minutes.\",\n",
" flagging_mode=\"never\"\n",
")"
],
"metadata": {
"id": "k2U2bWtey7Yo"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Launch Gradio interface\n",
"\n",
"interface.launch()"
],
"metadata": {
"id": "X3JbzRNRy9oG"
},
"execution_count": null,
"outputs": []
}
]
}

211
week5/community-contributions/day5 - generating answers with citations.ipynb

@ -0,0 +1,211 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ec3276da-2cc6-4558-beb3-00cf4dc1ac0a",
"metadata": {},
"source": [
"# Generating answers with source citations\n",
"### This Notebook contains a sample showing how to generate answers with inline & end of the answer citations pointing to the original source document used to answer the question\n",
"<div class=\"alert alert-block alert-warning\">\n",
"<h4><u>Prerequisite:</u> Please run the <a href=\"../day5.ipynb\" >Day 5 notebook</a> to create & populate the vector database before executing this notebook</h4>\n",
"</div>\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59921cc4-ecb7-460a-a15a-1b4490f3cf25",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"import os\n",
"from dotenv import load_dotenv\n",
"import gradio as gr\n",
"from openai import OpenAI\n",
"from langchain_openai import OpenAIEmbeddings\n",
"from langchain_chroma import Chroma\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba5d79d2-d7fb-473e-bd0f-79abbb7b69ea",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv()\n",
"MODEL = \"gpt-4o-mini\"\n",
"db_name = \"vector_db\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b34c33fc-b7d8-4880-ba8f-0db51b24a8a8",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c92170d-f4e9-45d4-8b72-f3779103a551",
"metadata": {},
"outputs": [],
"source": [
"# Load the existing vector database that you created from the Day5 notebook\n",
"if os.path.exists(f\"..\\\\{db_name}\"):\n",
" vectorstore = Chroma(embedding_function=embeddings, persist_directory=f\"..\\\\{db_name}\")\n",
" print(f\"Vectorstore loaded with {vectorstore._collection.count()} documents\")\n",
"else:\n",
" print(\"Vector store doesn't exist. Please run the Day 5 notebook first to create Chroma Vector DB & injest the data.\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78809d3b-bea3-436d-bb79-6adc93757a91",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"\"\"You are an assistant for question-answering tasks. \n",
"Use the following pieces of retrieved context to answer the question. \n",
"If you don't know the answer, just say that you don't know.\n",
"Use three sentences maximum and keep the answer concise.\n",
"Use the following markdown format to answer the question along with the Source used to generate the answer, add inline citation for each sentence & add end of the answer citations:\n",
"'CEO of Insurellm is Avery Lancaster [[1]](Source Link 1). Who is also a co-founder [[2]](Source Link 2)\n",
"Citations: (Note: No duplicates allowed in the below list)\n",
"\n",
"[1 - Source Title 1](Link 1)\n",
"[2 - Source Title 2](Link 2)\n",
"...\n",
"[n - Source Title n](Link n)'\n",
" \n",
"Example answer: \n",
"'CEO of Insurellm is Avery Lancaster [[1]](knowledge-base\\\\company\\\\about.md). Who is also a co-founder [[2]](knowledge-base\\\\employees\\\\Avery Lancaster.md)\n",
"Citations:\n",
"\n",
"[1 - About Company](knowledge-base\\\\company\\\\about.md)\n",
"[2 - Avery Lancaster employees](knowledge-base\\\\employees\\\\Avery Lancaster.md)'\n",
" \n",
"Important Note: Have unique end of the answer citations. Don't give duplicate citation numbers for the same source link, reuse the same citation number if the same source link is referenced multiple times.\n",
"'\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dee6afbd-03af-448e-b587-991c555930bf",
"metadata": {},
"outputs": [],
"source": [
"# Change the below port if jupyter notebook is running in a different port\n",
"jupyter_notebook_port = \"8888\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ed36033b-a67e-4278-b990-c52255274b63",
"metadata": {},
"outputs": [],
"source": [
"def generate_user_prompt(message):\n",
" retriever = vectorstore.as_retriever()\n",
" results = retriever.invoke(message)\n",
" doc_chunk_merged = \"\"\n",
" for doc_chunk in results: \n",
" source = f\"http://localhost:{jupyter_notebook_port}/lab/tree/week5/\" + doc_chunk.metadata.get(\"source\").replace(\"\\\\\",\"/\")\n",
" title = doc_chunk.metadata.get(\"doc_type\") + \" -> \" + source.split('\\\\')[-1][:-3]\n",
" doc_chunk_merged += f\"Content: {doc_chunk.page_content}\\n Source title: {title}\\n Source link: {source}\\n\\n\"\n",
" return f\"Question: {message}\\n {doc_chunk_merged}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26281283-7792-486f-96b0-c781952b6078",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": generate_user_prompt(message)}]\n",
" stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True, seed=3, max_tokens=1000)\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "44518359-4e55-4679-8460-7406a85cc26f",
"metadata": {},
"outputs": [],
"source": [
"#Testing the Answer generation - 1\n",
"user_prompt = \"Please explain what Insurellm is in a couple of sentences\"\n",
"\n",
"display_handle = display(Markdown(\"\"), display_id=True)\n",
"for chunk in chat(user_prompt, []):\n",
" update_display(Markdown(chunk), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f6ad1f9d-fd9e-4846-ad83-a33c96f38b72",
"metadata": {},
"outputs": [],
"source": [
"#Testing the Answer generation - 2\n",
"user_prompt = \"Please explain in short on what products are available in Insurellm\"\n",
"\n",
"display_handle = display(Markdown(\"\"), display_id=True)\n",
"for chunk in chat(user_prompt, []):\n",
" update_display(Markdown(chunk), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d50adb06-e901-41a8-99ed-6f3b4bfacd40",
"metadata": {},
"outputs": [],
"source": [
"#Launch Gradio\n",
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}