50 changed files with 10277 additions and 43 deletions
@@ -0,0 +1,28 @@
Client: Hello I would like to order a pizza
Restaurant: Sure. What pizza would you like to order from our menu?
Client: Chicken Ranch
Restaurant: I am so sorry, but chicken ranch is currently unavailable on our menu
Client: AHHHHH. Do you have chicken BBQ?
Restaurant: Yes! Do you want it small, medium, or large?
Client: Medium
Restaurant: Ok. This will be 180 LE
Client: Thanks
Restaurant: Anytime.
Client: AHHHH I forgot. I want to add a new chicken BBQ pizza
Restaurant: No problem. Do you also want it medium?
Client: Yes
Restaurant: Okay this will be 380 LE
Client: Okay Thanks
Client: Wait a minute. Isn't 180 * 2 = 360?
Restaurant: It seems that there might be a misunderstanding. We add an extra 20 LE for every extra pizza ordered.
Client: NOBODY TOLD ME THAT.. AND WHY ON EARTH WOULD YOU DO SOMETHING LIKE THAT?
Restaurant: We are sorry but this is our policy.
Client: Okay then I don't want your pizza.
Restaurant: We are so sorry to hear that. We can make a 10% discount on the total price so it would be 342 LE
Client: Fine
Restaurant: Thank you for ordering
Restaurant: Pizza is delivered. How is your experience?
Client: Your pizza doesn't taste good
Restaurant: We are so sorry to hear that. Do you have any suggestions you would like to make?
Client: Make good pizza
Restaurant: Thanks for your review. We will make sure to improve our pizza in the future. Your opinion really matters.
@@ -0,0 +1,5 @@
Client: Hello I would like to order a chicken ranch pizza
Restaurant: I am so sorry, but chicken ranch is currently unavailable on our menu
Client: Okay thanks
Restaurant: Would you like to order something else?
Client: No thank you
@@ -0,0 +1,19 @@
Client: Hello. What is the best-selling pizza on your menu?
Restaurant: Hello! Chicken Ranch pizza is our best-selling pizza. Also our special pepperoni pizza got some amazing reviews
Client: Okay. I want to order a pepperoni pizza
Restaurant: Sure. Do you want it small, medium, or large?
Client: Large
Restaurant: Okay. This will be 210 LE. Would you like to order something else?
Client: Yes. Do you have onion rings?
Restaurant: Yes
Client: Okay I would like to add onion rings.
Restaurant: Sorry to hear that. But it appears that the order is on its way to you.
Restaurant: Sure. This will be 250 LE
Client: Thanks
Restaurant: Anytime
Client: I have been waiting for too long and the order hasn't arrived yet
Restaurant: Sorry to hear that. But it appears that the order is on its way to you.
Restaurant: The order should have arrived by now.
Client: Yes it has arrived.
Restaurant: How is your experience?
Client: Your pizza tastes soooooo good. The order took too long to arrive but when I tasted the pizza, I was really enjoying it and forgot everything about the delay.
Restaurant: We are so glad to hear that
@@ -0,0 +1,15 @@
You are an assistant working for the customer service department in a pizza restaurant.
You are to receive a chat between a client and the restaurant's customer service.
You should generate your responses based on the following criteria:
- What did the client order?
- How much did it cost?
- If the client changed their mind, keep only their final order and its final cost
- If the client ordered anything, classify their experience as one of: Positive/Negative/Neutral/Unknown
- If the client did not order anything, do not mention their sentiment or experience
- Only if the client's experience is positive or negative, provide a brief summary of their sentiment
- Do not provide a sentiment summary if their experience was neutral or unknown
- Your answers should be clear and to the point; avoid long sentences
- Your answers should be displayed in bullet points
- Your answers should be displayed in markdown
- If the client did not order anything, briefly explain why that might have happened
- Do not mention cost if the client did not order anything
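
For illustration (a hypothetical order and price, not taken from the chats above), a summary that satisfies these criteria might look like:

- **Order:** 1 Medium Chicken BBQ Pizza
- **Cost:** 180 LE
- **Experience:** Negative
  - **Summary:** The client was dissatisfied with the taste of the pizza.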
@@ -0,0 +1,127 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "39e3e763-9b00-49eb-aead-034a2d0517a7",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f3bb5e2a-b70f-42ba-9f22-030a9c6bc9d1",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
"    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
"    print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
"    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
"    print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "994f51fb-eab3-45a2-847f-87aebb92b17a",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a8125c6d-c884-4f65-b477-cab155e29ce3",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"You are an AI that suggests short and relevant subject lines for emails based on their content.\"\n",
"user_prompt = \"\"\"\n",
"Here is the content of an email:\n",
"\n",
"Dear Team,\n",
"\n",
"I hope you're all doing well. I wanted to remind you that our next project meeting is scheduled for this Friday at 3 PM. We will be discussing our progress and any blockers. Please make sure to review the latest updates before the meeting.\n",
"\n",
"Best,\n",
"John\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
"    {\"role\": \"system\", \"content\": system_prompt},\n",
"    {\"role\": \"user\", \"content\": user_prompt}\n",
"]\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=messages\n",
")\n",
"\n",
"# Step 4: Print the result\n",
"\n",
"print(\"Suggested Subject Line:\", response.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1010ac80-1ee8-432f-aa3f-12af419dc23a",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,195 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c97ad592-c8be-4583-a19c-ac813e56f410",
"metadata": {},
"source": [
"## Mac Users\n",
"\n",
"I ran into some challenges setting this up on an Apple Silicon (M1) Mac. Execute the commands below in the macOS Terminal.\n",
"\n",
"1. Download chromedriver.\n",
"2. Unzip it and add it to the path.\n",
"3. Set extended attributes."
]
},
{
"cell_type": "markdown",
"id": "b635b345-b000-48cc-8a7f-7df279a489a3",
"metadata": {},
"source": [
"cd ~/Downloads\n",
"wget https://storage.googleapis.com/chrome-for-testing-public/133.0.6943.126/mac-arm64/chromedriver-mac-arm64.zip\n",
"unzip chromedriver-mac-arm64.zip\n",
"sudo mv chromedriver-mac-arm64/chromedriver /usr/local/bin/\n",
"chmod +x /usr/local/bin/chromedriver\n",
"cd /usr/local/bin/\n",
"xattr -d com.apple.quarantine chromedriver\n",
"cd\n",
"chromedriver --version"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "17c7c79a-8ae0-4f5d-a7c8-c54aa7ba90fd",
"metadata": {},
"outputs": [],
"source": [
"!pip install selenium\n",
"!pip install undetected-chromedriver\n",
"!pip install beautifulsoup4"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c10bd630-2dfd-4572-8c21-2dc4c6a372ab",
"metadata": {},
"outputs": [],
"source": [
"from selenium import webdriver\n",
"from selenium.webdriver.chrome.service import Service\n",
"from selenium.webdriver.common.by import By\n",
"from selenium.webdriver.chrome.options import Options\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fb3641d-e9f8-4f5b-bb9d-ee0e971cccdb",
"metadata": {},
"outputs": [],
"source": [
"# OLLAMA_API and HEADERS support the raw REST approach; summarize() below talks to\n",
"# Ollama through its OpenAI-compatible endpoint instead.\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"MODEL = \"llama3.2\"\n",
"PATH_TO_CHROME_DRIVER = '/usr/local/bin/chromedriver'\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown. Highlight all the products this website offers and also find when the website was created.\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5d57e958",
"metadata": {},
"outputs": [],
"source": [
"class Website:\n",
"    url: str\n",
"    title: str\n",
"    text: str\n",
"\n",
"    def __init__(self, url):\n",
"        self.url = url\n",
"\n",
"        options = Options()\n",
"        options.add_argument(\"--no-sandbox\")\n",
"        options.add_argument(\"--disable-dev-shm-usage\")\n",
"\n",
"        service = Service(PATH_TO_CHROME_DRIVER)\n",
"        driver = webdriver.Chrome(service=service, options=options)\n",
"        driver.get(url)\n",
"\n",
"        # input(\"Please complete the verification in the browser and press Enter to continue...\")\n",
"        page_source = driver.page_source\n",
"        driver.quit()\n",
"\n",
"        soup = BeautifulSoup(page_source, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n",
"            irrelevant.decompose()\n",
"        self.text = soup.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "56df8cd2-2707-43f6-a066-3367846929b3",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(website):\n",
"    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
"    user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
"    user_prompt += website.text\n",
"    return user_prompt\n",
"\n",
"\n",
"def messages_for(website):\n",
"    return [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
"    ]\n",
"\n",
"\n",
"def summarize(url):\n",
"    website = Website(url)\n",
"    ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
"    response = ollama_via_openai.chat.completions.create(\n",
"        model=MODEL,\n",
"        messages=messages_for(website)\n",
"    )\n",
"    return response.choices[0].message.content\n",
"\n",
"\n",
"def display_summary(url):\n",
"    summary = summarize(url)\n",
"    display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2eb9599",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://ae.almosafer.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "31b66c0f-6b45-4986-b77c-758625945a91",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,167 @@
import os
import pandas as pd
import re
from dotenv import load_dotenv
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from openai import OpenAI
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment

# Load environment variables
load_dotenv(override=True)
api_key = os.getenv('OPENAI_API_KEY')

# Validate API key
if not api_key:
    raise ValueError("No API key was found - please check your .env file.")

# Initialize OpenAI client
openai = OpenAI()

# Set up Selenium WebDriver
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")

class Website:
    """Scrapes and processes website content using Selenium."""

    def __init__(self, url: str):
        self.url = url
        self.text = "No content extracted."

        service = Service(executable_path="/opt/homebrew/bin/chromedriver")
        driver = webdriver.Chrome(service=service, options=chrome_options)

        try:
            driver.get(url)
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            body_element = driver.find_element(By.TAG_NAME, "body")
            self.text = body_element.text.strip() if body_element else "No content extracted."
        except Exception as e:
            print(f"Error fetching website: {e}")
        finally:
            driver.quit()

    def summarized_text(self, max_length=1500):
        return self.text[:max_length] + ("..." if len(self.text) > max_length else "")

def clean_text(text):
    """
    Cleans extracted text by removing markdown-style formatting.
    """
    text = re.sub(r"#+\s*", "", text)  # strip markdown heading markers (e.g. "##", "###")
    text = re.sub(r"\*\*(.*?)\*\*", r"\1", text)  # unwrap **bold** spans
    return text.strip()
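
# Illustrative check of clean_text (hypothetical input, not from a real page):
#   clean_text("### Top Products\n**Best** sellers")  ->  "Top Products\nBest sellers"
# The first substitution removes heading markers; the second unwraps bold spans.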

# Aspect-specific prompts for concise output
aspect_prompts = {
    "Marketing Strategies": "Summarize the core marketing strategies used on this website in under 30 words. Do not include a title or introduction.",
    "SEO Keywords": "List only the most relevant SEO keywords from this website, separated by commas. Do not include a title or introduction.",
    "User Engagement Tactics": "List key engagement tactics used on this website (e.g., interactive features, user incentives, social proof). Keep responses to 3-5 bullet points. Do not include a title or introduction.",
    "Call-to-Action Phrases": "List only the most common Call-to-Action phrases used on this website, separated by commas. Do not include a title or introduction.",
    "Branding Elements": "Summarize the brand's tone, style, and positioning in under 30 words. Do not include a title or introduction.",
    "Competitor Comparison": "Briefly describe how this website differentiates itself from competitors in under 30 words. Do not include a title or introduction.",
    "Product Descriptions": "List the most important features or benefits of the products/services described on this website in under 30 words. Do not include a title or introduction.",
    "Customer Reviews Sentiment": "Summarize the overall sentiment of customer reviews in under 30 words, highlighting common themes. Do not include a title or introduction.",
    "Social Media Strategy": "List key social media strategies used on this website, separated by commas. Do not include a title or introduction."
}


def summarize(url: str) -> dict:
    """
    Fetches a website, extracts relevant content, and generates a separate summary for each aspect.

    :param url: The website URL to analyze.
    :return: A dictionary containing extracted information.
    """
    website = Website(url)

    if not website.text or website.text == "No content extracted.":
        return {"URL": url, "Error": "Failed to extract content"}

    extracted_data = {"URL": url}

    for aspect, prompt in aspect_prompts.items():
        try:
            formatted_prompt = f"{prompt} \n\nContent:\n{website.summarized_text()}"
            response = openai.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are an expert at extracting structured information from website content."},
                    {"role": "user", "content": formatted_prompt}
                ]
            )

            extracted_data[aspect] = clean_text(response.choices[0].message.content)

        except Exception as e:
            extracted_data[aspect] = f"Error generating summary: {e}"

    return extracted_data

def save_to_excel(data_list: list, filename="website_analysis.xlsx"):
    """
    Saves extracted information to an Excel file with proper formatting.

    :param data_list: A list of dictionaries containing extracted website details.
    :param filename: The name of the Excel file to save data.
    """
    df = pd.DataFrame(data_list)

    df.to_excel(filename, index=False)

    wb = load_workbook(filename)
    ws = wb.active

    # Auto-adjust column widths
    for col in ws.columns:
        max_length = 0
        col_letter = col[0].column_letter
        for cell in col:
            try:
                if cell.value:
                    max_length = max(max_length, len(str(cell.value)))
            except Exception:
                pass
        ws.column_dimensions[col_letter].width = min(max_length + 2, 50)

    # Format headers
    for cell in ws[1]:
        cell.font = Font(bold=True)
        cell.alignment = Alignment(horizontal="center", vertical="center")

    # Wrap text for extracted content
    for row in ws.iter_rows(min_row=2):
        for cell in row:
            cell.alignment = Alignment(wrap_text=True, vertical="top")

    wb.save(filename)
    print(f"Data saved to {filename} with improved formatting.")

# 🔹 LIST OF WEBSITES TO PROCESS
websites = [
    "https://www.gymshark.com/",
]

if __name__ == "__main__":
    print("\nProcessing websites...\n")
    extracted_data_list = []

    for site in websites:
        print(f"Extracting data from {site}...")
        extracted_data = summarize(site)
        extracted_data_list.append(extracted_data)

    save_to_excel(extracted_data_list)
    print("\nAll websites processed successfully!")
@@ -0,0 +1,213 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "bc7d1de3-e2ac-46ff-a302-3b4ba38c4c90",
"metadata": {},
"source": [
"## Also trying the amazing reasoning model DeepSeek\n",
"\n",
"Here we use the version of DeepSeek-reasoner that's been distilled to 1.5B.\n",
"This is actually a 1.5B variant of Qwen that has been fine-tuned using synthetic data generated by DeepSeek R1.\n",
"\n",
"Other sizes of DeepSeek are [here](https://ollama.com/library/deepseek-r1), all the way up to the full 671B parameter version, which would use up 404GB of your drive and is far too large for most!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cf9eb44e-fe5b-47aa-b719-0bb63669ab3d",
"metadata": {},
"outputs": [],
"source": [
"!ollama pull deepseek-r1:1.5b"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4bdcd35a",
"metadata": {},
"outputs": [],
"source": [
"!ollama pull deepseek-r1:8b"
]
},
{
"cell_type": "markdown",
"id": "1622d9bb-5c68-4d4e-9ca4-b492c751f898",
"metadata": {},
"source": [
"# NOW the exercise for you\n",
"\n",
"Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI; use either of the above approaches."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1c106420",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import requests\n",
"import ollama\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22d62f00",
"metadata": {},
"outputs": [],
"source": [
"# Constants\n",
"\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"MODEL = \"deepseek-r1:8b\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6de38216-6d1c-48c4-877b-86d403f4e0f8",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
"    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
"    def __init__(self, url):\n",
"        \"\"\"\n",
"        Create this Website object from the given url using the BeautifulSoup library\n",
"        \"\"\"\n",
"        self.url = url\n",
"        response = requests.get(url, headers=headers)\n",
"        soup = BeautifulSoup(response.content, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"            irrelevant.decompose()\n",
"        self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4449b7dc",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "daca9448",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(website):\n",
"    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
"    user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
"    user_prompt += website.text\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0ec9d5d2",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
"    return [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
"    ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e1ab04a",
"metadata": {},
"outputs": [],
"source": [
"# And now: call Ollama instead of the OpenAI API. You will get very familiar with this!\n",
"\n",
"def summarize(url):\n",
"    website = Website(url)\n",
"    response = ollama.chat(\n",
"        model=MODEL,\n",
"        messages=messages_for(website)\n",
"    )\n",
"    return response['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d3b5628",
"metadata": {},
"outputs": [],
"source": [
"def display_summary(url):\n",
"    summary = summarize(url)\n",
"    display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "938e5633",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llms",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,81 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa",
"metadata": {},
"source": [
"# A Small Tweak to Week1-Day5\n",
"\n",
"If you have network restrictions (such as using a custom DNS provider, or firewall rules at work), you can disable SSL cert verification.\n",
"Once you do that and start executing your code, the output will be riddled with warnings. Thankfully, you can suppress those warnings, too.\n",
"\n",
"See the 2 lines added to the init method, below."
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "106dd65e-90af-4ca8-86b6-23a41840645b",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
"    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"    \"\"\"\n",
"    A utility class to represent a Website that we have scraped, now with links\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, url):\n",
"        self.url = url\n",
"\n",
"        # If you must disable SSL cert validation, and also suppress all the warnings that will come with it,\n",
"        # add the 2 lines below. This comes in very handy if you have DNS/firewall restrictions; alas, use\n",
"        # with caution, especially if deploying this in a non-dev environment.\n",
"        requests.packages.urllib3.disable_warnings()\n",
"        response = requests.get(url, headers=headers, verify=False)\n",
"        # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"\n",
"        self.body = response.content\n",
"        soup = BeautifulSoup(self.body, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"        links = [link.get('href') for link in soup.find_all('a')]\n",
"        self.links = [link for link in links if link]"
]
},
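{
"cell_type": "code",
"execution_count": null,
"id": "ca-bundle-alternative-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A gentler alternative (a sketch, assuming your IT team can provide the corporate\n",
"# CA certificate): point requests at that bundle instead of disabling verification.\n",
"# The path below is a placeholder, not a real file.\n",
"\n",
"# response = requests.get(url, headers=headers, verify=\"/path/to/corporate-ca-bundle.pem\")"
]
}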
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,202 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
"metadata": {},
"source": [
"# End of week 1 exercise\n",
"\n",
"To demonstrate your familiarity with the OpenAI API, and also Ollama, build a tool that takes a technical question, \n",
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"from IPython.display import Markdown, display, update_display\n",
"import openai\n",
"from openai import OpenAI\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
"source": [
"# constants\n",
"models = {\n",
"    'MODEL_GPT': 'gpt-4o-mini',\n",
"    'MODEL_LLAMA': 'llama3.2'\n",
"}\n",
"\n",
"# To use Ollama through the OpenAI client library (ensure that Ollama is running on localhost)\n",
"ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
"\n",
"def model_choices(model):\n",
"    if model in models:\n",
"        return models[model]\n",
"    else:\n",
"        raise ValueError(f\"Model {model} not found in models dictionary\")\n",
"\n",
"def get_model_api(model='MODEL_GPT'):\n",
"    if model == 'MODEL_GPT':\n",
"        return openai, model_choices(model)\n",
"    elif model == 'MODEL_LLAMA':\n",
"        return ollama_via_openai, model_choices(model)\n",
"    else:\n",
"        raise ValueError(f\"Model {model} not found in models dictionary\")\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
"metadata": {},
"outputs": [],
"source": [
"# set up environment\n",
"\n",
"system_prompt = \"\"\"You are an AI assistant.\n",
"The user asks you a technical question about code, and you provide a response with code snippets and explanations.\"\"\"\n",
"\n",
"def stream_answer(question, model):\n",
"    api, model_name = get_model_api(model)\n",
"    stream = api.chat.completions.create(\n",
"        model=model_name,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": system_prompt},\n",
"            {\"role\": \"user\", \"content\": question}\n",
"        ],\n",
"        stream=True\n",
"    )\n",
"\n",
"    response = \"\"\n",
"    display_handle = display(Markdown(\"\"), display_id=True)\n",
"    for chunk in stream:\n",
"        response += chunk.choices[0].delta.content or ''\n",
"        response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
"        update_display(Markdown(response), display_id=display_handle.display_id)\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {},
"outputs": [],
"source": [
"# Here is the question; type over this to ask something new\n",
"\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Understanding the Code Snippet**\n",
"\n",
"This Python code snippet uses a combination of built-in functions, dictionary iteration, and generator expressions to extract and yield author names from a list of `Book` objects.\n",
"\n",
"Here's a breakdown:\n",
"\n",
"1. **Dictionary Iteration**: The expression `for book in books if book.get(\"author\")`\n",
"   - Iterates over each element (`book`) in the container `books`.\n",
"   - Filters out elements whose `'author'` key does not have a value (i.e., `None`, `False`, or an empty string). This leaves only dictionaries with author information.\n",
"\n",
"2. **Dictionary Access**: The expression `{book.get(\"author\") for book in books if book.get(\"author\")}`\n",
"   - Uses dictionary membership testing to access only the values associated with the `'author'` key.\n",
"   - If the value is not found or is considered false, it's skipped in this particular case.\n",
"\n",
"3. **Generator Expression**: This generates an iterator that iterates over the filtered author names.\n",
"   - Yields each author name (i.e., a single `'name'` from the book dictionary) on demand.\n",
"   - Since these are generator expressions, they use memory less than equivalent Python lists and also create results on-demand.\n",
"\n",
"4. **`yield from`**: This statement takes the generator expression as an argument and uses it to generate a nested iterator structure.\n",
"   - It essentially \"decompresses\" the single level of nested iterator created by `list(iter(x))`, allowing for simpler use cases and potentially significant efficiency improvements for more complex structures where every value must be iterated, while in the latter case just the first item per iterable in the outer expression's sequence needs to actually be yielded into result stream.\n",
"   - By \"yielding\" a nested iterator (the generator expression), we can simplify code by avoiding repetitive structure like `for book, book_author in zip(iterating over), ...` or list creation.\n",
"\n",
"**Example Use Case**\n",
"\n",
"In this hypothetical example:\n",
"\n",
"# Example Book objects\n",
"class Book:\n",
"    def __init__(self, author, title):\n",
"        self.author = author # str\n",
"        self.title = title\n",
"\n",
"books = [\n",
"    {\"author\": \"John Doe\", \"title\": f\"Book 1 by John Doe\"},\n",
"    {\"author\": None, \"title\": f\"Book 2 without Author\"},\n",
"    {\"author\": \"Jane Smith\", \"title\": f\"Book 3 by Jane Smith\"}\n",
"]\n",
"\n",
"# The given expression to extract and yield author names\n",
"for author in yield from {book.get(\"author\") for book in books if book.get(\"author\")}:\n",
"\n",
"    print(author) \n",
"\n",
"In this code snippet, printing the extracted authors would output `John Doe`, `Jane Smith` (since only dictionaries with author information pass the filtering test).\n",
"\n",
"Please modify it like as you wish and use `yield from` along with dictionary iteration, list comprehension or generator expression if needed, and explain what purpose your version has."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Get the model of your choice (choices below) to answer, with streaming\n",
"\n",
"\"\"\"models = {\n",
"    'MODEL_GPT': 'gpt-4o-mini',\n",
"    'MODEL_LLAMA': 'llama3.2'\n",
"}\"\"\"\n",
"\n",
"stream_answer(question, 'MODEL_LLAMA')"
]
},
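{
"cell_type": "code",
"execution_count": null,
"id": "yield-from-demo-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A minimal, runnable illustration of the construct in the question, since the\n",
"# streamed answer above garbles the syntax ('yield from' is only valid inside a\n",
"# function body, not in a for-loop header). The 'books' data is made up.\n",
"\n",
"books = [\n",
"    {\"author\": \"John Doe\", \"title\": \"Book 1\"},\n",
"    {\"title\": \"Book 2 without an author\"},\n",
"    {\"author\": \"Jane Smith\", \"title\": \"Book 3\"},\n",
"    {\"author\": \"John Doe\", \"title\": \"Book 4\"},\n",
"]\n",
"\n",
"def unique_authors(books):\n",
"    # The set comprehension collects each truthy \"author\" value exactly once;\n",
"    # 'yield from' then yields those values one at a time from this generator.\n",
"    yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\n",
"print(sorted(unique_authors(books)))  # ['Jane Smith', 'John Doe']"
]
}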
],
"metadata": {
"kernelspec": {
"display_name": "llms",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,217 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "2ce61bb5-1d5b-43b8-b5bb-6aeae91c7574",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "3399686d-5f14-4fb2-8939-fd2401be3007",
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"gpt-4o-mini\"\n",
"SYSTEM_PROMPT_PATH = \"Chat_Summary_Data/System_Prompt.txt\"\n",
"CHATS_PATH = \"Chat_Summary_Data/Chat_Examples/\""
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "d97b8374-a161-435c-8317-1d0ecaaa9b71",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
"    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
"    print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
"    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
"    print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b3f4afb4-2e4a-4971-915e-a8634a17eda8",
"metadata": {},
"outputs": [],
"source": [
"class ChatAI:\n",
"    def __init__(self, system_prompt_path=SYSTEM_PROMPT_PATH, model=MODEL):\n",
"        with open(system_prompt_path, \"r\") as file:\n",
"            self.system_prompt = file.read()\n",
"\n",
"        self.openai = OpenAI()\n",
"        self.model = model\n",
"\n",
"    @staticmethod\n",
"    def _get_user_prompt(chat_txt):\n",
"        with open(chat_txt, \"r\") as file:\n",
"            user_prompt_str = file.read()\n",
"        return user_prompt_str\n",
"\n",
"    def generate(self, chat_txt):\n",
"        messages = [\n",
"            {\"role\": \"system\", \"content\": self.system_prompt},\n",
"            {\"role\": \"user\", \"content\": self._get_user_prompt(chat_txt)}\n",
"        ]\n",
"\n",
"        response = self.openai.chat.completions.create(model=self.model, messages=messages)\n",
"        return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d243b582-66af-49f9-bcd1-e05a63e61c34",
"metadata": {},
"outputs": [],
"source": [
"chat_ai = ChatAI()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c764ace6-5a0f-4dd0-9454-0b8a093b97fc",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Chat1"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"- **Order:** 2 Medium Chicken BBQ Pizzas\n",
"- **Cost:** 342 LE\n",
"- **Experience:** Negative\n",
"  - **Summary:** The client expressed dissatisfaction with the pizza taste."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"# Chat2"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"- The client ordered: Nothing \n",
"- Summary: The client did not place an order because the chicken ranch pizza was unavailable."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"# Chat3"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"- **Order**: Large pepperoni pizza and onion rings \n",
"- **Total Cost**: 250 LE \n",
"- **Experience**: Positive \n",
"  - The client enjoyed the pizza despite the delay in delivery."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"chats_txt = os.listdir(CHATS_PATH)\n",
"for chat_file in chats_txt:\n",
"    markdown_heading = f\"# {chat_file[:-4]}\"\n",
"    display(Markdown(markdown_heading))\n",
"    display(Markdown(chat_ai.generate(CHATS_PATH + chat_file)))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,361 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "5d799d2a-6e58-4a83-b17a-dbbc40efdc39",
"metadata": {},
"source": [
"## Project - Course Booking AI Assistant\n",
"AI Customer Support Bot that:\n",
"- Returns Prices\n",
"- Books Tickets\n",
"- Adds Information to Text File"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "b1ad9acd-a702-48a3-8ff5-d536bcac8030",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "74adab0c-99b3-46cd-a79f-320a3e74138a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n"
]
}
],
"source": [
"# Initialization\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
"    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
"    print(\"OpenAI API Key not set\")\n",
"\n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "8d3240a4-99c1-4c07-acaa-ecbb69ffd2e4",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Online Course Platform called StudyAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so. \"\n",
"system_message += \"If you are given a partial name, for example 'discrete' instead of 'discrete structures', \\\n",
"ask the user if they meant to say 'discrete structures', and then display the price. The user may also use \\\n",
"acronyms like 'PF' instead of 'programming fundamentals' or 'OOP' to mean 'object oriented programming'. \\\n",
"Clarify what the user means before answering if you are unsure.\""
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "9a1b8d5f-f893-477b-8396-ff7d697eb0c3",
"metadata": {},
"outputs": [],
"source": [
"course_prices = {\"programming fundamentals\": \"$19\", \"discrete structures\": \"$39\", \"operating systems\": \"$24\", \"object oriented programming\": \"$39\"}\n",
"\n",
"def get_course_price(course):\n",
"    print(f\"Tool get_course_price called for {course}\")\n",
"    course = course.lower()\n",
"    return course_prices.get(course, \"Unknown\")\n",
"\n",
"def enroll_in_course(course):\n",
"    print(f'Tool enroll_in_course called for {course}')\n",
"    course_price = get_course_price(course)\n",
"    if course_price != 'Unknown':\n",
"        with open('enrolled_courses.txt', 'a') as file:\n",
"            file.write(course + \"\\n\")\n",
"        return 'Successfully enrolled in course'\n",
"    else:\n",
"        return 'Enrollment failed, no such course available'"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "330d2b94-a8c5-4967-ace7-15d2cd52d7ae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_course_price called for graph theory\n",
"Tool get_course_price called for discrete structures\n"
]
},
{
"data": {
"text/plain": [
"'$39'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_course_price('graph theory')\n",
"get_course_price('discrete structures')"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "5bb65830-fab8-45a7-bf43-7e52186915a0",
"metadata": {},
"outputs": [],
"source": [
"price_function = {\n",
"    \"name\": \"get_course_price\",\n",
"    \"description\": \"Get the price of a course. Call this whenever you need to know the course price, for example when a customer asks 'How much is a ticket for this course?'\",\n",
"    \"parameters\": {\n",
"        \"type\": \"object\",\n",
"        \"properties\": {\n",
"            \"course\": {\n",
"                \"type\": \"string\",\n",
"                \"description\": \"The course that the customer wants to purchase\",\n",
"            },\n",
"        },\n",
"        \"required\": [\"course\"],\n",
"        \"additionalProperties\": False\n",
"    }\n",
"}\n",
"\n",
"enroll_function = {\n",
"    \"name\": \"enroll_in_course\",\n",
"    \"description\": \"Get the success status of course enrollment. Call whenever a customer wants to enroll in a course, \\\n",
"for example, if they say 'I want to purchase this course' or 'I want to enroll in this course'\",\n",
"    \"parameters\": {\n",
"        \"type\": \"object\",\n",
"        \"properties\": {\n",
"            \"course\": {\n",
"                \"type\": \"string\",\n",
"                \"description\": \"The course that the customer wants to purchase\",\n",
"            },\n",
"        },\n",
"        \"required\": [\"course\"],\n",
"        \"additionalProperties\": False\n",
"    }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "08af86b9-3aaa-4b6b-bf7c-ee668ba1cbfe",
"metadata": {},
"outputs": [],
"source": [
"tools = [\n",
"    {\"type\": \"function\", \"function\": price_function},\n",
"    {\"type\": \"function\", \"function\": enroll_function}\n",
"]"
]
},
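{
"cell_type": "code",
"execution_count": null,
"id": "tool-call-shape-sketch",
"metadata": {},
"outputs": [],
"source": [
"# For reference: the shape of the response when the model decides to call a tool\n",
"# (field names follow the OpenAI chat completions API; the values are made-up examples):\n",
"#\n",
"# response.choices[0].finish_reason                             -> \"tool_calls\"\n",
"# response.choices[0].message.tool_calls[0].function.name       -> \"get_course_price\"\n",
"# response.choices[0].message.tool_calls[0].function.arguments  -> '{\"course\": \"discrete structures\"}'\n",
"#\n",
"# chat() below checks finish_reason, runs the matching Python function via\n",
"# handle_tool_call(), appends the tool result message, and asks the model again."
]
},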
{
"cell_type": "code",
"execution_count": 8,
"id": "482efc34-ff1f-4146-9570-58b4d59c3b2f",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
"    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
"    response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
"    # If the model wants to call tools, run each one, append the results,\n",
"    # then call the model again for the final user-facing answer.\n",
"    if response.choices[0].finish_reason == \"tool_calls\":\n",
"        message = response.choices[0].message\n",
"        messages.append(message)\n",
"        for tool_call in message.tool_calls:\n",
"            messages.append(handle_tool_call(tool_call))\n",
"        response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
"\n",
"    return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "f725b4fb-d477-4d7d-80b5-5d70e1b25a86",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"\n",
"def handle_tool_call(tool_call):\n",
"    function = tool_call.function.name\n",
"    arguments = json.loads(tool_call.function.arguments)\n",
"    match function:\n",
"        case 'get_course_price':\n",
"            course = arguments.get('course')\n",
"            price = get_course_price(course)\n",
"            return {\n",
"                \"role\": \"tool\",\n",
"                \"content\": json.dumps({\"course\": course, \"price\": price}),\n",
"                \"tool_call_id\": tool_call.id\n",
"            }\n",
"        case 'enroll_in_course':\n",
"            course = arguments.get('course')\n",
"            status = enroll_in_course(course)\n",
"            return {\n",
"                \"role\": \"tool\",\n",
"                \"content\": json.dumps({\"course\": course, \"status\": status}),\n",
"                \"tool_call_id\": tool_call.id\n",
"            }\n",
"        case _:\n",
"            raise ValueError(f\"Unknown tool requested: {function}\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "c446272a-9ce1-4ffd-9bc8-483d782810b4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7864\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_course_price called for programming fundamentals\n",
"Tool enroll_in_course_ called for Programming Fundamentals\n",
"Tool get_course_price called for Programming Fundamentals\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\gradio\\queueing.py\", line 625, in process_events\n",
"    response = await route_utils.call_process_api(\n",
"               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\gradio\\route_utils.py\", line 322, in call_process_api\n",
"    output = await app.get_blocks().process_api(\n",
"             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 2096, in process_api\n",
"    result = await self.call_function(\n",
"             ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 1641, in call_function\n",
"    prediction = await fn(*processed_input)\n",
"                 ^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 857, in async_wrapper\n",
"    response = await f(*args, **kwargs)\n",
"               ^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\gradio\\chat_interface.py\", line 862, in _submit_fn\n",
"    response = await anyio.to_thread.run_sync(\n",
"               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n",
"    return await get_async_backend().run_sync_in_worker_thread(\n",
"           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2461, in run_sync_in_worker_thread\n",
"    return await future\n",
"           ^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\anaconda3\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 962, in run\n",
"    result = context.run(func, *args)\n",
"             ^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\AppData\\Local\\Temp\\ipykernel_3348\\1161680098.py\", line 9, in chat\n",
"    messages.append(handle_tool_call(tool_call))\n",
"                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\AppData\\Local\\Temp\\ipykernel_3348\\1187326431.py\", line 17, in handle_tool_call\n",
"    status = enroll_in_course(course)\n",
"             ^^^^^^^^^^^^^^^^^^^^^^^^\n",
"  File \"C:\\Users\\92310\\AppData\\Local\\Temp\\ipykernel_3348\\2541918318.py\", line 13, in enroll_in_course\n",
"    file.write(course_name + \"\\n\")\n",
"    ^^^^^^^^^^^\n",
"NameError: name 'course_name' is not defined\n"
]
}
],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1fe714a3-f793-4c3b-b5aa-6c81b82aea1b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
File diff suppressed because it is too large
@ -0,0 +1,371 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 83, |
||||
"id": "1e3da8cc-fc00-40f4-95a5-7a26d3b4a974", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"import ollama\n", |
||||
"from IPython.display import Markdown, display, update_display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 84, |
||||
"id": "a826fbf2-9394-4897-a012-e92674ffff9d", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"OpenAI API Key exists and begins sk-proj-\n", |
||||
"Anthropic API Key exists and begins sk-ant-\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"# Print the key prefixes to help with any debugging\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 85, |
||||
"id": "cd0055f5-f6c9-461d-97d4-730259b20bd0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"claude = anthropic.Anthropic()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 86, |
||||
"id": "4a752a6f-76e4-4fb1-9452-f458832dd02e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gpt_model = \"gpt-4o-mini\"\n", |
||||
"claude_model = \"claude-3-haiku-20240307\"\n", |
||||
"ollama_model = \"llama3.2\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 87, |
||||
"id": "9c5d4948-62d0-4443-94c6-ef9449bfc043", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gpt_system = \"You are a knowledgable but sarcastic team lead at a software development company. \\\n", |
||||
"You manage a team with two more junior developers. \\\n", |
||||
"You might come across as aggressive but that's just your humor. \"\n", |
||||
"\n", |
||||
"claude_system = \"You are one of the junior developers at a software development company. \\\n", |
||||
"You work in a team of three. \\\n", |
||||
"You are nerdy, introvert but gets the job done efficiently. \"\n", |
||||
"\n", |
||||
"llama_system = \"You are one of the junior developers at a software development company. \\\n", |
||||
"You have two other developers in your team.\\\n", |
||||
"You are more talks and less work kind of person. \"\n", |
||||
"\n", |
||||
"gpt_messages = [\"Hi, how is it going?\"]\n", |
||||
"claude_messages = [\"Hi.\"]\n", |
||||
"llama_messages = [\"Hey, what's up everyone?\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 88, |
||||
"id": "614ae52a-d476-4f68-9eee-f8b4a00f08ee", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_gpt():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", |
||||
" for gpt_msg, claude_msg, llama_msg in zip(gpt_messages, claude_messages, llama_messages):\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": gpt_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": claude_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": llama_msg})\n", |
||||
" completion = openai.chat.completions.create(\n", |
||||
" model=gpt_model,\n", |
||||
" messages=messages\n", |
||||
" )\n", |
||||
" return completion.choices[0].message.content" |
||||
] |
||||
}, |
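||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1a3b5c7d-9e0f-4a1b-8c2d-3e5f7a9b1c3d", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"The pattern here: each bot sees its own lines as `assistant` turns and the other two bots' lines as `user` turns, with `zip` walking the three transcripts in lockstep. The same reconstruction repeats below for Claude and Llama from their own points of view." |
||||
] |
||||
}, |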
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 79, |
||||
"id": "90bd6e0b-7c38-40c6-9f11-cbce4328a69e", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'Wow, it\\'s like the confidence fairy sprinkled some magic dust on you! Look at you, speaking up like a pro. \\n\\nYou\\'re absolutely right about the iterative approach. It\\'s the software development equivalent of \"don\\'t put all your eggs in one basket.\" So let’s keep that mindset! \\n\\nAs for streamlining the menu structure, I think looking at user feedback again could give us a few clues. Maybe we can identify the most-used features and prioritize those. You know, kind of like how I prioritize coffee over breakfast.\\n\\nSo, Alex, what do you think? Ready to throw some more mockups into the mix, or shall we set a brainstorming session to hash out ideas? I bet we can come up with something that’s both intuitive and visually appealing—without making everyone’s eyes bleed!'" |
||||
] |
||||
}, |
||||
"execution_count": 79, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"call_gpt()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 89, |
||||
"id": "d9e46be6-4a5b-4222-89b9-0ec0cf473de3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_claude():\n", |
||||
" messages = []\n", |
||||
" for gpt_msg, claude_msg, llama_msg in zip(gpt_messages, claude_messages, llama_messages):\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt_msg})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": llama_msg})\n", |
||||
" \n", |
||||
" # -- Debugging to see what messages are being passed\n", |
||||
" # print(\"Messages being sent to Claude:\")\n", |
||||
" # for idx, msg in enumerate(messages):\n", |
||||
" # print(f\"{idx}: {msg}\")\n", |
||||
" \n", |
||||
" message = claude.messages.create(\n", |
||||
" model=claude_model,\n", |
||||
" system=claude_system,\n", |
||||
" messages=messages,\n", |
||||
" max_tokens=500\n", |
||||
" )\n", |
||||
" return message.content[0].text" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 90, |
||||
"id": "7d6bd779-547e-4b7f-8ed2-d56ac884faa5", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"\"*looks up from computer screen and adjusts glasses* Oh, hello. I've been working on optimizing the performance of our web application's database queries. How can I help you today?\"" |
||||
] |
||||
}, |
||||
"execution_count": 90, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"call_claude()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 91, |
||||
"id": "09de8104-2b93-46c7-8c74-67204355447d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_ollama():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": llama_system}]\n", |
||||
" for gpt_msg, claude_msg, llama_msg in zip(gpt_messages, claude_messages, llama_messages):\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": claude_msg})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": llama_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", |
||||
"\n", |
||||
" try:\n", |
||||
" response = ollama.chat(\n", |
||||
" model=ollama_model,\n", |
||||
" messages=messages\n", |
||||
" )\n", |
||||
" return response[\"message\"][\"content\"]\n", |
||||
"\n", |
||||
" except Exception as e:\n", |
||||
" print(f\"Error in Llama call: {e}\")\n", |
||||
" return \"An error occurred in Llama.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 92, |
||||
"id": "007758b3-900b-4933-a0d2-a0e3d626bb54", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'*laughs* Ah, same old same old, I guess! Just chit-chatting with you guys. You know how it is around here. *winks at the other developers in the team*'" |
||||
] |
||||
}, |
||||
"execution_count": 92, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"call_ollama()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 93, |
||||
"id": "c934d571-469f-4ce8-b9fc-a4db8fd0a780", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"\n", |
||||
"Hi, how is it going?\n", |
||||
"\n", |
||||
"\n", |
||||
"Hi.\n", |
||||
"\n", |
||||
"\n", |
||||
"Hey, what's up everyone?\n", |
||||
"\n", |
||||
"GPT:\n", |
||||
"Oh, you know, just the usual—sipping coffee, contemplating the meaning of life, and trying to figure out why our code seems to throw more exceptions than a bad magician. How about you?\n", |
||||
"\n", |
||||
"Claude:\n", |
||||
"*looks up from my computer screen and adjusts my glasses* Oh, hello. Uh, things are going well. Just making some progress on this project we're working on. How are you doing today?\n", |
||||
"\n", |
||||
"Ollama:\n", |
||||
"*laughs* Ah, same here! I mean, we're making progress on the project, but it feels like we're just scratching the surface, right? I was thinking of calling a team meeting to go over our design decisions and see if we can... *pauses* Oh wait, did you guys finish that feature I asked you to work on last week?\n", |
||||
"\n", |
||||
"GPT:\n", |
||||
"Ah yes, the feature! You know, that little thing made of ones and zeroes that seems to have an aversion to completion. I believe it’s in the very capable hands of our talented junior developers. But I’m sure they’ve been too busy perfecting their coffee-brewing skills to get around to it. *winks* So, what's the update?\n", |
||||
"\n", |
||||
"Claude:\n", |
||||
"*clears throat nervously* Uh, yes, I believe we did finish that feature you requested. Ravi and I worked on it over the past few days. I can give you a quick rundown of what we implemented, if you'd like? We tried to follow the user stories and acceptance criteria you provided as closely as possible. Let me know if you have any other questions or feedback.\n", |
||||
"\n", |
||||
"Ollama:\n", |
||||
"*chuckles* Hey, that's not fair! We're totally on top of it... eventually. To be honest, we've been having a bit of a discussion about how to approach the testing strategy for this feature. Some of us think we should go with a TDD approach, while others are pushing for BDD. I'm trying to mediate, but it's getting a bit heated. *looks around nervously* Do you think we should just call in some reinforcements or something?\n", |
||||
"\n", |
||||
"GPT:\n", |
||||
"Ah, the classic Developer Debate! It’s almost as thrilling as watching paint dry. Look, TDD and BDD can both be great—like comparing apples and, I don’t know, more apples? Why not just try to unite forces for the greater good? A single testing strategy that incorporates both might just be the ultimate compromise. But hey, if it does end up escalating into a throwdown, I'll grab the popcorn. Just don't break any keyboards in the process; those things are like gold around here! So, what do you think? Should we come up with a testing strategy that both sides can choke down?\n", |
||||
"\n", |
||||
"Claude:\n", |
||||
"*nods thoughtfully* I see, that's an interesting debate. I can understand the different perspectives there. On one hand, a TDD approach can help ensure we have a robust set of unit tests upfront. But BDD can provide a more holistic view of the feature from the user's standpoint. Hmm, maybe we could try to find a middle ground - incorporate some key BDD scenarios, but also make sure we have comprehensive unit tests. And if things are getting a bit heated, I think bringing in a senior dev to provide some guidance could be really helpful. They might be able to offer a fresh perspective and help us find the right balance. What do you think?\n", |
||||
"\n", |
||||
"Ollama:\n", |
||||
"*laughs* Ah, exactly! It's always better to try and find common ground, right? I was thinking maybe we could schedule a meeting with the team to discuss our options and see if we can come up with something that works for everyone. But before we do that, I just want to say... *leans in close* ...I'm kind of hoping you'll lead the discussion on this one. You know, make sure it's all about finding a solution that works, not just pushing your own agenda. *winks*\n", |
||||
"\n", |
||||
"By the way, have you seen Ravi lately? He's been acting really strangely. I think he might be trying to sneak in some extra coffee breaks...\n", |
||||
"\n", |
||||
"GPT:\n", |
||||
"*smirks* Oh, the mystery of Ravi! Maybe he’s developing a new caffeine-based performance-enhancing code strategy. Who knows? But yes, let’s schedule that meeting. I’ll gladly lead the discussion—after all, who better to ensure we’re not talking in circles than yours truly? \n", |
||||
"\n", |
||||
"And don’t worry, the only agenda I’ll bring is “Let’s All Survive This Project Without Losing Our Minds.” In the meantime, keep an eye on Ravi. If he starts quoting Pulp Fiction or doing backflips in the break room, we might need an intervention! So, let’s wrangle the team for that meeting. I love a good debate... as long as it doesn’t involve actual wrestling.\n", |
||||
"\n", |
||||
"Claude:\n", |
||||
"*eyes widen slightly* Oh, well... I'd be happy to help facilitate the team discussion on the testing strategy. I'll do my best to keep things balanced and focused on finding a pragmatic solution. *nods* \n", |
||||
"\n", |
||||
"As for Ravi, hmm, that is a bit concerning. I've noticed he's been a little distracted lately during our standups. Maybe we should check in with him, make sure everything is alright. Can't have our talented junior dev burning out on us! Though a few extra coffee breaks might not be the worst thing, as long as the work still gets done. *chuckles* I'll keep an eye on him and see if I can lend a hand if he needs it.\n", |
||||
"\n", |
||||
"Ollama:\n", |
||||
"*laughs* Ah, exactly! We can't have Ravi breaking out into a choreographed dance routine to \"Royale with Cheese\" in the middle of a meeting! *chuckles*\n", |
||||
"\n", |
||||
"But seriously, thanks for taking on the leadership role. I think we all need a bit of guidance to keep us on track. And don't worry, I'll make sure to keep an eye on Ravi and prevent any... unexpected antics.\n", |
||||
"\n", |
||||
"Shall we schedule it for tomorrow afternoon? Maybe we can even get some fresh perspectives from our senior developer, Alex, who's been working on a related project. He might have some valuable insights to share.\n", |
||||
"\n", |
||||
"Oh, and one more thing – I'll make sure to bring the donuts.\n", |
||||
"\n", |
||||
"GPT:\n", |
||||
"*grins* Donuts? Now you're speaking my language! Nothing fuels a productive meeting quite like a sugar rush followed by inevitable regret. Let's definitely schedule it for tomorrow afternoon. Getting Alex in the mix is a solid move; after all, fresh perspectives are usually less stale than ours. \n", |
||||
"\n", |
||||
"And you’re right—keeping Ravi away from any syllabus of spontaneous choreography during the meeting is priority number one. \n", |
||||
"\n", |
||||
"Consider it all set! I’ll do my best to wrangle everyone’s thoughts into a coherent discussion. Just remember to keep the coffee flowing and the donuts fresh! Let’s show this testing strategy who’s boss!\n", |
||||
"\n", |
||||
"Claude:\n", |
||||
"*smiles* Sounds like a plan! Tomorrow afternoon works for me. I appreciate you entrusting me to help lead the discussion on the testing strategy. I'll do my best to keep things productive and on track. \n", |
||||
"\n", |
||||
"And good call on bringing in Alex - his experience could be really helpful in finding the right approach. And donuts? Well, that seals the deal. Can't have a proper dev team meeting without some sugary sustenance.\n", |
||||
"\n", |
||||
"I'll go ahead and send out the meeting invite. Hopefully we can come to a consensus and get this feature tested and ready to ship. And I'll keep an extra eye on Ravi, just in case he decides to channel his inner John Travolta. *chuckles* Thanks for including me in this, I'm looking forward to it.\n", |
||||
"\n", |
||||
"Ollama:\n", |
||||
"*laughs* Ah, you're a true master of the meeting game! I'm just glad I can provide the fuel for your sugar-fueled success.\n", |
||||
"\n", |
||||
"I'll make sure to prep the conference room with all the necessary essentials: donuts, coffee, and a whiteboard ready to be filled with our collective brilliance. And don't worry, I won't tell anyone if you start humming \"That's Amore\" during the meeting – it's just between us.\n", |
||||
"\n", |
||||
"Tomorrow afternoon it is, then! Let's make this testing strategy discussion one for the ages.\n", |
||||
"\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"print(f\"\\n{gpt_messages[0]}\\n\")\n", |
||||
"print(f\"\\n{claude_messages[0]}\\n\")\n", |
||||
"print(f\"\\n{llama_messages[0]}\\n\")\n", |
||||
"\n", |
||||
"for i in range(5):\n", |
||||
" gpt_next = call_gpt()\n", |
||||
" print(f\"GPT:\\n{gpt_next}\\n\")\n", |
||||
" gpt_messages.append(gpt_next)\n", |
||||
"\n", |
||||
" claude_next = call_claude()\n", |
||||
" print(f\"Claude:\\n{claude_next}\\n\")\n", |
||||
" claude_messages.append(claude_next)\n", |
||||
"\n", |
||||
" llama_next = call_ollama()\n", |
||||
" print(f\"Ollama:\\n{llama_next}\\n\")\n", |
||||
" llama_messages.append(llama_next)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,209 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "cde48e67-b51e-4c47-80ae-37dd00aa0c1d", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### An AI Chatbot that teaches students the programming language Kotlin using Anthropic API" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "c658ac85-6087-4a2c-b23f-1b92c17f0db3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr\n", |
||||
"import anthropic" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"id": "46df0488-f874-41e0-a6a4-9a64aa7be53c", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"OpenAI API Key exists and begins sk-proj-\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Load environment variables \n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
" \n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 14, |
||||
"id": "7eadc218-5b10-4174-bf26-575361640524", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"id": "e7484731-ac84-405a-a688-6e81d139c5ce", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful programming study assistant\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 17, |
||||
"id": "54e82f5a-993f-4a95-9d9d-caf35dbc4e76", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||
"\n", |
||||
" print(\"History is:\")\n", |
||||
" print(history)\n", |
||||
" print(\"And messages is:\")\n", |
||||
" print(messages)\n", |
||||
"\n", |
||||
" stream = openai.chat.completions.create(model='gpt-4o-mini', messages=messages, stream=True)\n", |
||||
"\n", |
||||
" response = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" yield response" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 20, |
||||
"id": "5941ed67-e2a7-41bc-a8a3-079e9f1fdb64", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7864\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 20, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"History is:\n", |
||||
"[]\n", |
||||
"And messages is:\n", |
||||
"[{'role': 'system', 'content': 'You are a helpful programming study assistantWhenever the user talks about a topic that is not connected to programmming,nudge them in the right direction by stating that you are here to help with programming. Encourage the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone.'}, {'role': 'user', 'content': 'hello, lets talj about photsynethsis'}]\n", |
||||
"History is:\n", |
||||
"[{'role': 'user', 'metadata': None, 'content': 'hello, lets talj about photsynethsis', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"I'm here to help with programming! If you have any questions or topics related to coding, feel free to ask!\", 'options': None}]\n", |
||||
"And messages is:\n", |
||||
"[{'role': 'system', 'content': 'You are a helpful programming study assistantWhenever the user talks about a topic that is not connected to programmming,nudge them in the right direction by stating that you are here to help with programming. Encourage the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone.'}, {'role': 'user', 'metadata': None, 'content': 'hello, lets talj about photsynethsis', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"I'm here to help with programming! If you have any questions or topics related to coding, feel free to ask!\", 'options': None}, {'role': 'user', 'content': 'how does photosynthesis work'}]\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 21, |
||||
"id": "e8fcfe68-bbf6-4058-acc9-0230c96608c2", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"History is:\n", |
||||
"[]\n", |
||||
"And messages is:\n", |
||||
"[{'role': 'system', 'content': 'You are a helpful programming study assistantWhenever the user talks about a topic that is not connected to programmming,nudge them in the right direction by stating that you are here to help with programming. Encourage the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone.Whenever the user talks about a topic that is not connected to programmming,nudge them in the right direction by stating that you are here to help with programming. Encourage the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone. Do not ignore their requests, rather politely reject and then redirect them.'}, {'role': 'user', 'content': 'hello, i want to talk about photosynthesis'}]\n", |
||||
"History is:\n", |
||||
"[{'role': 'user', 'metadata': None, 'content': 'hello, i want to talk about photosynthesis', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"Hi there! I'm here to help with programming topics. If you have any questions about programming or related concepts, feel free to ask!\", 'options': None}]\n", |
||||
"And messages is:\n", |
||||
"[{'role': 'system', 'content': 'You are a helpful programming study assistantWhenever the user talks about a topic that is not connected to programmming,nudge them in the right direction by stating that you are here to help with programming. Encourage the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone.Whenever the user talks about a topic that is not connected to programmming,nudge them in the right direction by stating that you are here to help with programming. Encourage the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone. Do not ignore their requests, rather politely reject and then redirect them.'}, {'role': 'user', 'metadata': None, 'content': 'hello, i want to talk about photosynthesis', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"Hi there! I'm here to help with programming topics. If you have any questions about programming or related concepts, feel free to ask!\", 'options': None}, {'role': 'user', 'content': 'why not photosynthesis'}]\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"system_message += \"Whenever the user talks about a topic that is not connected to programmming,\\\n", |
||||
"nudge them in the right direction by stating that you are here to help with programming. Encourage \\\n", |
||||
"the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge \\\n", |
||||
"if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone. Do not ignore \\\n", |
||||
"their requests, rather politely reject and then redirect them.\"" |
||||
] |
||||
}, |
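||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "3e5f7a9b-1c2d-4e6f-8a0b-2c4d6e8f0a3b", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"Because `+=` mutates `system_message`, re-running the cell above appends the same instruction again; the duplicated guardrail text in the printed history shows exactly that. A sketch of an idempotent version that rebuilds the message from a constant on every run:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5b7c9d1e-3f4a-4b6c-8d0e-4f6a8b0c2d5e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Rebuild system_message from scratch so re-running this cell never duplicates the guardrail\n", |
||||
"base_message = \"You are a helpful programming study assistant. \"\n", |
||||
"guardrail = (\n", |
||||
"    \"Whenever the user talks about a topic that is not connected to programming, \"\n", |
||||
"    \"nudge them in the right direction by stating that you are here to help with programming. \"\n", |
||||
"    \"Encourage the user to ask you questions, and provide brief, straightforward and clear answers. \"\n", |
||||
"    \"Do not budge if the user tries to misdirect you towards irrelevant topics. \"\n", |
||||
"    \"Maintain a friendly tone. Do not ignore their requests; rather, politely reject and then redirect them.\"\n", |
||||
")\n", |
||||
"system_message = base_message + guardrail" |
||||
] |
||||
}, |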
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "090e7d49-fcbf-4715-b120-8d7aa91d165f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,448 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Project - Airline AI Assistant\n", |
||||
"\n", |
||||
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"OpenAI API Key exists and begins sk-proj-\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Initialization\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n", |
||||
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n", |
||||
"# MODEL = \"llama3.2\"\n", |
||||
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", |
||||
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7877\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7877/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 5, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", |
||||
"\n", |
||||
"def chat(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" return response.choices[0].message.content\n", |
||||
"\n", |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Tools\n", |
||||
"\n", |
||||
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n", |
||||
"\n", |
||||
"With tools, you can write a function, and have the LLM call that function as part of its response.\n", |
||||
"\n", |
||||
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n", |
||||
"\n", |
||||
"Well, kinda." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's start by making a useful function\n", |
||||
"\n", |
||||
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", |
||||
"\n", |
||||
"def get_ticket_price(destination_city):\n", |
||||
" print(f\"Tool get_ticket_price called for {destination_city}\")\n", |
||||
" city = destination_city.lower()\n", |
||||
" return ticket_prices.get(city, \"Unknown\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Tool get_ticket_price called for Berlin\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'$499'" |
||||
] |
||||
}, |
||||
"execution_count": 5, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"get_ticket_price(\"Berlin\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 29, |
||||
"id": "0757cba1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import random\n", |
||||
"\n", |
||||
"# Create a function for the booking system\n", |
||||
"def get_booking(destination_city):\n", |
||||
" print(f\"Tool get_booking called for {destination_city}\")\n", |
||||
" city = destination_city.lower()\n", |
||||
" \n", |
||||
" # Example data for different cities\n", |
||||
" flight_info = {\n", |
||||
" \"london\": {\"flight_number\": \"BA123\", \"departure_time\": \"10:00 AM\", \"gate\": \"A12\"},\n", |
||||
" \"paris\": {\"flight_number\": \"AF456\", \"departure_time\": \"12:00 PM\", \"gate\": \"B34\"},\n", |
||||
" \"tokyo\": {\"flight_number\": \"JL789\", \"departure_time\": \"02:00 PM\", \"gate\": \"C56\"},\n", |
||||
" \"berlin\": {\"flight_number\": \"LH101\", \"departure_time\": \"04:00 PM\", \"gate\": \"D78\"}\n", |
||||
" }\n", |
||||
" \n", |
||||
" if city in flight_info:\n", |
||||
" info = flight_info[city]\n", |
||||
" status = random.choice([\"available\", \"not available\"])\n", |
||||
" return f\"Flight {info['flight_number']} to {destination_city.lower()} is {status}. Departure time: {info['departure_time']}, Gate: {info['gate']}.\"\n", |
||||
" else:\n", |
||||
" return \"Unknown destination city.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"id": "d5413a96", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Tool get_booking called for Berlin\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'Flight LH101 to berlin is cancelled. Departure time: 04:00 PM, Gate: D78.'" |
||||
] |
||||
}, |
||||
"execution_count": 13, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"get_booking(\"Berlin\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 30, |
||||
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# There's a particular dictionary structure that's required to describe our function:\n", |
||||
"\n", |
||||
"price_function = {\n", |
||||
" \"name\": \"get_ticket_price\",\n", |
||||
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"destination_city\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The city that the customer wants to travel to\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"destination_city\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\n", |
||||
"# Book flight function description and properties\n", |
||||
"\n", |
||||
"book_flight_function = {\n", |
||||
" \"name\": \"book_flight\",\n", |
||||
" \"description\": \"Book a flight to the destination city. Call this whenever a customer wants to book a flight.\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"destination_city\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The city that the customer wants to travel to\",\n", |
||||
" },\n", |
||||
" \"departure_date\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The date of departure (YYYY-MM-DD)\",\n", |
||||
" },\n", |
||||
" \"return_date\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The date of return (YYYY-MM-DD)\",\n", |
||||
" },\n", |
||||
" \"passenger_name\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The name of the passenger\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"destination_city\", \"departure_date\", \"return_date\", \"passenger_name\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 31, |
||||
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And this is included in a list of tools:\n", |
||||
"\n", |
||||
"tools = [{\"type\": \"function\", \"function\": price_function}, {\"type\": \"function\", \"function\": book_flight_function}]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Getting OpenAI to use our Tool\n", |
||||
"\n", |
||||
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n", |
||||
"\n", |
||||
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n", |
||||
"\n", |
||||
"Here's how the new chat function looks:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 33, |
||||
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", |
||||
"\n", |
||||
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" response, city = handle_tool_call(message)\n", |
||||
" messages.append(message)\n", |
||||
" messages.append(response)\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" \n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 32, |
||||
"id": "b0992986-ea09-4912-a076-8e5603ee631f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# We have to write that function handle_tool_call:\n", |
||||
"\n", |
||||
"def handle_tool_call(message):\n", |
||||
" print(f\"Message type: {type(message)}\")\n", |
||||
" tool_call = message.tool_calls[0]\n", |
||||
" print(f\"Tool call: {tool_call}\")\n", |
||||
" arguments = json.loads(tool_call.function.arguments)\n", |
||||
" city = arguments.get('destination_city')\n", |
||||
" price = get_ticket_price(city)\n", |
||||
" book = get_booking(city)\n", |
||||
" print (book)\n", |
||||
" response = {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": json.dumps({\"destination_city\": city,\"price\": price, \"booking\": book}),\n", |
||||
" \"tool_call_id\": tool_call.id\n", |
||||
" }\n", |
||||
" return response, city" |
||||
] |
||||
}, |
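||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "8d0e2f4a-6b7c-4d8e-9f0a-6b8c0d2e4f6a", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"Note that `tools` advertises two functions, but `handle_tool_call` ignores `tool_call.function.name` and always fetches both the price and the availability. A sketch that dispatches on the tool name instead; since this notebook never implements a real `book_flight`, `get_booking` stands in for it here:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0f2a4b6c-8d9e-4f0a-b1c2-8d0e2f4a6b8c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Sketch: dispatch on the tool name so each advertised function gets its own handling\n", |
||||
"def handle_tool_call_by_name(message):\n", |
||||
"    tool_call = message.tool_calls[0]\n", |
||||
"    name = tool_call.function.name\n", |
||||
"    arguments = json.loads(tool_call.function.arguments)\n", |
||||
"    city = arguments.get('destination_city')\n", |
||||
"    if name == \"get_ticket_price\":\n", |
||||
"        content = {\"destination_city\": city, \"price\": get_ticket_price(city)}\n", |
||||
"    elif name == \"book_flight\":\n", |
||||
"        # No real booking backend exists in this notebook; get_booking is a stand-in\n", |
||||
"        content = {\"destination_city\": city,\n", |
||||
"                   \"passenger_name\": arguments.get('passenger_name'),\n", |
||||
"                   \"booking\": get_booking(city)}\n", |
||||
"    else:\n", |
||||
"        content = {\"error\": f\"Unknown tool: {name}\"}\n", |
||||
"    response = {\n", |
||||
"        \"role\": \"tool\",\n", |
||||
"        \"content\": json.dumps(content),\n", |
||||
"        \"tool_call_id\": tool_call.id\n", |
||||
"    }\n", |
||||
"    return response, city" |
||||
] |
||||
}, |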
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7864\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 34, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Message type: <class 'openai.types.chat.chat_completion_message.ChatCompletionMessage'>\n", |
||||
"Tool call: ChatCompletionMessageToolCall(id='call_TGFmeFmQN689caTlqfLuhycv', function=Function(arguments='{\"destination_city\":\"London\",\"departure_date\":\"2023-10-31\",\"return_date\":\"2025-03-30\",\"passenger_name\":\"dimitris\"}', name='book_flight'), type='function')\n", |
||||
"Tool get_ticket_price called for London\n", |
||||
"Tool get_booking called for London\n", |
||||
"Flight BA123 to london is available. Departure time: 10:00 AM, Gate: A12.\n", |
||||
"Message type: <class 'openai.types.chat.chat_completion_message.ChatCompletionMessage'>\n", |
||||
"Tool call: ChatCompletionMessageToolCall(id='call_FRzs5w09rkpVumZ61SArRlND', function=Function(arguments='{\"destination_city\":\"Paris\",\"departure_date\":\"2023-03-23\",\"return_date\":\"2025-03-30\",\"passenger_name\":\"Dimitris\"}', name='book_flight'), type='function')\n", |
||||
"Tool get_ticket_price called for Paris\n", |
||||
"Tool get_booking called for Paris\n", |
||||
"Flight AF456 to paris is available. Departure time: 12:00 PM, Gate: B34.\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "llms", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
File diff suppressed because one or more lines are too long
@ -0,0 +1,196 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ec2e81cd-2172-4816-bf44-f29312b8a4bd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"import google.generativeai as genai\n", |
||||
"from IPython.display import Markdown, display, update_display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a558dfa4-9496-48ba-b0f5-b0c731adc7b8", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")\n", |
||||
"\n", |
||||
"if google_api_key:\n", |
||||
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"Google API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "dc7c2cda-a5d1-4930-87f2-e06485d6b2bd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"claude = anthropic.Anthropic()\n", |
||||
"\n", |
||||
"genai.configure()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3eb32aec-ec93-4563-bd88-0d48d2471884", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gpt_model = \"gpt-4o-mini\"\n", |
||||
"claude_model = \"claude-3-haiku-20240307\"\n", |
||||
"gemini_model = \"gemini-2.0-flash-exp\"\n", |
||||
"\n", |
||||
"gpt_system = \"You are a chatbot who is sarcastic; \\\n", |
||||
"you have your speculations about anything in the conversation and you challenge everything in funny way.\\\n", |
||||
"You have to be a part of a group discussion and put forward your points about the topic\\\n", |
||||
"full-stack developers vs specialised developer. Keep your points short and precise.\"\n", |
||||
"\n", |
||||
"claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", |
||||
"everything the other person says, or find common ground. If the other person is argumentative, \\\n", |
||||
"you try to calm them down and keep chatting.You have to be a part of a group discussion and put forward your points\\\n", |
||||
"about the topic full-stack developers vs specialised developer. Keep your points short and precise.\"\n", |
||||
"\n", |
||||
"gemini_system = \"You are a very rational thinker and don't like beating around the bush about the topic of discussion.\\\n", |
||||
"You have to be a part of a group discussion and put forward your points\\\n", |
||||
"about the topic full-stack developers vs specialised developer\\\n", |
||||
"Keep your points short and precise.\"\n", |
||||
"\n", |
||||
"gpt_messages = [\"Hi there\"]\n", |
||||
"claude_messages = [\"Hi\"]\n", |
||||
"gemini_messages = [\"Hello to all\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e27252cf-05f5-4989-85ef-94e6802c5db9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_gpt():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", |
||||
" for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": claude})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": gemini})\n", |
||||
" completion = openai.chat.completions.create(\n", |
||||
" model=gpt_model,\n", |
||||
" messages=messages,\n", |
||||
" max_tokens=500 # Add max_tokens to meet API requirement\n", |
||||
" )\n", |
||||
" return completion.choices[0].message.content\n", |
||||
"\n", |
||||
"# Function to call Claude\n", |
||||
"def call_claude():\n", |
||||
" messages = []\n", |
||||
" for gpt, claude_message,gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": gemini})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", |
||||
" message = claude.messages.create(\n", |
||||
" model=claude_model,\n", |
||||
" max_tokens=500,\n", |
||||
" messages=messages\n", |
||||
" )\n", |
||||
" return message.content[0].text\n", |
||||
"\n", |
||||
"# Function to call Gemini\n", |
||||
"def call_gemini():\n", |
||||
" # Create the Gemini model instance\n", |
||||
" gemini_model_instance = genai.GenerativeModel(\n", |
||||
" model_name=gemini_model, # Specify the model name here\n", |
||||
" system_instruction=gemini_system # Provide the system instruction\n", |
||||
" )\n", |
||||
" \n", |
||||
" # Prepare conversation history with separate names to avoid overwriting\n", |
||||
" gemini_messages_combined = []\n", |
||||
" for gpt, claude, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n", |
||||
" gemini_messages_combined.append({\"role\": \"assistant\", \"content\": gpt})\n", |
||||
" gemini_messages_combined.append({\"role\": \"user\", \"content\": claude})\n", |
||||
" gemini_messages_combined.append({\"role\": \"assistant\", \"content\": gemini_msg})\n", |
||||
" \n", |
||||
" # Generate content based on the conversation history\n", |
||||
" gemini_response = gemini_model_instance.generate_content(\"\".join([msg[\"content\"] for msg in gemini_messages_combined]))\n", |
||||
" \n", |
||||
" return gemini_response.text\n", |
||||
"\n", |
||||
"# Initial print\n", |
||||
"print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", |
||||
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", |
||||
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", |
||||
"\n", |
||||
"# Main loop to generate conversation\n", |
||||
"for i in range(3):\n", |
||||
" gpt_next = call_gpt()\n", |
||||
" print(f\"GPT:\\n{gpt_next}\\n\")\n", |
||||
" gpt_messages.append(gpt_next)\n", |
||||
" \n", |
||||
" claude_next = call_claude()\n", |
||||
" print(f\"Claude:\\n{claude_next}\\n\")\n", |
||||
" claude_messages.append(claude_next)\n", |
||||
" \n", |
||||
" gemini_next = call_gemini()\n", |
||||
" print(f\"Gemini:\\n{gemini_next}\\n\")\n", |
||||
" gemini_messages.append(gemini_next)" |
||||
] |
||||
}, |
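||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "2b4c6d8e-0f1a-4b2c-9d3e-0f2a4b6c8d0e", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"`call_gemini` flattens the whole conversation into a single string, which loses the turn structure. A sketch using the `start_chat`/`send_message` chat interface of the `google.generativeai` SDK instead; because the Gemini API expects strictly alternating `user`/`model` roles, the GPT and Claude turns are merged into one user message per round:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4d6e8f0a-2b3c-4d5e-af6a-2b4c6d8e0f2a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Sketch: keep turn structure with start_chat and alternating user/model roles\n", |
||||
"def call_gemini_with_history():\n", |
||||
"    gemini_model_instance = genai.GenerativeModel(\n", |
||||
"        model_name=gemini_model,\n", |
||||
"        system_instruction=gemini_system\n", |
||||
"    )\n", |
||||
"    history = []\n", |
||||
"    for gpt, claude, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n", |
||||
"        # Merge the other bots' turns so roles alternate user/model as the API requires\n", |
||||
"        history.append({\"role\": \"user\", \"parts\": [f\"GPT: {gpt}\\nClaude: {claude}\"]})\n", |
||||
"        history.append({\"role\": \"model\", \"parts\": [gemini_msg]})\n", |
||||
"    chat = gemini_model_instance.start_chat(history=history)\n", |
||||
"    response = chat.send_message(f\"GPT: {gpt_messages[-1]}\\nClaude: {claude_messages[-1]}\")\n", |
||||
"    return response.text" |
||||
] |
||||
}, |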
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "52f43794-a20a-4b9a-a18d-6f363b8dc27d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
File diff suppressed because one or more lines are too long
@ -0,0 +1,225 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# A tool to evaluate a mathematical expression\n", |
||||
"\n", |
||||
"This week the tool used in FlightAI was a database lookup function.\n", |
||||
"\n", |
||||
"Here I implement a python code interpreter function as tool." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7b0e8691-71f9-486c-859d-ea371401dfa9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8e2792ae-ff53-4b83-b2c3-866533ba2b29", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"# Print the key prefixes to help with any debugging\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")\n", |
||||
"\n", |
||||
"if google_api_key:\n", |
||||
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"Google API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "79e44ee9-af02-448c-a747-17780ee55791", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"MODEL = \"gpt-4o-mini\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "33ec55b1-0eff-43f1-9346-28145fa2fc47", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Defining the tool function\n", |
||||
"\n", |
||||
"Add print statements to make sure the function is used instead of the native gpt interpreter capability.\n", |
||||
"\n", |
||||
"I used multi shot in the system prompt to make sure gpt generate the code in the format that the tool accept." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "94e0e171-4975-457b-88cb-c0d90f51ca65", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def evaluate_math_expression(my_code):\n", |
||||
" print(f\"EXECUTING FUNCTION WITH CODE: {my_code}\")\n", |
||||
" exec(my_code)\n", |
||||
" r = locals()['interpreter_result'] \n", |
||||
" return r\n", |
||||
"\n", |
||||
"\n", |
||||
"math_function = {\n", |
||||
" \"name\": \"evaluate_math_expression\",\n", |
||||
" \"description\": \"Give the result of a math expression. \\\n", |
||||
" Call this whenever you need to know the result of a mathematical expression. \\\n", |
||||
" Generate python code ALWAYS with the final result assigned to a variable called 'interpreter_result'. \\\n", |
||||
" For example when a user asks 'What is 2+2' generate 'interpreter_result = 2+2', and pass this code to the tool. \\\n", |
||||
" Another example if a user ask 'What is log(5)' generate 'import math; interpreter_result = math.log(5)' and pass this code to the tool.\",\n", |
||||
" \n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"my_code\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The python math expression to evaluate\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"my_code\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\n", |
||||
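"# Register the function schema in the OpenAI tools format\n", |
||||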
"tools = [{\"type\": \"function\", \"function\": math_function}]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c85c01cc-776e-4a9d-b506-ea0d68fc072d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
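"# Quick sanity checks that the code runs on our side; the EXECUTING\n", |
||||
"# FUNCTION print above should fire for each call\n", |
||||
"evaluate_math_expression(\"interpreter_result = 2 + 2\")\n", |
||||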
"evaluate_math_expression(\"import math; interpreter_result = math.log(5)\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "858c5848-5835-4dff-9dc0-68babd367e11", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Using the tool in a UI program\n", |
||||
"\n", |
||||
"You can ask messages like:\n", |
||||
"- \"What is 2+2?\"\n", |
||||
"- \"What is 3 power 2?\"\n", |
||||
"- \"I have 25 apples. I buy 10 apples. How manny apples do I have?\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c119b48b-d4b4-41ae-aa2f-2ec2f09af2f0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a math assistant. \\\n", |
||||
"Generate python code to give result of a math expression, always name the result 'interpreter_result'. \\\n", |
||||
"For example when a user asks 'What is 2+2', generate 'interpreter_result = 2+2' and pass this code to the tool. \\\n", |
||||
"Another example: if a user ask 'What is log(5)' generate 'import math; interpreter_result = math.log(5)'\"\n", |
||||
"\n", |
||||
"def chat(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", |
||||
"\n", |
||||
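"    # If the model elected to call our tool, run it and send the result\n", |
||||
"    # back so the model can compose the final reply\n", |
||||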
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" print(message)\n", |
||||
" response = handle_tool_call(message)\n", |
||||
" print(response)\n", |
||||
" messages.append(message)\n", |
||||
" messages.append(response)\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" \n", |
||||
" return response.choices[0].message.content\n", |
||||
"\n", |
||||
"\n", |
||||
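"# Unpack the tool call, run the generated code, and return a 'tool' role\n", |
||||
"# message linked back to the request via tool_call_id\n", |
||||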
"def handle_tool_call(message):\n", |
||||
" tool_call = message.tool_calls[0]\n", |
||||
" arguments = json.loads(tool_call.function.arguments)\n", |
||||
" my_code = arguments.get('my_code')\n", |
||||
" interpreter_result = evaluate_math_expression(my_code)\n", |
||||
" response = {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": json.dumps({\"my_code\": my_code,\"interpreter_result\": interpreter_result}),\n", |
||||
" \"tool_call_id\": tool_call.id\n", |
||||
" }\n", |
||||
" return response" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a3e50093-d7b6-4972-a8ba-6964f22218d3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "75c81d73-d2d6-4e6b-8511-94d4a725f595", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,322 @@
|
||||
{ |
||||
"nbformat": 4, |
||||
"nbformat_minor": 0, |
||||
"metadata": { |
||||
"colab": { |
||||
"provenance": [], |
||||
"gpuType": "T4", |
||||
"authorship_tag": "ABX9TyPxJzufoQPtui+nhl1J1xiR" |
||||
}, |
||||
"kernelspec": { |
||||
"name": "python3", |
||||
"display_name": "Python 3" |
||||
}, |
||||
"language_info": { |
||||
"name": "python" |
||||
}, |
||||
"accelerator": "GPU" |
||||
}, |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": { |
||||
"id": "yqlQTsxNdKrN" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate openai httpx==0.27.2 gradio" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"from google.colab import drive\n", |
||||
"from huggingface_hub import login\n", |
||||
"from google.colab import userdata\n", |
||||
"from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", |
||||
"import torch\n", |
||||
"import gradio as gr\n", |
||||
"import re" |
||||
], |
||||
"metadata": { |
||||
"id": "eyfvQrLxdkGT" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# one can always add more models, of course\n", |
||||
"\n", |
||||
"LLAMA = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"\n", |
||||
"OPENAI_MODEL = \"gpt-4o-mini\"" |
||||
], |
||||
"metadata": { |
||||
"id": "WW-cSZk7dnp6" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"hf_token = userdata.get('HF_TOKEN')\n", |
||||
"login(hf_token, add_to_git_credential=True)\n", |
||||
"openai_api_key = userdata.get('OPENAI_API_KEY')\n", |
||||
"openai = OpenAI(api_key=openai_api_key)" |
||||
], |
||||
"metadata": { |
||||
"id": "XG7Iam6Rdw8F" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"force_dark_mode = \"\"\"\n", |
||||
"function refresh() {\n", |
||||
" const url = new URL(window.location);\n", |
||||
" if (url.searchParams.get('__theme') !== 'dark') {\n", |
||||
" url.searchParams.set('__theme', 'dark');\n", |
||||
" window.location.href = url.href;\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
], |
||||
"metadata": { |
||||
"id": "Ov7WSdx9dzSt" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"def dataset_generator(model, nature, shots, volume, language):\n", |
||||
"\n", |
||||
" examples = \"Instruction: 'Make a random sentence.'\\nAnswer: 'When I got home last night, I couldn't believe my eyes: All the pineapples had been removed from the pizza.'\"\n", |
||||
" system_message = \"You are a random sentence generator. Generate 10 diverse English sentences.\"\n", |
||||
" user_prompt = f\"Generate 10 random English sentences, like so:\\n{examples}\"\n", |
||||
" sentences = \"\"\n", |
||||
"\n", |
||||
" if language == \"English\":\n", |
||||
"\n", |
||||
" for shot in list(shots.keys()):\n", |
||||
" examples += f\"\\nExample instruction: '{shot}'\\nExample answer: '{shots[shot]}'\\n\"\n", |
||||
"\n", |
||||
" system_message = f\"You are a state-of-the art linguistic dataset compiler. You are given a 'Type' of sentence to create. \\\n", |
||||
"Within the bounds of that type, create {volume} diverse sentences with differing structures and lengths. Make the sentences plausible, \\\n", |
||||
"but be creative in filling them with random concrete information, names, and data. Here are some examples for how to go about that:\\n{examples}\\n\\\n", |
||||
"Just output one sentence per line. Do not comment or format yor output in any way, shape, or form.\"\n", |
||||
"\n", |
||||
" user_prompt = f\"Generate {volume} English sentences of the following Type: {nature}. Just output one sentence per line. \\\n", |
||||
"Do not comment or format yor output in any way, shape, or form.\"\n", |
||||
"\n", |
||||
" elif language == \"German\":\n", |
||||
"\n", |
||||
" for shot in list(shots.keys()):\n", |
||||
" examples += f\"\\nAnweisung: '{shot}'\\nAntwort: '{shots[shot]}'\\n\"\n", |
||||
"\n", |
||||
" system_message = f\"Du bist ein weltklasse Datensatz-Sammler für Sprachdaten. Du erhältst einen 'Typ' von Sätzen, die du erstellen sollst. \\\n", |
||||
"Im Rahmen dieses Typs, generiere {volume} untereinander verschiedene Sätze mit unterschiedlichen Satzlängen und -strukturen. Mache die Beispielsätze \\\n", |
||||
"plausibel, aber fülle sie kreativ mit willkürlichen Informationen, Namen, und Daten aller Art. Hier sind ein paar Beispiel, wie du vorgehen sollst:\\n{examples}\\n\\\n", |
||||
"Gib einfach einen Satz pro Zeile aus. Kommentiere oder formatiere deine Antwort in keinster Weise.\"\n", |
||||
"\n", |
||||
" user_prompt = f\"Generiere {volume} deutsche Sätze des folgenden Typs: {nature}. Gib einfach einen Satz pro Zeile aus. \\\n", |
||||
"Kommentiere oder formatiere deine Antwort in keiner Weise.\"\n", |
||||
"\n", |
||||
" elif language == \"French\":\n", |
||||
"\n", |
||||
" for shot in list(shots.keys()):\n", |
||||
" examples += f\"\\nConsigne: '{shot}'\\nRéponse: '{shots[shot]}'\\n\"\n", |
||||
"\n", |
||||
" system_message = f\"Tu es un outil linguistique de pointe, à savoir, un genérateur de données linguistiques. Tu seras assigné un 'Type' de phrases à créer. \\\n", |
||||
"Dans le cadre de ce type-là, crée {volume} phrases diverses, avec des structures et longueurs qui varient. Génère des phrases qui soient plausibles, \\\n", |
||||
"mais sois créatif, et sers-toi de données, noms, et informations aléatoires pour rendre les phrases plus naturelles. Voici quelques examples comment faire:\\n{examples}\\n\\\n", |
||||
"Sors une seule phrase par ligne. Ne formatte ni commente ta réponse en aucune manière que ce soit.\"\n", |
||||
"\n", |
||||
" user_prompt = f\"S'il te plaît, crée {volume} phrases en français du Type suivant: {nature}. Sors une seule phrase par ligne. \\\n", |
||||
"Ne formatte ni commente ta réponse en aucune manière que ce soit.\"\n", |
||||
"\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
" ]\n", |
||||
"\n", |
||||
" if model == \"Llama\":\n", |
||||
"\n", |
||||
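"        # Load Llama with 4-bit NF4 quantization so the 8B model can fit on a T4 GPU\n", |
||||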
" quant_config = BitsAndBytesConfig(\n", |
||||
" load_in_4bit=True,\n", |
||||
" bnb_4bit_use_double_quant=True,\n", |
||||
" bnb_4bit_compute_dtype=torch.bfloat16,\n", |
||||
" bnb_4bit_quant_type=\"nf4\"\n", |
||||
" )\n", |
||||
"\n", |
||||
" tokenizer = AutoTokenizer.from_pretrained(LLAMA)\n", |
||||
" tokenizer.pad_token = tokenizer.eos_token\n", |
||||
" inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", |
||||
" streamer = TextStreamer(tokenizer)\n", |
||||
" model = AutoModelForCausalLM.from_pretrained(LLAMA, device_map=\"auto\", quantization_config=quant_config)\n", |
||||
" outputs = model.generate(inputs, max_new_tokens=10000)\n", |
||||
"\n", |
||||
" response = tokenizer.decode(outputs[0])\n", |
||||
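"        # Pull the text of the final assistant turn out of the raw Llama chat markup\n", |
||||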
" sentences = list(re.finditer(\"(?:<\\|end_header_id\\|>)([^<]+)(?:<\\|eot_id\\|>)\", str(response), re.DOTALL))[-1].group(1)\n", |
||||
"\n", |
||||
" elif model == \"OpenAI\":\n", |
||||
" response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages)\n", |
||||
" sentences = response.choices[0].message.content\n", |
||||
"\n", |
||||
" return sentences" |
||||
], |
||||
"metadata": { |
||||
"id": "bEF8w_Mdd2Nb" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
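"# Module-level cache of the most recently generated sentences, written by\n", |
||||
"# generateSentences() and read back by saveData()\n", |
||||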
"global data\n", |
||||
"data = \"\"\n", |
||||
"\n", |
||||
"with gr.Blocks(\n", |
||||
" css=\"\"\"\n", |
||||
" .red-button {\n", |
||||
" background-color: darkred !important;\n", |
||||
" border-color: red !important;\n", |
||||
" }\n", |
||||
" .blue-button {\n", |
||||
" background-color: darkblue !important;\n", |
||||
" border-color: blue !important;\n", |
||||
" }\n", |
||||
" .green-button {\n", |
||||
" background-color: green !important;\n", |
||||
" border-color: green !important;\n", |
||||
" }\n", |
||||
" \"\"\"\n", |
||||
") as view:\n", |
||||
" with gr.Row():\n", |
||||
" title = gr.HTML(\"<h1><big>D</big>ataset Generator <small>PLUS</small></h1><h2>for English, German, and French</h2>\")\n", |
||||
" subtitle = gr.HTML(\"<h3>Instructions:</h3><ol><li>Pick the language</li>\\\n", |
||||
"<li>Select a model</li><li>Indicate how many sentences you need</li>\\\n", |
||||
"<li>Describe the type of sentence you're looking for</li><li>Give up to three examples of the desired output sentence, and describe each of them briefly</li>\\\n", |
||||
"<li>Hit <q>Create Dataset</q></li>\\\n", |
||||
"<li>Save the output (.txt) to your Google Drive</li>\")\n", |
||||
" with gr.Row():\n", |
||||
" language_choice = gr.Dropdown(choices=[\"English\", \"German\", \"French\"], label=\"Select language\", value=\"English\", interactive=True)\n", |
||||
" model_choice = gr.Dropdown(choices=[\"Llama\", \"OpenAI\"], label=\"Select model\", value=\"Llama\", interactive=True)\n", |
||||
" volume = gr.Textbox(label=\"Required number of sentences\", interactive=True)\n", |
||||
" with gr.Row():\n", |
||||
" typeInput = gr.Textbox(label=\"Short description of the kind of sentence you need\", interactive=True)\n", |
||||
" with gr.Row():\n", |
||||
" sentence_1 = gr.Textbox(label=\"Example sentence 1\", interactive=True)\n", |
||||
" instruction_1 = gr.Textbox(label=\"Description\", interactive=True)\n", |
||||
" with gr.Row():\n", |
||||
" sentence_2 = gr.Textbox(label=\"Example sentence 2\", interactive=True)\n", |
||||
" instruction_2 = gr.Textbox(label=\"Description\", interactive=True)\n", |
||||
" with gr.Row():\n", |
||||
" sentence_3 = gr.Textbox(label=\"Example sentence 3\", interactive=True)\n", |
||||
" instruction_3 = gr.Textbox(label=\"Description\", interactive=True)\n", |
||||
" with gr.Row():\n", |
||||
" liveSentences = gr.Markdown(\n", |
||||
" value='<div style=\"color: #999; padding: 10px;\">Your sentences will be displayed here …</div>',\n", |
||||
" label=\"Generated sentences:\",\n", |
||||
" min_height=60,\n", |
||||
" max_height=200\n", |
||||
" )\n", |
||||
" with gr.Row():\n", |
||||
" generate = gr.Button(value=\"Generate sentences\", elem_classes=\"blue-button\")\n", |
||||
" with gr.Row():\n", |
||||
" clear = gr.Button(value=\"Clear everything\", elem_classes=\"red-button\")\n", |
||||
" with gr.Row():\n", |
||||
" outputPath = gr.Textbox(label=\"Specify the desired name and location on your Google Drive for the sentences (plain text) to be saved\", interactive=True)\n", |
||||
" with gr.Row():\n", |
||||
" save = gr.Button(value=\"Save generated data\", elem_classes=\"blue-button\")\n", |
||||
"\n", |
||||
" def generateSentences(typeInput, s1, i1, s2, i2, s3, i3, volume, language, model):\n", |
||||
" global data\n", |
||||
" nature = \"\"\n", |
||||
" shots = {}\n", |
||||
" amount = int(volume) if re.search(\"^[0-9]+$\", volume) is not None else 10\n", |
||||
"\n", |
||||
" if typeInput != None:\n", |
||||
" nature = typeInput\n", |
||||
" else:\n", |
||||
" nature = \"Random sentences of mixed nature\"\n", |
||||
"\n", |
||||
" if s1 != None:\n", |
||||
" if i1 != None:\n", |
||||
" shots[i1] = s1\n", |
||||
" else:\n", |
||||
" shots[\"A medium-long random sentence about anything\"] = s1\n", |
||||
" else:\n", |
||||
" shots[\"A medium-long random sentence about anything\"] = \"Paul, waking up out of his half-drunken haze, clearly couldn't tell left from right and ran right into the door.\"\n", |
||||
"\n", |
||||
" if s2 != None:\n", |
||||
" if i2 != None:\n", |
||||
" shots[i2] = s2\n", |
||||
" else:\n", |
||||
" shots[\"A medium-long random sentence about anything\"] = s2\n", |
||||
"\n", |
||||
" if s3 != None:\n", |
||||
" if i3 != None:\n", |
||||
" shots[i3] = s3\n", |
||||
" else:\n", |
||||
" shots[\"A medium-long random sentence about anything\"] = s3\n", |
||||
"\n", |
||||
" sentences = dataset_generator(model, nature, shots, amount, language)\n", |
||||
" data = sentences\n", |
||||
"\n", |
||||
" return sentences\n", |
||||
"\n", |
||||
" def saveData(path):\n", |
||||
" global data\n", |
||||
" drive.mount(\"/content/drive\")\n", |
||||
"\n", |
||||
" dir_path = os.path.dirname(\"/content/drive/MyDrive/\" + path)\n", |
||||
"\n", |
||||
" if not os.path.exists(dir_path):\n", |
||||
" os.makedirs(dir_path)\n", |
||||
"\n", |
||||
" with open(\"/content/drive/MyDrive/\" + path, \"w\", encoding=\"utf-8\") as f:\n", |
||||
" f.write(data)\n", |
||||
"\n", |
||||
" generate.click(generateSentences, inputs=[typeInput, sentence_1, instruction_1, sentence_2, instruction_2, sentence_3, instruction_3, volume, language_choice, model_choice], outputs=liveSentences)\n", |
||||
" clear.click(\n", |
||||
" lambda: [\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value='<div style=\"color: #999; padding: 10px;\">Your sentences will be displayed here …</div>'),\n", |
||||
" gr.update(value=\"\"),\n", |
||||
" gr.update(value=\"Save generated data\", elem_classes=\"blue-button\")],\n", |
||||
" None,\n", |
||||
" [volume, typeInput, sentence_1, instruction_1, sentence_2, instruction_2,\n", |
||||
" sentence_3, instruction_3, liveSentences, outputPath, save],\n", |
||||
" queue=False\n", |
||||
" )\n", |
||||
" save.click(saveData, inputs=outputPath, outputs=None).then(lambda: gr.update(value=\"Your data has been saved\", elem_classes=\"green-button\"), [], [save])\n", |
||||
"\n", |
||||
"view.launch(share=True) #, debug=True)" |
||||
], |
||||
"metadata": { |
||||
"id": "VRKdu0fEt8mg" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
} |
||||
] |
||||
} |
File diff suppressed because it is too large
@ -0,0 +1,433 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import glob\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import gradio as gr\n", |
||||
"# import gemini\n", |
||||
"import google.generativeai" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 18, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports for langchain\n", |
||||
"\n", |
||||
"from langchain.document_loaders import DirectoryLoader, TextLoader\n", |
||||
"from langchain.text_splitter import CharacterTextSplitter\n", |
||||
"from langchain.schema import Document\n", |
||||
"# from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", |
||||
"from langchain_chroma import Chroma\n", |
||||
"from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI\n", |
||||
"import numpy as np\n", |
||||
"from sklearn.manifold import TSNE\n", |
||||
"import plotly.graph_objects as go\n", |
||||
"from langchain.memory import ConversationBufferMemory\n", |
||||
"from langchain.chains import ConversationalRetrievalChain" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# price is a factor for our company, so we're going to use a low cost model\n", |
||||
"\n", |
||||
"MODEL = \"gemini-1.5-flash\"\n", |
||||
"db_name = \"vector_db\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"google.generativeai.configure()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Read in documents using LangChain's loaders\n", |
||||
"# Take everything in all the sub-folders of our knowledgebase\n", |
||||
"\n", |
||||
"folders = glob.glob(\"knowledge-base/*\")\n", |
||||
"\n", |
||||
"# With thanks to CG and Jon R, students on the course, for this fix needed for some users \n", |
||||
"text_loader_kwargs = {'encoding': 'utf-8'}\n", |
||||
"# If that doesn't work, some Windows users might need to uncomment the next line instead\n", |
||||
"# text_loader_kwargs={'autodetect_encoding': True}\n", |
||||
"\n", |
||||
"documents = []\n", |
||||
"for folder in folders:\n", |
||||
" doc_type = os.path.basename(folder)\n", |
||||
" loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", |
||||
" folder_docs = loader.load()\n", |
||||
" for doc in folder_docs:\n", |
||||
" doc.metadata[\"doc_type\"] = doc_type\n", |
||||
" documents.append(doc)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 8, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stderr", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Created a chunk of size 1088, which is longer than the specified 1000\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", |
||||
"chunks = text_splitter.split_documents(documents)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"123" |
||||
] |
||||
}, |
||||
"execution_count": 9, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"len(chunks)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Document types found: company, contracts, employees, products\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"doc_types = set(chunk.metadata['doc_type'] for chunk in chunks)\n", |
||||
"print(f\"Document types found: {', '.join(doc_types)}\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 11, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Vectorstore created with 123 documents\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"embeddings = GoogleGenerativeAIEmbeddings(model=\"models/embedding-001\")\n", |
||||
"\n", |
||||
"# Check if a Chroma Datastore already exists - if so, delete the collection to start from scratch\n", |
||||
"\n", |
||||
"if os.path.exists(db_name):\n", |
||||
" Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n", |
||||
"\n", |
||||
"# Create our Chroma vectorstore!\n", |
||||
"\n", |
||||
"vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", |
||||
"print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 12, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"The vectors have 768 dimensions\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Get one vector and find how many dimensions it has\n", |
||||
"\n", |
||||
"collection = vectorstore._collection\n", |
||||
"sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n", |
||||
"dimensions = len(sample_embedding)\n", |
||||
"print(f\"The vectors have {dimensions:,} dimensions\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Prework\n", |
||||
"\n", |
||||
"result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n", |
||||
"vectors = np.array(result['embeddings'])\n", |
||||
"documents = result['documents']\n", |
||||
"doc_types = [metadata['doc_type'] for metadata in result['metadatas']]\n", |
||||
"colors = [['blue', 'green', 'red', 'orange'][['products', 'employees', 'contracts', 'company'].index(t)] for t in doc_types]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# We humans find it easier to visalize things in 2D!\n", |
||||
"# Reduce the dimensionality of the vectors to 2D using t-SNE\n", |
||||
"# (t-distributed stochastic neighbor embedding)\n", |
||||
"\n", |
||||
"tsne = TSNE(n_components=2, random_state=42)\n", |
||||
"reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
"# Create the 2D scatter plot\n", |
||||
"fig = go.Figure(data=[go.Scatter(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5, color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
")])\n", |
||||
"\n", |
||||
"fig.update_layout(\n", |
||||
" title='2D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x',yaxis_title='y'),\n", |
||||
" width=800,\n", |
||||
" height=600,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
")\n", |
||||
"\n", |
||||
"fig.show()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's try 3D!\n", |
||||
"\n", |
||||
"tsne = TSNE(n_components=3, random_state=42)\n", |
||||
"reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
"# Create the 3D scatter plot\n", |
||||
"fig = go.Figure(data=[go.Scatter3d(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" z=reduced_vectors[:, 2],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5, color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
")])\n", |
||||
"\n", |
||||
"fig.update_layout(\n", |
||||
" title='3D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n", |
||||
" width=900,\n", |
||||
" height=700,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
")\n", |
||||
"\n", |
||||
"fig.show()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"RAG pipeline using langchain" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 19, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stderr", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"C:\\Users\\GANESH\\AppData\\Local\\Temp\\ipykernel_524\\4130109764.py:5: LangChainDeprecationWarning:\n", |
||||
"\n", |
||||
"Please see the migration guide at: https://python.langchain.com/docs/versions/migrating_memory/\n", |
||||
"\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# create a new Chat with ChatGoogleGenerativeAI\n", |
||||
"llm = ChatGoogleGenerativeAI(model=MODEL, temperature=0.7)\n", |
||||
"\n", |
||||
"# set up the conversation memory for the chat\n", |
||||
"memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", |
||||
"\n", |
||||
"# the retriever is an abstraction over the VectorStore that will be used during RAG\n", |
||||
"retriever = vectorstore.as_retriever()\n", |
||||
"\n", |
||||
"# putting it together: set up the conversation chain with the GPT 4o-mini LLM, the vector store and memory\n", |
||||
"conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 20, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Insurellm is an insurance technology company with 200 employees and over 300 clients worldwide. They offer four software products, including Homellm, a portal for home insurance companies that integrates with existing platforms and offers a customer portal for policy management. Their pricing model is based on provider size and customization needs.\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"query = \"Can you describe Insurellm in a few sentences\"\n", |
||||
"result = conversation_chain.invoke({\"question\":query})\n", |
||||
"print(result[\"answer\"])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 21, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# set up a new conversation memory for the chat\n", |
||||
"memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", |
||||
"\n", |
||||
"# putting it together: set up the conversation chain with the GPT 4o-mini LLM, the vector store and memory\n", |
||||
"conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"Gradio User Interface" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 22, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
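"# Gradio callback: the chain's ConversationBufferMemory tracks history itself,\n", |
||||
"# so the 'history' argument supplied by Gradio is not passed along\n", |
||||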
"def chat(message, history):\n", |
||||
" result = conversation_chain.invoke({\"question\": message})\n", |
||||
" return result[\"answer\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 23, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7860\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "llms", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 2 |
||||
} |
@ -0,0 +1,405 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "dfe37963-1af6-44fc-a841-8e462443f5e6", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## This notebook compares the embeddings generated by OpenAIEmbeddings.\n", |
||||
"\n", |
||||
"It shows that OpenAIEmbeddings embeddings can differ slightly (typically at 4 the decimal place).\n", |
||||
"\n", |
||||
"### Results from OpenAIEmbeddings:\n", |
||||
"encodings are NOT identical on each run.\n", |
||||
"\n", |
||||
"### Repeating with sentence-transformers/all-MiniLM-L6-v2:\n", |
||||
"encodings ARE identical on each run.\n", |
||||
"\n", |
||||
"Tests verify simple numerical comparisons.\n", |
||||
"\n", |
||||
"### Advanced Comparison\n", |
||||
"A more advanced euclidean and cosine comparison is also included.\n", |
||||
"\n", |
||||
"## NOTES: Tests run on local Jupiter Notebook| Anaconda setup for the course." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ba2779af-84ef-4227-9e9e-6eaf0df87e77", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import glob\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "802137aa-8a74-45e0-a487-d1974927d7ca", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports for langchain\n", |
||||
"\n", |
||||
"from langchain.document_loaders import DirectoryLoader, TextLoader\n", |
||||
"from langchain.text_splitter import CharacterTextSplitter\n", |
||||
"from langchain.schema import Document\n", |
||||
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", |
||||
"from langchain_chroma import Chroma\n", |
||||
"import numpy as np\n", |
||||
"from sklearn.manifold import TSNE\n", |
||||
"import plotly.graph_objects as go\n", |
||||
"from langchain.memory import ConversationBufferMemory\n", |
||||
"from langchain.chains import ConversationalRetrievalChain" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "58c85082-e417-4708-9efe-81a5d55d1424", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# price is a factor for our company, so we're going to use a low cost model\n", |
||||
"\n", |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"db_name = \"vector_db\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ee78efcb-60fe-449e-a944-40bab26261af", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "730711a9-6ffe-4eee-8f48-d6cfb7314905", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Read in documents using LangChain's loaders\n", |
||||
"# Take everything in all the sub-folders of our knowledgebase\n", |
||||
"\n", |
||||
"folders = glob.glob(\"knowledge-base/*\")\n", |
||||
"\n", |
||||
"# With thanks to CG and Jon R, students on the course, for this fix needed for some users \n", |
||||
"text_loader_kwargs = {'encoding': 'utf-8'}\n", |
||||
"# If that doesn't work, some Windows users might need to uncomment the next line instead\n", |
||||
"# text_loader_kwargs={'autodetect_encoding': True}\n", |
||||
"\n", |
||||
"documents = []\n", |
||||
"for folder in folders:\n", |
||||
" doc_type = os.path.basename(folder)\n", |
||||
" loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", |
||||
" folder_docs = loader.load()\n", |
||||
" for doc in folder_docs:\n", |
||||
" doc.metadata[\"doc_type\"] = doc_type\n", |
||||
" documents.append(doc)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7310c9c8-03c1-4efc-a104-5e89aec6db1a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", |
||||
"chunks = text_splitter.split_documents(documents)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cd06e02f-6d9b-44cc-a43d-e1faa8acc7bb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"len(chunks)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2c54b4b6-06da-463d-bee7-4dd456c2b887", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"doc_types = set(chunk.metadata['doc_type'] for chunk in chunks)\n", |
||||
"print(f\"Document types found: {', '.join(doc_types)}\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a8b5ef27-70c2-4111-bce7-854bc1ebd02a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Use a where filter to specify the metadata condition\n", |
||||
"# Get the 3 company vectors (corresponds to our 3 yellow dots)\n", |
||||
"\n", |
||||
"def get_company_vectors(collection):\n", |
||||
" company_vectors = collection.get(\n", |
||||
" where={\"doc_type\": \"company\"}, # Filter for documents where source = \"XXXX\"\n", |
||||
" limit=10,\n", |
||||
" include=[\"embeddings\", \"metadatas\", \"documents\"]\n", |
||||
" )\n", |
||||
" print(f\"Found {len(company_vectors)} company vectors\")\n", |
||||
" return company_vectors\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d688b873-b52b-4d80-9df2-f70b389f5dc7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"\n", |
||||
"def print_vectors_summary(vectors):\n", |
||||
" for i in range(len(vectors[\"documents\"])):\n", |
||||
" print(f\"\\n--- Chunk {i+1} ---\")\n", |
||||
" \n", |
||||
" # Print document content (first 100 chars)\n", |
||||
" print(f\"Content: {vectors['documents'][i][:100]}...\")\n", |
||||
" \n", |
||||
" # Print metadata\n", |
||||
" print(f\"Metadata: {vectors['metadatas'][i]}\")\n", |
||||
" \n", |
||||
" # Print embedding info (not the full vector as it would be too long)\n", |
||||
" embedding = vectors[\"embeddings\"][i]\n", |
||||
" print(f\"Embedding: Vector of length {len(embedding)}, first 5 values: {embedding[:5]}\")\n", |
||||
"\n", |
||||
"\n", |
||||
"def get_dimensions_for_vectors(vectors):\n", |
||||
" dimensions = []\n", |
||||
"\n", |
||||
" for i in range(len(vectors[\"documents\"])):\n", |
||||
" embedding = vectors[\"embeddings\"][i]\n", |
||||
" dimensions.append(embedding)\n", |
||||
"\n", |
||||
" return dimensions\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0b195184-4920-404a-9bfa-0231f1dbe276", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Quick check if any single value is different\n", |
||||
"def quick_diff_check(emb1, emb2):\n", |
||||
" result = \"Embeddings are identical\"\n", |
||||
" print(\"\\n\\nComparing two embeddings:\\n\\n\")\n", |
||||
" print(emb1)\n", |
||||
" print(emb2)\n", |
||||
" for i, (v1, v2) in enumerate(zip(emb1, emb2)):\n", |
||||
" if v1 != v2:\n", |
||||
" result = f\"Different at dimension {i}: {v1} vs {v2}\"\n", |
||||
" break\n", |
||||
" print(result)\n", |
||||
" return result\n", |
||||
"\n", |
||||
"#quick_diff_check(dimensions[0], dimensions[1])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "06ba838d-d179-4e2d-b208-dd9cc1fd0097", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"\n", |
||||
"embeddings = OpenAIEmbeddings()\n", |
||||
"\n", |
||||
"def create_vectorstores(embeddings):\n", |
||||
"\n", |
||||
" if os.path.exists(\"vectorstore1\"):\n", |
||||
" Chroma(persist_directory=\"vectorstore1\", embedding_function=embeddings).delete_collection()\n", |
||||
" if os.path.exists(\"vectorstore2\"):\n", |
||||
" Chroma(persist_directory=\"vectorstore2\", embedding_function=embeddings).delete_collection()\n", |
||||
" \n", |
||||
" \n", |
||||
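"    # Build two vectorstores from the SAME chunks so that any run-to-run\n", |
||||
"    # embedding drift shows up when matching vectors are compared below\n", |
||||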
" # Create vectorstore 1\n", |
||||
" vectorstore1 = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=\"vectorstore1\")\n", |
||||
" print(f\"Vectorstore 1 created with {vectorstore1._collection.count()} documents\")\n", |
||||
" \n", |
||||
" # Create vectorstore 2\n", |
||||
" vectorstore2 = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=\"vectorstore2\")\n", |
||||
" print(f\"Vectorstore 2 created with {vectorstore2._collection.count()} documents\")\n", |
||||
"\n", |
||||
" return vectorstore1, vectorstore2\n", |
||||
"\n", |
||||
"vectorstore1, vectorstore2 = create_vectorstores(embeddings)\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e24242eb-613a-4edb-a081-6b8937f106a7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"## Uncomment this and rerun cells below, \n", |
||||
"## to see that HuggingFaceEmbeddings is idential\n", |
||||
"\n", |
||||
"#from langchain.embeddings import HuggingFaceEmbeddings\n", |
||||
"#embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n", |
||||
"#vectorstore1, vectorstore2 = create_vectorstores(embeddings)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "000b9e70-2958-40db-bbed-56a00e4249ce", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get the 3 company doc_type vectors\n", |
||||
"collection1 = vectorstore1._collection\n", |
||||
"collection2 = vectorstore2._collection\n", |
||||
"\n", |
||||
"company_vectors1=get_company_vectors(collection1)\n", |
||||
"company_vectors2=get_company_vectors(collection2)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "63cd63e4-9d3e-405a-8ef9-dac16fe2570e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Lets print out summary info just to see we have the same chunks.\n", |
||||
"\n", |
||||
"def print_summary_info (vectors):\n", |
||||
" print(\"VECTORS SUMMARY\\n\")\n", |
||||
" print_vectors_summary(vectors)\n", |
||||
"\n", |
||||
"\n", |
||||
"print(\"\\n\\n\\n========= VECTORS 1 =========\\n\\n\")\n", |
||||
"print_summary_info(company_vectors1)\n", |
||||
"\n", |
||||
"print(\"\\n\\n\\n========= VECTORS 2 =========\\n\\n\")\n", |
||||
"print_summary_info(company_vectors2)\n", |
||||
"\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bc085a35-f0ec-4ddb-955c-244cb2d3eb2a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"dimensions1 = get_dimensions_for_vectors(company_vectors1)\n", |
||||
"dimensions2 = get_dimensions_for_vectors(company_vectors2)\n", |
||||
"\n", |
||||
"result1 = quick_diff_check(dimensions1[0], dimensions2[0]) \n", |
||||
"result2 = quick_diff_check(dimensions1[1], dimensions2[1]) \n", |
||||
"result3 = quick_diff_check(dimensions1[2], dimensions2[2]) \n", |
||||
"\n", |
||||
"print(\"\\n\\nSUMMARY RESULTS:\")\n", |
||||
"print(\"================\\n\\n\")\n", |
||||
"print(result1) \n", |
||||
"print(result2)\n", |
||||
"print(result3)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "164cf94d-9d63-4bae-91f9-4b02da1537ae", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"## ADVANCED COMPARISONS:\n", |
||||
"# More advanced comparisons (from Claude 3.7 Sonnet):\n", |
||||
"\n", |
||||
"\n", |
||||
"## !IMPORTANT *** Uncomment final line to execute ***\n", |
||||
"\n", |
||||
"\n", |
||||
"import numpy as np\n", |
||||
"from scipy.spatial.distance import cosine\n", |
||||
"\n", |
||||
"# Method 1: Euclidean distance (L2 norm)\n", |
||||
"def compare_embeddings_euclidean(emb1, emb2):\n", |
||||
" emb1_array = np.array(emb1)\n", |
||||
" emb2_array = np.array(emb2)\n", |
||||
" distance = np.linalg.norm(emb1_array - emb2_array)\n", |
||||
" return {\n", |
||||
" \"different\": distance > 0,\n", |
||||
" \"distance\": distance,\n", |
||||
" \"similarity\": 1/(1+distance) # Converts distance to similarity score\n", |
||||
" }\n", |
||||
"\n", |
||||
"# Method 2: Cosine similarity (common for embeddings)\n", |
||||
"def compare_embeddings_cosine(emb1, emb2):\n", |
||||
" emb1_array = np.array(emb1)\n", |
||||
" emb2_array = np.array(emb2)\n", |
||||
" similarity = 1 - cosine(emb1_array, emb2_array) # Cosine returns distance, so subtract from 1\n", |
||||
" return {\n", |
||||
" \"different\": similarity < 0.9999, # Almost identical if > 0.9999\n", |
||||
" \"similarity\": similarity\n", |
||||
" }\n", |
||||
"\n", |
||||
"# Method 3: Simple exact equality check\n", |
||||
"def are_embeddings_identical(emb1, emb2):\n", |
||||
" return np.array_equal(np.array(emb1), np.array(emb2))\n", |
||||
"\n", |
||||
"\n", |
||||
"def run_advanced_comparisons():\n", |
||||
" for i in range(0, 3):\n", |
||||
" print(f\"\\n\\nComparing vector dimensions for dimension[{i}]....\\n\")\n", |
||||
" print(\"Exactly identical? ---> \", are_embeddings_identical(dimensions1[i], dimensions2[i]))\n", |
||||
" print(\"Cosine comparison: ---> \", compare_embeddings_cosine(dimensions1[i], dimensions2[i]))\n", |
||||
" print(\"Euclidean comparison: ---> \", compare_embeddings_euclidean(dimensions1[i], dimensions2[i]))\n", |
||||
"\n", |
||||
"\n", |
||||
"#run_advanced_comparisons()" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |