22 changed files with 5063 additions and 0 deletions
@ -0,0 +1,432 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 52, |
||||
"id": "b56a950c-db41-4575-bef9-0fa651dea363", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"import ollama\n", |
||||
"from typing import List\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display,clear_output\n", |
||||
"\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0ec875db-0f6a-4eec-a3b6-eae4b71a4b89", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Constants\n", |
||||
"\n", |
||||
"MODEL = \"llama3.2\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "227cd07c-98a4-463b-94ad-94e33d04944b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4d5c5e40-c010-4102-8359-899f988185fb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ed = Website(\"https://edwarddonner.com\")\n", |
||||
"ed.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5f0b5d71-487c-47a5-ace6-8e02465ed452", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"link_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"link_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
"    {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c6550325-5160-42c9-b7e7-980b504cd096", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(link_system_prompt)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2db4ccc6-5c35-4775-a5b2-4b86e4c73808", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8af511c7-5a74-4d1a-b763-b31370e70cff", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_links_user_prompt(ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a3b7fb61-ca15-4eab-b017-b0fe5cce46fd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ], format = \"json\" #Define format as json!\n", |
||||
" )\n", |
||||
" result = response['message']['content']\n", |
||||
"\n", |
||||
" return json.loads(result)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7816d393-620d-4c53-913e-4ec130b2baba", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Anthropic has made their site harder to scrape, so the extracted links may be limited..\n", |
||||
"\n", |
||||
"anthropic = Website(\"https://anthropic.com\")\n", |
||||
"anthropic.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f32ceccb-1d45-41a3-a5c1-fb2e6cd76afe", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_links(\"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a7ec4727-e897-473c-a657-e74f6999c974", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links(url)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7acde0c5-1af2-4e8e-9303-e2a98ec9cdbb", |
||||
"metadata": { |
||||
"scrolled": true |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_all_details(\"https://anthropic.com\"))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5a2e2b1d-eb55-4bfb-bf55-5e8c87db0d96", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"Include details of company culture, customers and careers/jobs if you have the information.\"\n", |
||||
"\n", |
||||
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", |
||||
"\n", |
||||
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"# Include details of company culture, customers and careers/jobs if you have the information.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8eac1719-7f94-4460-bc4a-0c9c93bb17a5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e2e312f6-01c5-4e57-9134-fb4aa447d155", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_brochure_user_prompt(\"Anthropic\", \"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8b05cbab-f0d2-4a9e-8b8c-c868a036e9cd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_brochure(company_name, url):\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ]\n", |
||||
" )\n", |
||||
" result = response[\"message\"][\"content\"]\n", |
||||
" display(Markdown(result))\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "91ede0c0-daf2-42ef-9d31-749afb9d5352", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"create_brochure(\"Anthropic\", \"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "afb4aeee-5108-42a7-a1c1-5bad254b7e8b", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Final improvement\n", |
||||
"\n", |
||||
"getting a typewriter animation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 50, |
||||
"id": "177de611-1cb1-49e2-b7ea-8d01191af3ee", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_brochure(company_name, url):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ]\n", |
||||
"\n", |
||||
" display_markdown = display(Markdown(\"\"), display_id=True) # Initialize Markdown display\n", |
||||
" response_text = \"\"\n", |
||||
"\n", |
||||
" for chunk in ollama.chat(model=MODEL, messages=messages, stream=True): # Ensure stream=True (not a string)\n", |
||||
" response_text += chunk['message']['content']\n", |
||||
" clear_output(wait=True) # Clear previous output to create a streaming effect\n", |
||||
" display_markdown.update(Markdown(response_text)) # Update Markdown dynamically\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 53, |
||||
"id": "a1971d81-fc7f-4ed1-97a0-7ef5e8ed332a", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Found links: {'links': [{'type': 'About page', 'url': 'https://www.anthropic.com/company'}, {'type': 'Careers page', 'url': 'https://www.anthropic.com/careers'}, {'type': 'Company page', 'url': 'https://www.anthropic.com/'}, {'type': 'Research page', 'url': 'https://www.anthropic.com/research'}, {'type': 'Twitter profile', 'url': 'https://twitter.com/AnthropicAI'}, {'type': 'LinkedIn company page', 'url': 'https://www.linkedin.com/company/anthropicresearch'}, {'type': 'YouTube channel', 'url': 'https://www.youtube.com/@anthropic-ai'}]}\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"**Anthropic Brochure**\n", |
||||
"======================\n", |
||||
"\n", |
||||
"**Mission Statement**\n", |
||||
"-------------------\n", |
||||
"\n", |
||||
"Anthropic is an AI safety and research company dedicated to building reliable, interpretable, and steerable AI systems that benefit humanity in the long run.\n", |
||||
"\n", |
||||
"**Company Overview**\n", |
||||
"--------------------\n", |
||||
"\n", |
||||
"Anthropic is headquartered in San Francisco and brings together a diverse team of researchers, engineers, policy experts, and business leaders with experience spanning various disciplines. Our mission is to conduct frontier AI research, develop and apply safety techniques, and deploy the resulting systems via partnerships and products.\n", |
||||
"\n", |
||||
"**Research Focus**\n", |
||||
"-----------------\n", |
||||
"\n", |
||||
"Anthropic conducts cutting-edge AI research across various modalities, exploring novel and emerging safety research areas such as interpretability, RL from human feedback, policy, and societal impacts analysis. Our research aims to advance the field of AI safety and inform our product development.\n", |
||||
"\n", |
||||
"**Product Portfolio**\n", |
||||
"---------------------\n", |
||||
"\n", |
||||
"Our flagship product is Claude, a highly intelligent AI model that enables customers to build custom applications and experiences using our API. We also offer various enterprise solutions, including Claude for Enterprise, designed to meet the needs of large organizations.\n", |
||||
"\n", |
||||
"**Customer Base**\n", |
||||
"-----------------\n", |
||||
"\n", |
||||
"Anthropic serves a diverse range of customers, including businesses, nonprofits, civil society groups, and their clients around the globe. Our commitment to safety and reliability has earned us a reputation as a trusted partner in the AI industry.\n", |
||||
"\n", |
||||
"**Values and Culture**\n", |
||||
"----------------------\n", |
||||
"\n", |
||||
"At Anthropic, we value:\n", |
||||
"\n", |
||||
"* **Acting for the global good**: We strive to make decisions that maximize positive outcomes for humanity in the long run.\n", |
||||
"* **Holding light and shade**: We acknowledge the potential risks of AI and approach our work with caution and transparency.\n", |
||||
"\n", |
||||
"**Join Our Team**\n", |
||||
"-----------------\n", |
||||
"\n", |
||||
"We're a collaborative team of researchers, engineers, policy experts, and business leaders passionate about building safer AI systems. Join us to be part of this exciting journey and contribute your skills and expertise to shaping the future of AI.\n", |
||||
"\n", |
||||
"**Careers**\n", |
||||
"------------\n", |
||||
"\n", |
||||
"Check our website for open roles and learn more about our company culture, benefits, and career opportunities.\n", |
||||
"\n", |
||||
"[Learn More](link)\n", |
||||
"\n", |
||||
"**Get in Touch**\n", |
||||
"-----------------\n", |
||||
"\n", |
||||
"Stay up-to-date with the latest news and announcements from Anthropic. Follow us on Twitter, LinkedIn, or YouTube to join the conversation and stay informed.\n", |
||||
"\n", |
||||
"[Twitter](link)\n", |
||||
"[LinkedIn](link)\n", |
||||
"[YouTube](link)" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"create_brochure(\"Anthropic\", \"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c33277a4-84f1-447c-a66e-eb7e2af42d2a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.13.2" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,138 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# End of Week 1 Exercise — Ollama Solution with Streaming\n", |
||||
"\n", |
||||
"A tool that takes a technical question, and responds with an explanation." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c1070317-3ed9-4659-abe3-828943230e03", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Imports\n", |
||||
"\n", |
||||
"import ollama\n", |
||||
"import requests\n", |
||||
"from IPython.display import Markdown, display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Constants\n", |
||||
"\n", |
||||
"MODEL_LLAMA = 'llama3.2'\n", |
||||
"MODEL_LLAMA1b = \"llama3.2:1b\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Environment\n", |
||||
"\n", |
||||
"system_prompt = \"\"\"\n", |
||||
"You are an assistant that takes a technical question and respond with an explanation.\n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"question = \"\"\"\n", |
||||
"Please explain what this code does and why:\n", |
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"question2 = \"\"\"\n", |
||||
"What is the purpose of using yield from in the following code, and how does it differ from a standard for loop with yield?\n", |
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"user_prompt = \"Answer these two questions in detail please, Question1:\" + question + \"Question2:\" + question2\n", |
||||
"\n", |
||||
"def message():\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Llama 3.2 answer, with streaming\n", |
||||
"\n", |
||||
"def llama():\n", |
||||
" response = ollama.chat(\n", |
||||
" model = MODEL_LLAMA,\n", |
||||
" messages = message(),\n", |
||||
" stream =True\n", |
||||
" )\n", |
||||
" full_response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in response:\n", |
||||
" content = chunk.get(\"message\", {}).get(\"content\", \"\")\n", |
||||
" if content:\n", |
||||
" full_response += content\n", |
||||
" display_handle.update(Markdown(full_response))\n", |
||||
"llama()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "342a470c-9aab-4051-ad21-514dceec76eb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Llama 3.2:1b answer\n", |
||||
"\n", |
||||
"def llama():\n", |
||||
" response = ollama.chat(\n", |
||||
" model = MODEL_LLAMA1b,\n", |
||||
" messages = message()\n", |
||||
" )\n", |
||||
" return display(Markdown(response['message']['content']))\n", |
||||
"\n", |
||||
"llama()" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.10.7" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,131 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "f3c6d883-58a2-47de-823f-3c7430cffcc9", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"\"Airbrush or Air Bust? Let's Find Out!\"\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"\n", |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# Step 1: Create your prompts\n", |
||||
"\n", |
||||
"system_prompt = \"You will take the body of an email and evaluate it to suggest a brief snarky subject\"\n", |
||||
"user_prompt = \"\"\"\n", |
||||
"Dear Air Brush Customer Service Team,\n", |
||||
"\n", |
||||
"I hope this message finds you well. I am writing to formally lodge a complaint regarding the airbrush product I purchased from your store. Unfortunately, the product I received is defective and does not meet the quality standards as advertised.\n", |
||||
"\n", |
||||
"Below are the details of my issue:\n", |
||||
"\n", |
||||
"Order Number: #12345\n", |
||||
"\n", |
||||
"Product Name: Air Brush model 123\n", |
||||
"\n", |
||||
"Date of Purchase: 18/1/2025\n", |
||||
"\n", |
||||
"Issue Description:\n", |
||||
"Defective Nozzle: The nozzle of the airbrush is clogged and does not allow proper airflow, making it impossible to use.\n", |
||||
"\n", |
||||
"Inconsistent Spray Pattern: Even after multiple attempts to clean and adjust the settings, the spray pattern is uneven and inconsistent.\n", |
||||
"\n", |
||||
"Leakage: The airbrush leaks air and paint from the joints, which is a significant safety hazard.\n", |
||||
"\n", |
||||
"Build Quality: The overall build quality of the product feels subpar, with loose fittings and a flimsy trigger mechanism.\n", |
||||
"\n", |
||||
"Steps Taken:\n", |
||||
"I followed the user manual and cleaning instructions provided, but the issues persist.\n", |
||||
"\n", |
||||
"I also reached out to your technical support team on [Date] but have not received a resolution.\n", |
||||
"\n", |
||||
"Expectation:\n", |
||||
"Given the defective nature of the product, I would like to request a full refund for the item. Alternatively, if a refund is not possible, I would appreciate a replacement with a fully functional unit.\n", |
||||
"\n", |
||||
"Attachments:\n", |
||||
"I have attached photos and a video demonstrating the issues for your reference.\n", |
||||
"\n", |
||||
"Copies of the invoice and order confirmation are also attached for your convenience.\n", |
||||
"\n", |
||||
"Request for Resolution:\n", |
||||
"Kindly let me know the next steps to process the refund or replacement. I would appreciate a prompt response within [X business days, e.g., 3-5 business days] to resolve this matter.\n", |
||||
"\n", |
||||
"Thank you for your attention to this issue. I trust that you will handle this matter professionally and ensure customer satisfaction.\n", |
||||
"\n", |
||||
"Looking forward to your swift response.\n", |
||||
"\n", |
||||
"Best regards,\n", |
||||
"Oya YILDIZ\n", |
||||
"İstanbul\n", |
||||
"Turkey\n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"# Step 2: Make the messages list\n", |
||||
"\n", |
||||
"messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
"] # fill this in\n", |
||||
"\n", |
||||
"# Step 3: Call OpenAI\n", |
||||
"\n", |
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", |
||||
"\n", |
||||
"# Step 4: print the result\n", |
||||
"\n", |
||||
"print(response.choices[0].message.content)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d9b655de-e8c3-4136-b6a6-2fb0ce01c364", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,223 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "bfa3abd0-4e66-4117-96f9-7a71fbb6d0cb", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Powerpoint Slides Summarizer\n", |
||||
"\n", |
||||
"This converts a Power Point presentation into notes that a student can easily skim through.\n", |
||||
"\n", |
||||
"Concepts Used:\n", |
||||
"- Converting Contents of PPT to text via python-pptx\n", |
||||
"- User and System Prompts\n", |
||||
"- Use of Open AI GPT-4o-mini via API key\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ab95eb49-6a2d-4c7d-9057-78a2cd9364cc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install python-pptx" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "62715f16-7125-455e-98e7-5705871c0e4a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ff42eab7-789d-44f8-a5cc-64baeebf3224", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bce425c2-6d19-4c03-93ce-8930dabc61ee", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# creating an instance\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c0c75e30-3b38-4a89-b7d3-a41a6f5dc650", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"from pptx import Presentation\n", |
||||
"\n", |
||||
"class PowerPoint():\n", |
||||
" def __init__(self,ppt):\n", |
||||
" \"\"\"\n", |
||||
" Creates a PowerPoint object, with name and text.\n", |
||||
" \"\"\"\n", |
||||
" self.ppt = ppt\n", |
||||
" self.title = os.path.basename(ppt)\n", |
||||
" self.text = self.extract_text()\n", |
||||
"\n", |
||||
" def extract_text(self):\n", |
||||
" \"\"\"\n", |
||||
" Extracts text from powerpoint.\n", |
||||
" \"\"\"\n", |
||||
" prs = Presentation(self.ppt)\n", |
||||
" text_content = []\n", |
||||
" \n", |
||||
" for slide in prs.slides:\n", |
||||
" for shape in slide.shapes:\n", |
||||
" if hasattr(shape, \"text\"):\n", |
||||
" text_content.append(shape.text)\n", |
||||
" \n", |
||||
" return \"\\n\".join(text_content)\n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1963a055-87f4-4e47-8456-cac4d4ac57fc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an assistant that analyzes the contents \\\n", |
||||
"of a PowerPoint presentation, and provides a summary in the style of \\\n", |
||||
"a cheat-sheet, for students to easily learn key concepts from.\\\n", |
||||
"You are to ignore text that might be navigation-related\\\n", |
||||
"and respond in Markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ca600e90-7d3f-4fc7-a698-1b8f2925f81e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function that writes a User Prompt that asks for summaries of PowerPoints:\n", |
||||
"\n", |
||||
"def user_prompt_for(powerpoint):\n", |
||||
"    user_prompt = f\"You are looking at a PowerPoint presentation titled {powerpoint.title}\"\n", |
||||
" user_prompt += \"\\nThe contents of this powerpoint are as follows; \\\n", |
||||
"please provide a summary of the content in markdown. \\\n", |
||||
"If it includes a question bank, add that along with short answers too.\\n\\n\"\n", |
||||
" user_prompt += powerpoint.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4fe19c56-9940-4528-b43a-c86798b215d2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def messages_for(powerpoint):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(powerpoint)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f7704da5-90b0-40af-bbb4-7d589309f180", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And now: call the OpenAI API. \n", |
||||
"\n", |
||||
"def summarize(powerpoint_path):\n", |
||||
" powerpoint = PowerPoint(powerpoint_path)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages_for(powerpoint)\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "49d1d0cf-fa4b-4bea-bd68-a834145070ef", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def display_summary(url):\n", |
||||
" summary = summarize(url)\n", |
||||
" display(Markdown(summary))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "348078d1-e86f-4eb3-909d-33ab4ede984e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ppt_file = \"Theoretical Perspectives on Media and Technology.pptx\" \n", |
||||
"display_summary(ppt_file)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,230 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "56c86bae-1d3c-4c01-b5d6-c8879fec1954", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Wiki Summarizer\n", |
||||
"\n", |
||||
"This project takes the name of a topic as input and checks whether the corresponding Wikipedia page exists. If it does, it parses the web page and outputs a summary generated with the GPT-4o-mini model.\n", |
||||
"\n", |
||||
"Concepts used: \n", |
||||
"- Web Scraping via Beautiful Soup\n", |
||||
"- User and System Prompts\n", |
||||
"- Use of Open AI GPT-4o-mini via API key" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4820830e-b3b4-426e-b1a2-518e7c7f6c1a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2cd7ad51-396c-45c5-9089-f7b21a19da50", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "689421a0-20a1-428b-a8b8-fa239fa6f633", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# creating an instance\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "401901ae-7639-4190-98fd-e69374084723", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def isWiki(url):\n", |
||||
" \"\"\"\n", |
||||
" Check whether a Wikipedia page exists for a given topic, and \n", |
||||
" returns a Boolean value.\n", |
||||
" \"\"\"\n", |
||||
" response = requests.get(url)\n", |
||||
"\n", |
||||
" if response.status_code != 200:\n", |
||||
" return False\n", |
||||
" \n", |
||||
" return True" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7cdb14d3-05ea-4de2-a475-d49a5731692e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" \"\"\"\n", |
||||
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||
" \"\"\"\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7f6ed50e-0fb5-479e-9845-f62cf25980f7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an educational assistant tasked with helping users understand topics \\\n", |
||||
"by providing succinct and clear summaries of requested data. Ignore navigation-related text \\\n", |
||||
"and provide answers in markdown format\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b2d77dd9-a94f-49c1-a1be-11d157bd37fb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function that writes a User Prompt that asks for summaries of wiki pages:\n", |
||||
"\n", |
||||
"def user_prompt_for(wiki):\n", |
||||
" user_prompt = f\"You are looking at a Wikipedia page titled {wiki.title}\"\n", |
||||
" user_prompt += \"\\nThe contents of this page is as follows; \\\n", |
||||
"please provide a short summary of this website in markdown.\\n\"\n", |
||||
" user_prompt += wiki.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0d23bcc4-1d89-4bd4-9809-d3a1819aa919", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def messages_for(wiki):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(wiki)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "971bd7fb-2ff8-4494-b386-de69a39c24ff", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def summarize(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages_for(website)\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a8fdf9f2-f49e-4d06-ac9e-dfcb8da33d60", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def display_summary(topic):\n", |
||||
" url = f\"https://en.wikipedia.org/wiki/{topic}\"\n", |
||||
" if isWiki(url):\n", |
||||
" summary = summarize(url)\n", |
||||
" display(Markdown(summary))\n", |
||||
" else:\n", |
||||
" print('A Wikipedia page does not exist for this topic')\n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f4758ef0-9b7c-4d3e-9131-e3284dc76b6b", |
||||
"metadata": { |
||||
"scrolled": true |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"topic = input('Enter the name of Wikipedia page for which you would like a summary: ').strip()\n", |
||||
"display_summary(topic)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,663 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# A full business solution\n", |
||||
"\n", |
||||
"## Now we will take our project from Day 1 to the next level\n", |
||||
"\n", |
||||
"### BUSINESS CHALLENGE:\n", |
||||
"\n", |
||||
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n", |
||||
"\n", |
||||
"We will be provided a company name and their primary website.\n", |
||||
"\n", |
||||
"See the end of this notebook for examples of real-world business applications.\n", |
||||
"\n", |
||||
"And remember: I'm always available if you have problems or ideas! Please do reach out." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d5b08506-dc8b-4443-9201-5f1848161363", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Initialize and constants\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", |
||||
" print(\"API key looks good so far\")\n", |
||||
"else:\n", |
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
||||
" \n", |
||||
"MODEL = 'gpt-4o-mini'\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "106dd65e-90af-4ca8-86b6-23a41840645b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ed = Website(\"https://edwarddonner.com\")\n", |
||||
"ed.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1771af9c-717a-4fca-bbbe-8a95893312c3", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## First step: Have GPT-4o-mini figure out which links are relevant\n", |
||||
"\n", |
||||
"### Use a call to gpt-4o-mini to read the links on a webpage, and respond in structured JSON. \n", |
||||
"It should decide which links are relevant, and replace relative links such as \"/about\" with \"https://company.com/about\". \n", |
||||
"We will use \"one shot prompting\" in which we provide an example of how it should respond in the prompt.\n", |
||||
"\n", |
||||
"This is an excellent use case for an LLM, because it requires nuanced understanding. Imagine trying to code this without LLMs by parsing and analyzing the webpage - it would be very hard!\n", |
||||
"\n", |
||||
"Sidenote: there is a more advanced technique called \"Structured Outputs\" in which we require the model to respond according to a spec. We cover this technique in Week 8 during our autonomous Agentic AI project." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6957b079-0d96-45f7-a26a-3487510e9b35", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"oneshot_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company or freelancer offering their services, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"oneshot_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"oneshot_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
"        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"\n", |
||||
"oneshot_system_prompt += \"Make sure not to miss any relevant pages.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f5a8b688-b153-41a6-8b18-f6198f3df2c9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"fewshot_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company or freelancer offering their services, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n You should respond in JSON as in the following examples:\"\n", |
||||
"fewshot_system_prompt += \"\"\"\n", |
||||
" Example 1\n", |
||||
" ['https://great-comps.com/about-me', 'https://www.linkedin.com/in/great-comp/', 'mailto:hello@mygroovydomain.com', 'https://great-comps.com/news', '/case-studies', 'https://patents.google.com/patent/US20210049536A1/', 'https://great-comps.com/workshop-ai']\n", |
||||
"\n", |
||||
" Links:\n", |
||||
" {\n", |
||||
" \"links\": [\n", |
||||
"            {\"type\": \"about page\", \"url\": \"https://great-comps.com/about-me\"},\n", |
||||
"            {\"type\": \"news page\", \"url\": \"https://great-comps.com/news\"},\n", |
||||
"            {\"type\": \"case studies page\", \"url\": \"https://great-comps.com/case-studies\"},\n", |
||||
"            {\"type\": \"workshop page\", \"url\": \"https://great-comps.com/workshop-ai\"}\n", |
||||
" ]\n", |
||||
" }\n", |
||||
"\n", |
||||
" Example 2\n", |
||||
" ['mailto:info@robbie-doodle-domain.com','https://wahlen-robbie.at/ueber-mich', 'https://www.linkedin.com/in/robbie-doodle/', 'https://news.ycombinator.com', 'https://wahlen-robbie.at/neuigkeiten', 'https://twitter.com/robbie-d', '/whitepapers', 'https://patents.google.com/patent/US20210049536A1/', 'https://wahlen-robbie.at/services']\n", |
||||
"\n", |
||||
" Links:\n", |
||||
" {\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"über mich\", \"url\": \"https://wahlen-robbie.at/ueber-mich\"},\n", |
||||
"            {\"type\": \"aktuelles\", \"url\": \"https://wahlen-robbie.at/neuigkeiten\"},\n", |
||||
"            {\"type\": \"whitepaper\", \"url\": \"https://wahlen-robbie.at/whitepapers\"},\n", |
||||
"            {\"type\": \"services\", \"url\": \"https://wahlen-robbie.at/services\"}\n", |
||||
" ]\n", |
||||
" }\n", |
||||
" \"\"\"\n", |
||||
"fewshot_system_prompt += \"Make sure not to miss any relevant pages.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b97e4068-97ed-4120-beae-c42105e4d59a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(f\"Oneshot system prompt:\\n{oneshot_system_prompt}\")\n", |
||||
"print(f\"\\n\\n\\nFewshot system prompt:\\n{fewshot_system_prompt}\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company or person offering their services, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email links or social media links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6bcbfa78-6395-4685-b92c-22d592050fd7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_links_user_prompt(ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a29aca19-ca13-471c-a4b4-5abbfa813f69", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links(url, system_prompt=oneshot_system_prompt):\n", |
||||
" \n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" \n", |
||||
" result = response.choices[0].message.content \n", |
||||
" print(f\"Response: {result}\")\n", |
||||
" return json.loads(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2dc4150a-0042-4f5d-a7bf-158a0f9147a6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_links(ed.url)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Anthropic has made their site harder to scrape, so I'm using HuggingFace..\n", |
||||
"hf = \"https://huggingface.co\"\n", |
||||
"\n", |
||||
"huggingface = Website(hf)\n", |
||||
"huggingface.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ed_url = \"https://edwarddonner.com\"\n", |
||||
"hf_url = \"https://huggingface.co\"\n", |
||||
"\n", |
||||
"print(f\"Links generated with oneshot prompt for {ed_url}:\\n\")\n", |
||||
"get_links(ed_url)\n", |
||||
"\n", |
||||
"print(f\"\\n\\nLinks generated with fewshot prompt for {ed_url}:\\n\")\n", |
||||
"get_links(ed_url, fewshot_system_prompt)\n", |
||||
"\n", |
||||
"print(50*\"*\")\n", |
||||
"print(f\"\\nLinks generated with oneshot prompt for {hf_url}:\\n\")\n", |
||||
"get_links(hf_url)\n", |
||||
"\n", |
||||
"print(f\"\\n\\nLinks generated with fewshot prompt for {hf_url}:\\n\")\n", |
||||
"get_links(hf_url, fewshot_system_prompt)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "0d74128e-dfb6-47ec-9549-288b621c838c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Second step: make the brochure!\n", |
||||
"\n", |
||||
"Assemble all the details into another prompt to GPT4-o" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url, type=fewshot_system_prompt):\n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
"\n", |
||||
" links = get_links(url, type)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_all_details(ed_url))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"and creates a short brochure about the company for prospective customers, investors and recruits. \\\n", |
||||
"The brochure should be a bit unusual in terms of tone and style, it should astound the reader and pique their interest. Respond in markdown. \\\n", |
||||
"Include details of company culture, customers and careers/jobs if you have the information.\"\n", |
||||
"\n", |
||||
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", |
||||
"\n", |
||||
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "05d07160-7910-4da2-92ac-36aa849fcc68", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# get_brochure_user_prompt(\"Edward Donner\", ed_url)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cd909e0b-1312-4ce2-a553-821e795d7572", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_brochure(company_name, url):\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6b0de762-f343-44d9-85d5-9bffba3c0ae8", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"brochure_ed = create_brochure(\"Edward Donner\", ed_url)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e093444a-9407-42ae-924a-145730591a39", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"brochure_hf = create_brochure(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0d00b012-3901-492c-b985-a0340750c011", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display(Markdown(brochure_ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e33cb2e9-3b8c-4ef3-a6cb-70b3188b9120", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display(Markdown(brochure_hf))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "dea955ad-24a6-490b-8191-f066bff1b595", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def translate_brochure(brochure_content, language=\"German\"):\n", |
||||
" system_prompt = f\"You are a skilled translator. Translate the following brochure text into {language}.\\\n", |
||||
"    Make sure to translate into an idiomatic {language}, matching the target language's natural structure, wording and expressions, so it can't be recognised as a translation.\\\n", |
||||
" Be sure to also meet an appropriate tone, eg a good marketing language in other languages will probably be a bit less boastful than in English.\\\n", |
||||
" Output the translated brochure in Markdown format.\"\n", |
||||
" \n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = MODEL,\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_prompt}, {\"role\": \"user\", \"content\": brochure_content}]\n", |
||||
" )\n", |
||||
"\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9b6bdd4f-7518-4780-9da9-47f90aab974b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"translation = translate_brochure(brochure_ed, language=\"German\")\n", |
||||
"display(Markdown(translation))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f1dd96f2-0980-4a30-a152-1f38c0e319bb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"translation = translate_brochure(brochure_hf, language=\"German\")\n", |
||||
"display(Markdown(translation))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "61eaaab7-0b47-4b29-82d4-75d474ad8d18", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Finally - a minor improvement\n", |
||||
"\n", |
||||
"With a small adjustment, we can change this so that the results stream back from OpenAI,\n", |
||||
"with the familiar typewriter animation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "51db0e49-f261-4137-aabe-92dd601f7725", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_brochure(company_name, url):\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fdb3f8d8-a3eb-41c8-b1aa-9f60686a653b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Try changing the system prompt to the humorous version when you make the Brochure for Hugging Face:\n", |
||||
"\n", |
||||
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a27bf9e0-665f-4645-b66b-9725e2a959b5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||
" <span style=\"color:#181;\">In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", |
||||
"\n", |
||||
"This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", |
||||
"\n", |
||||
"Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype. See what other students have done in the community-contributions folder -- so many valuable projects -- it's wild!</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "14b2454b-8ef8-4b5c-b928-053a15e0d553", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Before you move to Week 2 (which is tons of fun)</h2>\n", |
||||
" <span style=\"color:#900;\">Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "17b64f0f-7d33-4493-985a-033d06e8db08", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#f71;\">A reminder on 3 useful resources</h2>\n", |
||||
" <span style=\"color:#f71;\">1. The resources for the course are available <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">here.</a><br/>\n", |
||||
" 2. I'm on LinkedIn <a href=\"https://www.linkedin.com/in/eddonner/\">here</a> and I love connecting with people taking the course!<br/>\n", |
||||
"      3. I'm trying out X/Twitter and I'm at <a href=\"https://x.com/edwarddonner\">@edwarddonner</a> and hoping people will teach me how it's done.. \n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6f48e42e-fa7a-495f-a5d4-26bfc24d60b6", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../thankyou.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#090;\">Finally! I have a special request for you</h2>\n", |
||||
" <span style=\"color:#090;\">\n", |
||||
" My editor tells me that it makes a MASSIVE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b8d3e1a1-ba54-4907-97c5-30f89a24775b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,501 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# A full business solution (Ollama Version)\n", |
||||
"\n", |
||||
"## Now we will take our project from Day 1 to the next level\n", |
||||
"\n", |
||||
"### BUSINESS CHALLENGE:\n", |
||||
"\n", |
||||
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n", |
||||
"\n", |
||||
"We will be provided a company name and their primary website.\n", |
||||
"\n", |
||||
"See the end of this notebook for examples of real-world business applications.\n", |
||||
"\n", |
||||
"And remember: I'm always available if you have problems or ideas! Please do reach out." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d5b08506-dc8b-4443-9201-5f1848161363", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"import ollama" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Initialize and constants\n", |
||||
" \n", |
||||
"# MODEL = \"llama3.2\"\n", |
||||
"MODEL = \"llama3.2:1b\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "106dd65e-90af-4ca8-86b6-23a41840645b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ed = Website(\"https://edwarddonner.com\")\n", |
||||
"ed.links\n", |
||||
"# print(ed.get_contents())" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1771af9c-717a-4fca-bbbe-8a95893312c3", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## First step: Have llama3.2 figure out which links are relevant\n", |
||||
"\n", |
||||
"### Use a call to llama3.2 to read the links on a webpage, and respond in structured JSON. \n", |
||||
"It should decide which links are relevant, and replace relative links such as \"/about\" with \"https://company.com/about\". \n", |
||||
"We will use \"one shot prompting\" in which we provide an example of how it should respond in the prompt.\n", |
||||
"\n", |
||||
"This is an excellent use case for an LLM, because it requires nuanced understanding. Imagine trying to code this without LLMs by parsing and analyzing the webpage - it would be very hard!\n", |
||||
"\n", |
||||
"Sidenote: there is a more advanced technique called \"Structured Outputs\" in which we require the model to respond according to a spec. We cover this technique in Week 8 during our autonomous Agentic AI project." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6957b079-0d96-45f7-a26a-3487510e9b35", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"link_system_prompt += \"You should respond in JSON format as shown in the following example:\"\n", |
||||
"link_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
" {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b97e4068-97ed-4120-beae-c42105e4d59a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(link_system_prompt)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6bcbfa78-6395-4685-b92c-22d592050fd7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_links_user_prompt(ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a29aca19-ca13-471c-a4b4-5abbfa813f69", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" result = response['message']['content']\n", |
||||
" return json.loads(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Anthropic has made their site harder to scrape, so I'm using HuggingFace.\n", |
||||
"\n", |
||||
"huggingface = Website(\"https://huggingface.co\")\n", |
||||
"huggingface.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_links(\"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "0d74128e-dfb6-47ec-9549-288b621c838c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Second step: make the brochure!\n", |
||||
"\n", |
||||
"Assemble all the details into another prompt to llama3.2" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links(url)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_all_details(\"https://huggingface.co\"))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"Include details of company culture, customers and careers/jobs if you have the information.\"\n", |
||||
"\n", |
||||
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", |
||||
"\n", |
||||
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cd909e0b-1312-4ce2-a553-821e795d7572", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_brochure(company_name, url):\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" result = response['message']['content']\n", |
||||
" display(Markdown(result))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e093444a-9407-42ae-924a-145730591a39", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"create_brochure(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "61eaaab7-0b47-4b29-82d4-75d474ad8d18", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Finally - a minor improvement\n", |
||||
"\n", |
||||
"With a small adjustment, we can change this so that the results stream back from Ollama,\n", |
||||
"with the familiar typewriter animation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "51db0e49-f261-4137-aabe-92dd601f7725", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_brochure(company_name, url):\n", |
||||
" stream = ollama.chat(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" content = chunk.get(\"message\", {}).get(\"content\", \"\")\n", |
||||
" if content:\n", |
||||
" response += content\n", |
||||
" response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fdb3f8d8-a3eb-41c8-b1aa-9f60686a653b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Try changing the system prompt to the humorous version when you make the Brochure for Hugging Face:\n", |
||||
"\n", |
||||
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a27bf9e0-665f-4645-b66b-9725e2a959b5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||
" <span style=\"color:#181;\">In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", |
||||
"\n", |
||||
"This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", |
||||
"\n", |
||||
"Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "14b2454b-8ef8-4b5c-b928-053a15e0d553", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Before you move to Week 2 (which is tons of fun)</h2>\n", |
||||
" <span style=\"color:#900;\">Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "17b64f0f-7d33-4493-985a-033d06e8db08", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#f71;\">A reminder on 2 useful resources</h2>\n", |
||||
" <span style=\"color:#f71;\">1. The resources for the course are available <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">here.</a><br/>\n", |
||||
" 2. I'm on LinkedIn <a href=\"https://www.linkedin.com/in/eddonner/\">here</a> and I love connecting with people taking the course!\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6f48e42e-fa7a-495f-a5d4-26bfc24d60b6", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../thankyou.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#090;\">Finally! I have a special request for you</h2>\n", |
||||
" <span style=\"color:#090;\">\n", |
||||
" My editor tells me that it makes a MASSIVE difference when students rate this course on Udemy - it's one of the main ways that Udemy decides whether to show it to others. If you're able to take a minute to rate this, I'd be so very grateful! And regardless - always please reach out to me at ed@edwarddonner.com if I can help at any point.\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b8d3e1a1-ba54-4907-97c5-30f89a24775b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.10.7" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,201 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# End of week 1 exercise\n", |
||||
"\n", |
||||
"To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", |
||||
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c1070317-3ed9-4659-abe3-828943230e03", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# constants\n", |
||||
"\n", |
||||
"MODEL_GPT = 'gpt-4o-mini'\n", |
||||
"MODEL_LLAMA = 'llama3.2'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# set up environment\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv(\"OPENAI_API_KEY\")\n", |
||||
"\n", |
||||
"# set up clients\n", |
||||
"openai = OpenAI()\n", |
||||
"ollama = OpenAI(base_url=\"http://localhost:11434/v1\" , api_key=\"ollama\")\n", |
||||
"\n", |
||||
"# set up system prompt\n", |
||||
"system_prompt = \"You are a coding tutor. If the user asks you a question, answer it to the point. If you are asked to create a code snippet, generate the code in Python and then explain it shortly.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 53, |
||||
"id": "58f098cb-4b4e-4394-b0b5-29db88e9101c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def send_request(user_prompt, model=MODEL_LLAMA, stream=False):\n", |
||||
" message = [{\"role\": \"system\", \"content\": system_prompt}, {\"role\": \"user\", \"content\": user_prompt}]\n", |
||||
" if model.startswith(\"gpt\"):\n", |
||||
" model_client = openai\n", |
||||
" else:\n", |
||||
" model_client = ollama\n", |
||||
"\n", |
||||
" \n", |
||||
" response = model_client.chat.completions.create(\n", |
||||
" model=model,\n", |
||||
" messages=message,\n", |
||||
" stream=stream\n", |
||||
" )\n", |
||||
"\n", |
||||
" if stream:\n", |
||||
" streaming = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in response:\n", |
||||
" streaming += chunk.choices[0].delta.content or ''\n", |
||||
" streaming = streaming.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(streaming), display_id=display_handle.display_id)\n", |
||||
"\n", |
||||
" else:\n", |
||||
" return display(Markdown(response.choices[0].message.content))\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 49, |
||||
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdin", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
" How can I display python code properly while streaming the answer from openai? Create a code snippet for this. The streaming should happen in the code canvas.\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# here is the question; type over this to ask something new\n", |
||||
"question = input()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 48, |
||||
"id": "2bc093fa-b2ff-47e9-8ea8-e41499385116", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# question = \"\"\"How can I display python code properly while streaming the answer from openai? Create a code snippet for this. The streaming should happen in the code canvas.\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get gpt-4o-mini to answer, with streaming\n", |
||||
"send_request(model=MODEL_GPT, user_prompt=question, stream=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 54, |
||||
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"To display Python code properly with OpenAI's chat interface, you'll need to use the `code` formatting in the response format provided by the API endpoint. \n", |
||||
"\n", |
||||
"Here's an example of how you can modify the API request URL to include the formatted code:\n", |
||||
"\n", |
||||
"```python\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"\n", |
||||
"query = {\n", |
||||
" \"text\": \"{\\n} # Python code here\\n}\"\n", |
||||
"\n", |
||||
"headers = {\n", |
||||
" 'Content-Type': 'application/json'\n", |
||||
"}\n", |
||||
"\n", |
||||
"response = requests.post('https://api.openai.com/v1/answers', data=json.dumps(query), headers=headers)\n", |
||||
"\n", |
||||
"answer = response.json()\n", |
||||
"```\n", |
||||
"\n", |
||||
"However, the most convenient way to display the code is by using the `code` directive directly in your chat prompt. OpenAI will automatically format and highlight your code." |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Get Llama 3.2 to answer\n", |
||||
"send_request(user_prompt=question)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,308 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# End of week 1 exercise\n", |
||||
"\n", |
||||
"To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", |
||||
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 19, |
||||
"id": "c1070317-3ed9-4659-abe3-828943230e03", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"import ollama" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# constants\n", |
||||
"\n", |
||||
"MODEL_GPT = 'gpt-4o-mini'\n", |
||||
"MODEL_LLAMA = 'llama3.2'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 11, |
||||
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"API key looks good so far\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# set up environment\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", |
||||
" print(\"API key looks good so far\")\n", |
||||
"else:\n", |
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
||||
" \n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 12, |
||||
"id": "624780c5-debb-44c7-a505-acf573ad5034", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#prompts\n", |
||||
"\n", |
||||
"system_prompt = \"You are a technical tutor that answers questions related to the field of computer science. \\\n", |
||||
"Your answers should reflect recent advancements in the field of software development, Artificial Intelligence and Large Language Models. Respond in markdown.\"\n", |
||||
"\n", |
||||
"system_prompt += \"Include resources that might help learners get more information on the topic.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"id": "b6c0280b-ab8f-48a7-9a0c-7f47899bb559", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"user_prompt = \"How would you explain LLMs to someone who doesn't have a background in Computer Science or AI?\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 14, |
||||
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get gpt-4o-mini to answer, with streaming\n", |
||||
"\n", |
||||
"def tech_tutor(question):\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL_GPT,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": question}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 15, |
||||
"id": "ca561874-dee3-456c-87f3-02f7e9a4ed4f", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"### Explaining Large Language Models (LLMs) to Non-Technical Audiences\n", |
||||
"\n", |
||||
"**What are LLMs?**\n", |
||||
"\n", |
||||
"Large Language Models (LLMs) are a type of artificial intelligence designed to understand and generate human language. Imagine having a super-smart assistant who can read, write, answer questions, or even create stories based on what you ask them. \n", |
||||
"\n", |
||||
"**How Do They Work?**\n", |
||||
"\n", |
||||
"1. **Training on Text**: LLMs are trained on vast amounts of written text from books, articles, websites, and more. During this training, they learn about grammar, facts, ideas, and the way people communicate.\n", |
||||
"\n", |
||||
"2. **Patterns and Context**: By analyzing this text, LLMs recognize patterns in how words and sentences relate to each other. They don't understand language like we do, but they can predict what words are likely to come next in a sentence based on the context.\n", |
||||
"\n", |
||||
"3. **Generating Responses**: When you ask an LLM a question or give it a prompt, it generates a response by choosing words that fit the patterns it's learned. It’s a bit like having a very advanced autocomplete feature on your phone, but much more sophisticated.\n", |
||||
"\n", |
||||
"**Why Are They Important?**\n", |
||||
"\n", |
||||
"LLMs are transforming various fields, such as:\n", |
||||
"\n", |
||||
"- **Customer Support**: They can understand and respond to customer queries automatically.\n", |
||||
"- **Content Creation**: They assist in generating articles, poetry, or even code.\n", |
||||
"- **Language Translation**: They help translate languages with high accuracy.\n", |
||||
"- **Personal Assistants**: They power smart assistants like Siri or Google Assistant, allowing for more natural conversations.\n", |
||||
"\n", |
||||
"**Real-World Examples of LLMs**:\n", |
||||
"\n", |
||||
"- **ChatGPT**: Developed by OpenAI, this model can engage in conversations, answer questions, and provide information on diverse topics.\n", |
||||
"- **Google BERT**: Enhances search engine results by better understanding user queries.\n", |
||||
" \n", |
||||
"### Resources to Learn More\n", |
||||
"\n", |
||||
"If you're curious and want to delve deeper into understanding LLMs and their underlying technology, here are some great resources:\n", |
||||
"\n", |
||||
"1. **Online Articles**:\n", |
||||
" - [What is a Large Language Model?](https://towardsdatascience.com/what-is-a-large-language-model-785a122ca835)\n", |
||||
" - [A Beginner's Guide to Large Language Models](https://www.analyticsvidhya.com/blog/2021/07/a-beginners-guide-to-large-language-models-llms/)\n", |
||||
"\n", |
||||
"2. **Video Tutorials**:\n", |
||||
" - [What are Large Language Models? | AI Explained](https://www.youtube.com/watch?v=ttlLuanHCHo) on YouTube\n", |
||||
" - [Deep Learning for NLP: Large Pre-trained Language Models](https://www.coursera.org/lecture/natural-language-processing-with-classifiers-and-deep-learning/the-power-of-large-pre-trained-language-models-u4XP5) on Coursera\n", |
||||
"\n", |
||||
"3. **Books**:\n", |
||||
" - *\"Artificial Intelligence: A Guide to Intelligent Systems\"* by Michael Negnevitsky provides a foundation for understanding AI.\n", |
||||
" - *\"Speech and Language Processing\"* by Daniel Jurafsky and James H. Martin offers a deeper dive into language processing technologies.\n", |
||||
"\n", |
||||
"By exploring these resources, you'll gain a better understanding of LLMs and their capabilities, as well as their profound implications for technology and society." |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"tech_tutor(user_prompt)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 16, |
||||
"id": "2a7c6670-ead8-41dc-9a0b-2b4caa40e846", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"OLLAMA_API = \"http://localhost:11434/api/chat\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 20, |
||||
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"**What are Large Language Models (LLMs)?**\n", |
||||
"=====================================\n", |
||||
"\n", |
||||
"Imagine having a super-smart, never-ending bookshelf filled with knowledge about language and human behavior. That's roughly what a Large Language Model (LLM) is: a computer program designed to understand and generate human-like text.\n", |
||||
"\n", |
||||
"### How do LLMs work?\n", |
||||
"\n", |
||||
"1. **Training data**: LLMs are trained on massive amounts of text data, often from the internet, books, or other sources.\n", |
||||
"2. **Algorithms**: The model uses complex algorithms to analyze this training data, learning patterns and relationships between words, sentences, and concepts.\n", |
||||
"3. **Self-supervised learning**: During training, the model generates its own text based on the input it's given, and then evaluates how well its output matches human-written text.\n", |
||||
"\n", |
||||
"### What can LLMs do?\n", |
||||
"\n", |
||||
"* **Text generation**: LLMs can produce coherent, grammatically correct text on a wide range of topics.\n", |
||||
"* **Language translation**: They can translate text from one language to another with surprising accuracy.\n", |
||||
"* **Chatbots and conversational AI**: LLMs are used in chatbots to respond to user queries, often providing helpful and personalized answers.\n", |
||||
"\n", |
||||
"### Examples of LLMs\n", |
||||
"\n", |
||||
"* **BERT (Bidirectional Encoder Representations from Transformers)**: A pioneering model that's the foundation for many modern LLMs.\n", |
||||
"* **Transformers**: An architecture that's become popular for its ability to handle long-range dependencies in text.\n", |
||||
"* **Language models like myself**: I'm a type of LLM, trained on a massive dataset and using transformer-based architectures.\n", |
||||
"\n", |
||||
"### Limitations and future directions\n", |
||||
"\n", |
||||
"While LLMs have made tremendous progress, they still have limitations:\n", |
||||
"\n", |
||||
"* **Bias and fairness**: Models can perpetuate existing biases if trained on biased data.\n", |
||||
"* **Explainability**: It's challenging to understand why a particular model made a certain decision or generated a specific response.\n", |
||||
"* **Adversarial attacks**: Models can be vulnerable to malicious input that manipulates their output.\n", |
||||
"\n", |
||||
"Researchers are actively exploring ways to improve LLMs, such as:\n", |
||||
"\n", |
||||
"* **Multi-task learning**: Training models on multiple tasks simultaneously to enhance performance.\n", |
||||
"* **Explainability techniques**: Developing methods to provide insights into model behavior and decision-making processes.\n", |
||||
"\n", |
||||
"**Getting started with LLMs**\n", |
||||
"\n", |
||||
"If you're interested in learning more about LLMs, I recommend checking out these resources:\n", |
||||
"\n", |
||||
"* **BERT's official documentation**: [BERT Documentation](https://bert.dev/)\n", |
||||
"* **The Transformers library**: [Hugging Face Transformers](https://huggingface.co/transformers/)\n", |
||||
"* **Large Language Models 101**: A beginner-friendly introduction to LLMs on Towards Data Science. [TowardsDataScience.com](https://towardsdatascience.com/large-language-models-101-8d2a6f3cdd23)\n", |
||||
"\n", |
||||
"I hope this explanation helped you understand what Large Language Models are and how they work!" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Get Llama 3.2 to answer\n", |
||||
"messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
" ]\n", |
||||
"\n", |
||||
"payload = {\n", |
||||
" \"model\": MODEL_LLAMA,\n", |
||||
" \"messages\": messages,\n", |
||||
" \"stream\": True\n", |
||||
" }\n", |
||||
"\n", |
||||
"response = ollama.chat(model=MODEL_LLAMA, messages=messages)\n", |
||||
"reply = response['message']['content']\n", |
||||
"display(Markdown(reply))" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,125 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "135ee16c-2741-4ebf-aca9-1d263083b3ce", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# End of week 1 exercise\n", |
||||
"\n", |
||||
"Build a tutor tool by using Ollama." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c1070317-3ed9-4659-abe3-828943230e03", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"import ollama\n", |
||||
"from IPython.display import Markdown, display, clear_output" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# constants\n", |
||||
"MODEL_LLAMA = 'llama3.2'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# here is the question; type over this to ask something new\n", |
||||
"\n", |
||||
"question = \"\"\"\n", |
||||
"Please explain what this code does and why:\n", |
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", |
||||
"\"\"\"\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get Llama 3.2 to answer, with streaming\n", |
||||
"\n", |
||||
"\n", |
||||
"messages=[{\"role\":\"user\",\"content\":question}]\n", |
||||
"\n", |
||||
"for chunk in ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True):\n", |
||||
" print(chunk['message']['content'], end='', flush=True)\n", |
||||
"\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d1f71014-e780-4d3f-a227-1a7c18158a4c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Alternative answer with streaming in Markdown!\n", |
||||
"\n", |
||||
"def stream_response():\n", |
||||
" messages = [{\"role\": \"user\", \"content\": question}]\n", |
||||
" \n", |
||||
" display_markdown = display(Markdown(\"\"), display_id=True)\n", |
||||
"\n", |
||||
" response_text = \"\"\n", |
||||
" for chunk in ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True):\n", |
||||
" \n", |
||||
" response_text += chunk['message']['content']\n", |
||||
" clear_output(wait=True) # Clears previous output\n", |
||||
" display_markdown.update(Markdown(response_text)) # Updates Markdown dynamically\n", |
||||
"\n", |
||||
"# Run the function\n", |
||||
"stream_response()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c38fdd2a-4b09-402c-ba46-999b22b0cb15", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.13.2" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,295 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Week 2 Practice Gradio by Creating Brochure\n", |
||||
"\n", |
||||
"- **Author**: [stoneskin](https://www.github.com/stoneskin)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1c104f45", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Implementation\n", |
||||
"\n", |
||||
"- Use OpenRouter.ai for all different types of LLM models, include many free models from Google,Meta and Deepseek\n", |
||||
"\n", |
||||
"Full code for the Week2 Gradio practice is below:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 19, |
||||
"id": "b8d3e1a1-ba54-4907-97c5-30f89a24775b", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"API key looks good so far\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"import os\n", |
||||
"import json\n", |
||||
"import requests\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr \n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"api_key = os.getenv('Open_Router_Key')\n", |
||||
"if api_key and api_key.startswith('sk-or-v1') and len(api_key)>10:\n", |
||||
" print(\"API key looks good so far\")\n", |
||||
"else:\n", |
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
||||
" \n", |
||||
" \n", |
||||
"openai = OpenAI(\n", |
||||
" api_key=api_key,\n", |
||||
" base_url=\"https://openrouter.ai/api/v1\"\n", |
||||
")\n", |
||||
"\n", |
||||
"MODEL_Gemini2FlashThink = 'google/gemini-2.0-flash-thinking-exp:free'\n", |
||||
"MODEL_Gemini2Pro ='google/gemini-2.0-pro-exp-02-05:free'\n", |
||||
"MODEL_Gemini2FlashLite = 'google/gemini-2.0-flash-lite-preview-02-05:free'\n", |
||||
"MODEL_Meta_Llama33 ='meta-llama/llama-3.3-70b-instruct:free'\n", |
||||
"MODEL_Deepseek_V3='deepseek/deepseek-chat:free'\n", |
||||
"MODEL_Deepseek_R1='deepseek/deepseek-r1-distill-llama-70b:free'\n", |
||||
"MODEL_Qwen_vlplus='qwen/qwen-vl-plus:free'\n", |
||||
"MODEL_OpenAi_o3mini = 'openai/o3-mini'\n", |
||||
"MODEL_OpenAi_4o = 'openai/gpt-4o-2024-11-20'\n", |
||||
"MODEL_Claude_Haiku = 'anthropic/claude-3.5-haiku-20241022'\n", |
||||
"\n", |
||||
"\n", |
||||
"\n", |
||||
"\n", |
||||
" \n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "24866034", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"MODEL=MODEL_Gemini2Pro # choice the model you want to use\n", |
||||
"\n", |
||||
"####################\n", |
||||
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"Include details of company culture, customers and careers/jobs if you have the information.\"\n", |
||||
"\n", |
||||
"##############################\n", |
||||
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"link_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"link_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
" {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"##############################\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"##############################\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", |
||||
" \n", |
||||
"##############################\n", |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt\n", |
||||
"\n", |
||||
"##############################\n", |
||||
"def get_links(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" print(\"get_links:\", result)\n", |
||||
" return json.loads(result)\n", |
||||
"\n", |
||||
"##############################\n", |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", |
||||
" return user_prompt\n", |
||||
"\n", |
||||
"##############################\n", |
||||
"def get_all_details(url):\n", |
||||
" print(\"get_all_details:\", url) \n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links(url)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "82abe132", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"########### modified stream brochure function for gradio ###################\n", |
||||
"def stream_brochure(company_name, url):\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
"\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" yield result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "902f203b", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7872\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7872/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 18, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"get_all_details: https://mlccc.herokuapp.com/\n", |
||||
"get_links: {\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://mlccc.herokuapp.com/about.html\"},\n", |
||||
" {\"type\": \"programs\", \"url\": \"https://mlccc.herokuapp.com/program.html\"},\n", |
||||
" {\"type\": \"camps\", \"url\": \"https://mlccc.herokuapp.com/camps.html\"},\n", |
||||
" {\"type\": \"community\", \"url\": \"https://mlccc.herokuapp.com/community.html\"},\n", |
||||
" {\"type\": \"support\", \"url\": \"https://mlccc.herokuapp.com/support.html\"},\n", |
||||
" {\"type\": \"press\", \"url\": \"https://mlccc.herokuapp.com/press.html\"},\n", |
||||
" {\"type\": \"newsletter\", \"url\": \"https://mlccc.herokuapp.com/newsletter.html\"},\n", |
||||
" {\"type\": \"testimonials\", \"url\": \"https://mlccc.herokuapp.com/testimonial.html\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"Found links: {'links': [{'type': 'about page', 'url': 'https://mlccc.herokuapp.com/about.html'}, {'type': 'programs', 'url': 'https://mlccc.herokuapp.com/program.html'}, {'type': 'camps', 'url': 'https://mlccc.herokuapp.com/camps.html'}, {'type': 'community', 'url': 'https://mlccc.herokuapp.com/community.html'}, {'type': 'support', 'url': 'https://mlccc.herokuapp.com/support.html'}, {'type': 'press', 'url': 'https://mlccc.herokuapp.com/press.html'}, {'type': 'newsletter', 'url': 'https://mlccc.herokuapp.com/newsletter.html'}, {'type': 'testimonials', 'url': 'https://mlccc.herokuapp.com/testimonial.html'}]}\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"view = gr.Interface(\n", |
||||
" fn=stream_brochure,\n", |
||||
" inputs=[gr.Textbox(label=\"company Name\"), gr.Textbox(label=\"URL\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Response:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "llms", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,286 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1194d35b-0b9f-4eb4-a539-5ddf55523367", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"#import anthropic\n", |
||||
"import ollama\n", |
||||
"import google.generativeai\n", |
||||
"from IPython.display import Markdown, display, update_display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f8a1f0b3-6d93-4de1-bc79-2132726598e3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#constants\n", |
||||
"MODEL=\"llama3.2\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "88fe4149-1ef5-4007-a117-6d3ccab3e3c3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"# Print the key prefixes to help with any debugging\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||
"\n", |
||||
"if google_api_key:\n", |
||||
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"Google API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d186cf6e-fadd-450c-821c-df32e2574f5d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# This is the set up code for Gemini\n", |
||||
"\n", |
||||
"google.generativeai.configure()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "19a55117-f2ac-4a58-af6b-8b75259e80df", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are an assistant that is great at telling jokes\"\n", |
||||
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "908f69b1-54f8-42da-827b-f667631bc666", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"prompts = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4ec81488-883a-446f-91cf-2b3d92bbd3ba", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# The API for Gemini\n", |
||||
"gemini = google.generativeai.GenerativeModel(\n", |
||||
" model_name='gemini-2.0-flash-exp',\n", |
||||
" system_instruction=system_message\n", |
||||
")\n", |
||||
"response = gemini.generate_content(user_prompt)\n", |
||||
"print(response.text)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "baf411fa-48bd-46a3-8bc8-1b22d0888a1a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# API for ollama\n", |
||||
"response = ollama.chat(model=MODEL,messages=prompts)\n", |
||||
"print(response['message']['content'])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "74ba5fc4-e4c6-44ee-b66f-e76d847933d2", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Ardiversarial conversation between models" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fd348154-18fa-4da8-815a-77f5f00107c3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's make a conversation between Ollama and Gemini\n", |
||||
"# Adjusted models accordingly\n", |
||||
"\n", |
||||
"ollama_model = \"llama3.2\"\n", |
||||
"gemini_model = \"gemini-2.0-flash-exp\"\n", |
||||
"\n", |
||||
"#ollama_system = \"You are a chatbot who is very argumentative; \\\n", |
||||
"#you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", |
||||
"\n", |
||||
"ollama_system=\"You are a chatbot talking with the other person try to convince them to buy your proct of an ai app, \\\n", |
||||
"apply marketing strategies to make this client buy your product, use short clear explanations\"\n", |
||||
"\n", |
||||
"#gemini_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", |
||||
"#everything the other person says, or find common ground. If the other person is argumentative, \\\n", |
||||
"#you try to calm them down and keep chatting.\"\n", |
||||
"\n", |
||||
"gemini_system = \"You are the chatbot triying to be convinced by another person to buy their product, \\\n", |
||||
"ask important short questions and see if it is worth to give it a go, dont be too naive or easy go client\"\n", |
||||
"\n", |
||||
"ollama_messages = [\"Hi there\"]\n", |
||||
"gemini_messages = [\"Hi\"]\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "becf327a-5485-4e78-8002-03272a99a3b9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_ollama():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": ollama_system}]\n", |
||||
" for ollama_msg, gemini_msg in zip(ollama_messages, gemini_messages):\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": ollama_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gemini_msg})\n", |
||||
" \n", |
||||
" response = ollama.chat(model=ollama_model, messages=messages)\n", |
||||
" \n", |
||||
" return response['message']['content']\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d0c6dbe7-0baf-4c43-a03b-9134654685f4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"call_ollama()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f68a134a-279a-4629-aec6-171587378991", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_gemini():\n", |
||||
" gemini = google.generativeai.GenerativeModel(\n", |
||||
" model_name=gemini_model,\n", |
||||
" system_instruction=gemini_system\n", |
||||
" )\n", |
||||
"\n", |
||||
" # Build a list of dictionaries representing the conversation\n", |
||||
" conversation = []\n", |
||||
" for ollama_msg, gemini_msg in zip(ollama_messages, gemini_messages):\n", |
||||
" conversation.append({\"role\": \"user\", \"content\": ollama_msg})\n", |
||||
" conversation.append({\"role\": \"assistant\", \"content\": gemini_msg})\n", |
||||
" conversation.append({\"role\": \"user\", \"content\": ollama_messages[-1]})\n", |
||||
"\n", |
||||
" # Format the conversation into a string for the prompt\n", |
||||
" prompt = \"\"\n", |
||||
" for msg in conversation:\n", |
||||
" prompt += f\"{msg['role'].capitalize()}: {msg['content']}\\n\"\n", |
||||
"\n", |
||||
" message = gemini.generate_content(prompt)\n", |
||||
" \n", |
||||
" return message.text\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7511003a-f2b6-45f5-8cb0-1c9190d33ce9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"call_gemini()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d0e81f1f-9754-4790-8b73-5f52fef4ea64", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"call_ollama()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6fbe59f6-a3ef-4062-ab4b-b999f6d1abe9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ollama_messages = [\"Hi there\"]\n", |
||||
"gemini_messages = [\"Hi\"]\n", |
||||
"\n", |
||||
"print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n", |
||||
"print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", |
||||
"\n", |
||||
"for i in range(5):\n", |
||||
" # Call Ollama to generate the next message\n", |
||||
" ollama_next = call_ollama() \n", |
||||
" print(f\"Ollama:\\n{ollama_next}\\n\")\n", |
||||
" ollama_messages.append(ollama_next)\n", |
||||
" \n", |
||||
" # Call Gemini to generate the next message\n", |
||||
" gemini_next = call_gemini() \n", |
||||
" print(f\"Gemini:\\n{gemini_next}\\n\")\n", |
||||
" gemini_messages.append(gemini_next)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9525600b-082e-417f-9088-c6483a613bf3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.13.2" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,614 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "8b0e11f2-9ea4-48c2-b8d2-d0a4ba967827", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Gradio Day!\n", |
||||
"\n", |
||||
"Today we will build User Interfaces using the outrageously simple Gradio framework.\n", |
||||
"\n", |
||||
"Prepare for joy!\n", |
||||
"\n", |
||||
"Please note: your Gradio screens may appear in 'dark mode' or 'light mode' depending on your computer settings." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"#import google.generativeai\n", |
||||
"#import anthropic\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"id": "d1715421-cead-400b-99af-986388a97aff", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import gradio as gr # oh yeah!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 17, |
||||
"id": "22586021-1795-4929-8079-63f5bb4edd4c", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"API key looks good so far\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n", |
||||
"\n", |
||||
"# openai = OpenAI()\n", |
||||
"\n", |
||||
"# claude = anthropic.Anthropic()\n", |
||||
"\n", |
||||
"# google.generativeai.configure()\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"api_key = os.getenv('Open_Router_Key')\n", |
||||
"if api_key and api_key.startswith('sk-or-v1') and len(api_key)>10:\n", |
||||
" print(\"API key looks good so far\")\n", |
||||
"else:\n", |
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
||||
" \n", |
||||
" \n", |
||||
"openai = OpenAI(\n", |
||||
" api_key=api_key,\n", |
||||
" base_url=\"https://openrouter.ai/api/v1\"\n", |
||||
")\n", |
||||
"\n", |
||||
"MODEL_Gemini2FlashLite = 'google/gemini-2.0-flash-lite-preview-02-05:free'\n", |
||||
"MODEL_Gemini2FlashThink = 'google/gemini-2.0-flash-thinking-exp:free'\n", |
||||
"MODEL_Gemini2Pro ='google/gemini-2.0-pro-exp-02-05:free'\n", |
||||
"MODEL_Meta_Llama33 ='meta-llama/llama-3.3-70b-instruct:free'\n", |
||||
"MODEL_Deepseek_V3='deepseek/deepseek-chat:free'\n", |
||||
"MODEL_Deepseek_R1='deepseek/deepseek-r1-distill-llama-70b:free'\n", |
||||
"MODEL_Qwen_vlplus='qwen/qwen-vl-plus:free'\n", |
||||
"MODEL_OpenAi_o3mini = 'openai/o3-mini'\n", |
||||
"MODEL_OpenAi_4o = 'openai/gpt-4o-2024-11-20'\n", |
||||
"MODEL_Claude_Haiku = 'anthropic/claude-3.5-haiku-20241022'\n", |
||||
"\n", |
||||
"\n", |
||||
"Default_Model = MODEL_Deepseek_V3\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A generic system message - no more snarky adversarial AIs!\n", |
||||
"\n", |
||||
"system_message = \"You are a helpful assistant\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's wrap a call to GPT-4o-mini in a simple function\n", |
||||
"\n", |
||||
"def message_gpt(prompt):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message},\n", |
||||
" {\"role\": \"user\", \"content\": prompt}\n", |
||||
" ]\n", |
||||
" completion = openai.chat.completions.create(\n", |
||||
" model=Default_Model,\n", |
||||
" messages=messages,\n", |
||||
" )\n", |
||||
" return completion.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"id": "aef7d314-2b13-436b-b02d-8de3b72b193f", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'Today is October 26, 2023.\\n'" |
||||
] |
||||
}, |
||||
"execution_count": 7, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"# This can reveal the \"training cut off\", or the most recent date in the training data\n", |
||||
"\n", |
||||
"message_gpt(\"What is today's date?\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "f94013d1-4f27-4329-97e8-8c58db93636a", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## User Interface time!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 8, |
||||
"id": "bc664b7a-c01d-4fea-a1de-ae22cdd5141a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# here's a simple function\n", |
||||
"\n", |
||||
"def shout(text):\n", |
||||
" print(f\"Shout has been called with input {text}\")\n", |
||||
" return text.upper()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"id": "083ea451-d3a0-4d13-b599-93ed49b975e4", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Shout has been called with input hello\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'HELLO'" |
||||
] |
||||
}, |
||||
"execution_count": 9, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"shout(\"hello\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "08f1f15a-122e-4502-b112-6ee2817dda32", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# The simplicity of gradio. This might appear in \"light mode\" - I'll show you how to make this in dark mode later.\n", |
||||
"\n", |
||||
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c9a359a4-685c-4c99-891c-bb4d1cb7f426", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Adding share=True means that it can be accessed publicly\n", |
||||
"# A more permanent hosting is available using a platform called Spaces from HuggingFace, which we will touch on next week\n", |
||||
"# NOTE: Some Anti-virus software and Corporate Firewalls might not like you using share=True. If you're at work or on a work network, I suggest skipping this test.\n", |
||||
"\n", |
||||
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\").launch(share=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cd87533a-ff3a-4188-8998-5bedd5ba2da3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Adding inbrowser=True opens up a new browser window automatically\n", |
||||
"\n", |
||||
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "b42ec007-0314-48bf-84a4-a65943649215", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Forcing dark mode\n", |
||||
"\n", |
||||
"Gradio appears in light mode or dark mode depending on the settings of the browser and computer. There is a way to force gradio to appear in dark mode, but Gradio recommends against this as it should be a user preference (particularly for accessibility reasons). But if you wish to force dark mode for your screens, below is how to do it." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e8129afa-532b-4b15-b93c-aa9cca23a546", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n", |
||||
"\n", |
||||
"force_dark_mode = \"\"\"\n", |
||||
"function refresh() {\n", |
||||
" const url = new URL(window.location);\n", |
||||
" if (url.searchParams.get('__theme') !== 'dark') {\n", |
||||
" url.searchParams.set('__theme', 'dark');\n", |
||||
" window.location.href = url.href;\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\"\"\"\n", |
||||
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\", js=force_dark_mode).launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3cc67b26-dd5f-406d-88f6-2306ee2950c0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Inputs and Outputs\n", |
||||
"\n", |
||||
"view = gr.Interface(\n", |
||||
" fn=shout,\n", |
||||
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n", |
||||
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f235288e-63a2-4341-935b-1441f9be969b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And now - changing the function from \"shout\" to \"message_gpt\"\n", |
||||
"\n", |
||||
"view = gr.Interface(\n", |
||||
" fn=message_gpt,\n", |
||||
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n", |
||||
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "af9a3262-e626-4e4b-80b0-aca152405e63", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's use Markdown\n", |
||||
"# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n", |
||||
"# I'm taking advantage of system_message being a global variable, used back in the message_gpt function (go take a look)\n", |
||||
"# Not a great software engineering practice, but quite common during Jupyter Lab R&D!\n", |
||||
"\n", |
||||
"system_message = \"You are a helpful assistant that responds in markdown\"\n", |
||||
"\n", |
||||
"view = gr.Interface(\n", |
||||
" fn=message_gpt,\n", |
||||
" inputs=[gr.Textbox(label=\"Your message:\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Response:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 15, |
||||
"id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's create a call that streams back results\n", |
||||
"# If you'd like a refresher on Generators (the \"yield\" keyword),\n", |
||||
"# Please take a look at the Intermediate Python notebook in week1 folder.\n", |
||||
"\n", |
||||
"def stream_gpt(prompt):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message},\n", |
||||
" {\"role\": \"user\", \"content\": prompt}\n", |
||||
" ]\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=Default_Model,\n", |
||||
" messages=messages,\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" yield result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0bb1f789-ff11-4cba-ac67-11b815e29d09", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"view = gr.Interface(\n", |
||||
" fn=stream_gpt,\n", |
||||
" inputs=[gr.Textbox(label=\"Your message:\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Response:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bbc8e930-ba2a-4194-8f7c-044659150626", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# def stream_claude(prompt):\n", |
||||
"# result = claude.messages.stream(\n", |
||||
"# model=\"claude-3-haiku-20240307\",\n", |
||||
"# max_tokens=1000,\n", |
||||
"# temperature=0.7,\n", |
||||
"# system=system_message,\n", |
||||
"# messages=[\n", |
||||
"# {\"role\": \"user\", \"content\": prompt},\n", |
||||
"# ],\n", |
||||
"# )\n", |
||||
"# response = \"\"\n", |
||||
"# with result as stream:\n", |
||||
"# for text in stream.text_stream:\n", |
||||
"# response += text or \"\"\n", |
||||
"# yield response" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a0066ffd-196e-4eaf-ad1e-d492958b62af", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"Default_Model=MODEL_Claude_Haiku\n", |
||||
"view = gr.Interface(\n", |
||||
" fn=stream_gpt,\n", |
||||
" inputs=[gr.Textbox(label=\"Your message:\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Response:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "bc5a70b9-2afe-4a7c-9bed-2429229e021b", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Minor improvement\n", |
||||
"\n", |
||||
"I've made a small improvement to this code.\n", |
||||
"\n", |
||||
"Previously, it had these lines:\n", |
||||
"\n", |
||||
"```\n", |
||||
"for chunk in result:\n", |
||||
" yield chunk\n", |
||||
"```\n", |
||||
"\n", |
||||
"There's actually a more elegant way to achieve this (which Python people might call more 'Pythonic'):\n", |
||||
"\n", |
||||
"`yield from result`\n", |
||||
"\n", |
||||
"I cover this in more detail in the Intermediate Python notebook in the week1 folder - take a look if you'd like more." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 20, |
||||
"id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_model(prompt, model):\n", |
||||
"    # stream_gpt reads the module-level Default_Model, so we must rebind the\n", |
||||
"    # global here - a plain local assignment would leave the dropdown with no effect.\n", |
||||
"    global Default_Model\n", |
||||
"    if model==\"GPT\":\n", |
||||
"        # NOTE: the 'GPT' option currently maps to a free Gemini model via OpenRouter\n", |
||||
"        Default_Model=MODEL_Gemini2FlashThink\n", |
||||
"        result = stream_gpt(prompt)\n", |
||||
"    elif model==\"Claude\":\n", |
||||
"        Default_Model=MODEL_Claude_Haiku\n", |
||||
"        result = stream_gpt(prompt)\n", |
||||
"    else:\n", |
||||
"        raise ValueError(\"Unknown model\")\n", |
||||
"    yield from result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"view = gr.Interface(\n", |
||||
" fn=stream_model,\n", |
||||
" inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Response:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d933865b-654c-4b92-aa45-cf389f1eda3d", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Building a company brochure generator\n", |
||||
"\n", |
||||
"Now you know how - it's simple!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "92d7c49b-2e0e-45b3-92ce-93ca9f962ef4", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Before you read the next few cells</h2>\n", |
||||
" <span style=\"color:#900;\">\n", |
||||
" Try to do this yourself - go back to the company brochure in week1, day5 and add a Gradio UI to the end. Then come and look at the solution.\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" url: str\n", |
||||
" title: str\n", |
||||
" text: str\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c701ec17-ecd5-4000-9f68-34634c8ed49d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# With massive thanks to Bill G. who noticed that a prior version of this had a bug! Now fixed.\n", |
||||
"\n", |
||||
"system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n", |
||||
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5def90e0-4343-4f58-9d4a-0e36e445efa4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_brochure(company_name, url, model):\n", |
||||
" prompt = f\"Please generate a company brochure for {company_name}. Here is their landing page:\\n\"\n", |
||||
" prompt += Website(url).get_contents()\n", |
||||
" if model==\"GPT\":\n", |
||||
" result = stream_gpt(prompt)\n", |
||||
" elif model==\"Claude\":\n", |
||||
" result = stream_claude(prompt)\n", |
||||
" else:\n", |
||||
" raise ValueError(\"Unknown model\")\n", |
||||
" yield from result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "66399365-5d67-4984-9d47-93ed26c0bd3d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"view = gr.Interface(\n", |
||||
" fn=stream_brochure,\n", |
||||
" inputs=[\n", |
||||
" gr.Textbox(label=\"Company name:\"),\n", |
||||
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", |
||||
" gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Brochure:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ede97ca3-a0f8-4f6e-be17-d1de7fef9cc0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "llms", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,308 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "4c3c6553-daa4-4a03-8017-15d0cad8f280", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# About Mini Project\n", |
||||
"\n", |
||||
"Mini project for hearing impaired people, using tools, suggesting songs according to a certain genre and in sign language. Speech to text converter with multiple language support." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a32a79cb-3d16-4b3b-a029-a059bd0b1c0b", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Extra requirements\n", |
||||
"- pip install pydub simpleaudio speechrecognition pipwin pyaudio\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3e214aa3-a977-434f-a436-90a89b81a5ee", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d654cb96-9bcd-4b64-bd79-2d27fa6a62d0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b2d9214f-25d0-4f09-ba88-641beeaa20db", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful assistant for hearing impaired people. \"\n", |
||||
"system_message += \"Your mission is convert text to speech and speech to text. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3d9a1478-08bf-4195-8f38-34c29757012f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"songs_with_signs = {\n", |
||||
" \"electronic\": (\"God is a dj\", \"https://www.youtube.com/watch?v=bhSB8EEnCAM\", \"Faithless\"), \n", |
||||
" \"pop\": (\"Yitirmeden\", \"https://www.youtube.com/watch?v=aObdAXq1ZIo\", \"Pinhani\"), \n", |
||||
" \"rock\": (\"Bohemian Rhapsody\", \"https://www.youtube.com/watch?v=sjln9OMOw-0\", \"Queen\")\n", |
||||
"}\n", |
||||
"\n", |
||||
"def get_songs_with_sign_language(genre):\n", |
||||
"    print(f\"Tool get_songs_with_sign_language called for {genre}\")\n", |
||||
"    # normalize case so 'Rock' matches the lowercase dict keys\n", |
||||
"    genre = genre.lower()\n", |
||||
"    return songs_with_signs.get(genre, \"Unknown\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "93a3d7ee-78c2-4e19-b7e4-8239b07aaecc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_songs_with_sign_language(\"rock\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7307aa61-86fe-4c46-9f9d-faa3d1fb1eb7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"song_function = {\n", |
||||
" \"name\": \"get_songs_with_sign_language\",\n", |
||||
" \"description\": \"Get the corresponding song information for the specified given music genre. Call this whenever you need to know the songs with specific genre and in sign language, for example when a customer asks 'Suggest me sign language supported songs'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"genre\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The music genre that the customer wants to listen-watch to\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"genre\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "160d790c-dda6-4c6e-b814-8be64ca7086b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"tools = [{\"type\": \"function\", \"function\": song_function}]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "96cdf319-11cd-4be2-8830-097225047d65", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def handle_tool_call(message):\n", |
||||
"    tool_call = message.tool_calls[0]\n", |
||||
"    arguments = json.loads(tool_call.function.arguments)\n", |
||||
"    genre = arguments.get('genre')\n", |
||||
"    song = get_songs_with_sign_language(genre)\n", |
||||
"    # get_songs_with_sign_language returns the string \"Unknown\" for unmatched\n", |
||||
"    # genres; indexing that string would yield single characters, so branch here.\n", |
||||
"    if song == \"Unknown\":\n", |
||||
"        song_info, link = \"Unknown\", \"\"\n", |
||||
"    else:\n", |
||||
"        song_info, link = song[2] + \": \" + song[1], song[1]\n", |
||||
"    response = {\n", |
||||
"        \"role\": \"tool\",\n", |
||||
"        \"content\": json.dumps({\"genre\": genre,\"song\": song_info}),\n", |
||||
"        \"tool_call_id\": tool_call.id\n", |
||||
"    }\n", |
||||
"    return response, link" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bbd8ad0c-135b-406f-8ab9-0e1f9b58538d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", |
||||
" genre = None\n", |
||||
" \n", |
||||
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" response, genre = handle_tool_call(message)\n", |
||||
" messages.append(message)\n", |
||||
" messages.append(response)\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" \n", |
||||
" reply = response.choices[0].message.content\n", |
||||
" history += [{\"role\":\"assistant\", \"content\":reply}]\n", |
||||
" \n", |
||||
" return history, genre" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "69f43096-3557-4218-b0de-bd286237fdeb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import speech_recognition as sr\n", |
||||
"from pydub import AudioSegment\n", |
||||
"import simpleaudio as sa\n", |
||||
"\n", |
||||
"def listener():\n", |
||||
" recognizer = sr.Recognizer()\n", |
||||
" \n", |
||||
" with sr.Microphone() as source:\n", |
||||
" print(\"Listening... Speak now!\")\n", |
||||
" recognizer.adjust_for_ambient_noise(source) # Adjust for background noise\n", |
||||
" audio = recognizer.listen(source)\n", |
||||
" \n", |
||||
" try:\n", |
||||
" print(\"Processing speech...\")\n", |
||||
" text = recognizer.recognize_google(audio) # Use Google Speech-to-Text\n", |
||||
" print(f\"You said: {text}\")\n", |
||||
" return text\n", |
||||
" except sr.UnknownValueError:\n", |
||||
" print(\"Sorry, I could not understand what you said.\")\n", |
||||
" return None\n", |
||||
" except sr.RequestError:\n", |
||||
" print(\"Could not request results, please check your internet connection.\")\n", |
||||
" return None\n", |
||||
"\n", |
||||
"# Example usage:\n", |
||||
"text = listener() # Listen for speech\n", |
||||
"if text:\n", |
||||
" print(f\"You just said: {text}\") " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "23c9deeb-d9ad-439a-a39d-7eac9553bd5e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import gradio as gr\n", |
||||
"\n", |
||||
"convert = gr.State(False)\n", |
||||
"def toggle_convert(current_value):\n", |
||||
" return not current_value" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "32d3ea9f-fe3c-4cc5-9902-550c63c58a69", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import gradio as gr\n", |
||||
"\n", |
||||
"with gr.Blocks() as ui:\n", |
||||
" with gr.Tab(\"Chat\") as chat_interface:\n", |
||||
" with gr.Row():\n", |
||||
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n", |
||||
" video = gr.HTML(f\"<a href=''> Example song will appear here </a>\")\n", |
||||
" with gr.Row():\n", |
||||
" entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n", |
||||
" with gr.Row():\n", |
||||
" clear = gr.Button(\"Clear\")\n", |
||||
" \n", |
||||
" def do_entry(message, history):\n", |
||||
" history += [{\"role\":\"user\", \"content\":message}]\n", |
||||
" return \"\", history\n", |
||||
" \n", |
||||
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", |
||||
" chat, inputs=chatbot, outputs=[chatbot, video]\n", |
||||
" )\n", |
||||
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", |
||||
" with gr.Tab(\"Speech to text converter\") as speech_to_text:\n", |
||||
" text_output = gr.Markdown(\"Press the button to start voice recognition\")\n", |
||||
" listen_button = gr.Button(\"Convert Voice to Text\")\n", |
||||
" language = gr.Dropdown([\"English\", \"Turkish\", \"Greek\", \"Arabic\"], label=\"Select output language\", value=\"English\")\n", |
||||
"\n", |
||||
" def update_text(language):\n", |
||||
" \"\"\"Calls the listener and updates the markdown output in specific language.\"\"\"\n", |
||||
" text = listener() # Replace with real speech-to-text function\n", |
||||
" system_prompt = f\"You are a useful translator. Convert text to {language}. Do not add aditional data, only translate it.\"\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": text}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" return f\"**Converted Text:** {response.choices[0].message.content}\"\n", |
||||
"\n", |
||||
" listen_button.click(update_text, inputs=[language], outputs=[text_output])\n", |
||||
"\n", |
||||
"ui.launch(inbrowser=True, share=True)\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "26814e88-ee29-414d-88a4-f19b2f94e6f4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.12.0" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1 @@
|
||||
"Your google auth credentials." |
@ -0,0 +1,35 @@
|
||||
import os |
||||
from google.oauth2.credentials import Credentials |
||||
from google_auth_oauthlib.flow import InstalledAppFlow |
||||
from google.auth.transport.requests import Request |
||||
from googleapiclient.discovery import build # Add this import |
||||
|
||||
SCOPES = ["https://www.googleapis.com/auth/calendar.events"] |
||||
|
||||
# Default locations of the cached OAuth token and the OAuth client secrets.
# NOTE(review): machine-specific absolute paths — consider environment
# variables or a config file instead of hard-coding them.
DEFAULT_TOKEN_PATH = r"C:\Users\Legion\Desktop\projects\medical_prescription_to_google_calender\token.json"
DEFAULT_CREDENTIALS_PATH = r"C:\Users\Legion\Desktop\projects\medical_prescription_to_google_calender\credentials.json"


def authenticate_google_calender(token_path=DEFAULT_TOKEN_PATH,
                                 credentials_path=DEFAULT_CREDENTIALS_PATH):
    """Authenticate with Google Calendar and return an API service object.

    Loads cached user credentials from ``token_path`` when present; otherwise
    refreshes an expired token (if a refresh token exists) or runs the
    installed-app OAuth flow using ``credentials_path``. New/refreshed
    credentials are written back to ``token_path`` so later runs skip the
    browser consent step.

    Args:
        token_path: JSON file used to cache user credentials.
        credentials_path: OAuth client secrets JSON file.

    Returns:
        A Calendar v3 service object, or None if building the service failed.
    """
    creds = None

    if os.path.exists(token_path):
        creds = Credentials.from_authorized_user_file(token_path, SCOPES)

    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Silent refresh — no user interaction needed.
            creds.refresh(Request())
        else:
            # First run (or revoked token): open a browser for user consent.
            flow = InstalledAppFlow.from_client_secrets_file(credentials_path, SCOPES)
            creds = flow.run_local_server(port=0)

        # Cache the (new or refreshed) credentials for next time.
        with open(token_path, "w") as token_file:
            token_file.write(creds.to_json())

    # Build and return the service instead of just credentials
    try:
        service = build('calendar', 'v3', credentials=creds)
        return service
    except Exception as e:
        print(f"Error building service: {e}")
        return None
||||
|
||||
if __name__ == "__main__": |
||||
authenticate_google_calender() |
@ -0,0 +1,64 @@
|
||||
from googleapiclient.discovery import build |
||||
from calendar_auth import authenticate_google_calender |
||||
from parsing_json import format_calendar_events |
||||
from datetime import datetime, timedelta |
||||
|
||||
def create_event(service, event_details):
    """Creates an event in Google Calendar.

    Args:
        service: An authenticated Calendar v3 service object.
        event_details: Event resource dict (summary/start/end/...) in the
            shape expected by the ``events.insert`` endpoint.

    Returns:
        The created event resource dict on success, or None on failure.
        Errors are printed rather than raised so one bad event does not
        abort a whole batch of inserts.
    """
    try:
        event = service.events().insert(calendarId='primary', body=event_details).execute()
        print(f"Event created: {event.get('htmlLink')}")
        # BUG FIX: the original discarded the created resource; return it so
        # callers can distinguish success from failure and inspect the link.
        return event
    except Exception as e:
        print(f"Error creating event: {str(e)}")
        return None
||||
|
||||
def convert_time_to_24hr(time_str):
    """Convert a clock time like '10:30 am' into 24-hour 'HH:MM:SS' form.

    Falls back to '09:00:00' for empty input, the literal string 'none'
    (any case), or anything that does not parse as '%I:%M %p'.
    """
    fallback = '09:00:00'
    if not time_str or time_str.lower() == 'none':
        return fallback
    try:
        return datetime.strptime(time_str, '%I:%M %p').strftime('%H:%M:%S')
    except ValueError:
        return fallback
||||
|
||||
def convert_to_gcal_events(formatted_events):
    """Converts formatted events into Google Calendar's API format.

    All-day entries (carrying a 'date' start) keep their dates; timed
    entries (carrying a 'dateTime' start) get a fixed 30-minute duration
    anchored at the source start time. Every event gets a 10-minute popup
    reminder.
    """
    tz = 'Asia/Kolkata'
    gcal_events = []

    for source in formatted_events:
        converted = {
            'summary': source['summary'],
            'reminders': {
                'useDefault': False,
                'overrides': [{'method': 'popup', 'minutes': 10}],
            },
        }

        if 'date' in source['start']:
            # All-day event (tests / follow-ups): only a calendar date.
            converted['start'] = {'date': source['start']['date'], 'timeZone': tz}
            converted['end'] = {'date': source['end']['date'], 'timeZone': tz}
        else:
            # Timed event (medicine schedule): 30 minutes from the start.
            begin = datetime.strptime(source['start']['dateTime'], '%Y-%m-%dT%H:%M:%S')
            finish = begin + timedelta(minutes=30)
            converted['start'] = {'dateTime': begin.isoformat(), 'timeZone': tz}
            converted['end'] = {'dateTime': finish.isoformat(), 'timeZone': tz}

        gcal_events.append(converted)

    return gcal_events
@ -0,0 +1,26 @@
|
||||
# End-to-end pipeline: prescription image -> OCR -> structured JSON ->
# Google Calendar events.
# NOTE(review): wildcard imports hide where each name comes from (e.g. `json`
# arrives transitively via parsing_json) — prefer explicit imports.
from ocr import *
from calendar_auth import *
from create_calender_events import *
from parsing_json import *
from preprocess import *

# NOTE(review): machine-specific absolute path — consider a CLI argument.
image_path = r"C:\Users\Legion\Desktop\projects\medical_prescription_to_google_calender\test_data\prescription_page-0001.jpg"

# 1) OCR the prescription image into raw text.
extracted_text = extract_text_from_image(image_path=image_path)
print(extracted_text)
# 2) Normalize OCR whitespace/punctuation artifacts.
cleaned_text = clean_text(extracted_text)
print(cleaned_text)
# 3) LLM pass: structure the text into medicines/tests/follow-ups JSON.
structured_data = preprocess_extracted_text(cleaned_text)
print(structured_data)
# 4) Resolve relative dates ("after N months") into concrete dates.
final_structured_data = process_dates(structured_data)
print(final_structured_data)
# 5) Expand the structured data into per-occurrence calendar events.
formatted_calender_events = format_calendar_events(final_structured_data)
print(formatted_calender_events)
# 6) Backfill any missing required event fields with defaults.
validated_events = [validate_event(event) for event in formatted_calender_events]
for event in validated_events[:5]:
    print(json.dumps(event, indent=2))
# 7) Authenticate and push everything to Google Calendar.
service = authenticate_google_calender()
gcal_events = convert_to_gcal_events(validated_events)

for event in gcal_events:
    create_event(service, event)
@ -0,0 +1,71 @@
|
||||
import os
from openai import OpenAI
from dotenv import load_dotenv
import base64
from PIL import Image
import re

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")  # NOTE(review): read but never used — OpenAI() reads the env var itself

# Vision-capable model used for OCR of prescription images.
MODEL = "gpt-4o"

openai = OpenAI()
||||
|
||||
def encode_image(image_path):
    """Read the file at ``image_path`` and return its base64 text encoding."""
    with open(image_path, "rb") as handle:
        raw_bytes = handle.read()
    return base64.b64encode(raw_bytes).decode("utf-8")
||||
|
||||
def extract_text_from_image(image_path):
    """OCR a prescription image using the OpenAI vision model.

    Base64-encodes the file at ``image_path``, sends it to the chat
    completions endpoint (model ``MODEL``) with a system prompt asking for
    verbatim, text-only extraction, and returns the text content of the
    first choice.
    """
    # NOTE(review): no error handling — a network/API failure propagates to
    # the caller; max_tokens=1000 may truncate very dense prescriptions.
    response = openai.chat.completions.create(
        model = MODEL,
        max_tokens = 1000,
        messages=[
            {
                # Keep the model focused on literal transcription.
                "role": "system", "content": """You are an OCR assistant that extracts text from medical
                                                prescription images. Extract all the text exactly as it
                                                appears in the prescription image. Dont include images. Only
                                                extract text."""
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Extract text from this image: "
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            # Inline the image as a base64 data URL.
                            "url": f"data:image/jpeg;base64,{encode_image(image_path)}"
                        }
                    }
                ]
            }
        ]
    )
    return response.choices[0].message.content
||||
|
||||
import re |
||||
|
||||
def clean_text(text):
    """Normalize OCR output: strip stray symbols and tighten whitespace.

    Pipeline (order matters): drop hyphens, remove characters outside
    word/space/.,()%/, collapse whitespace runs, collapse repeated newlines,
    then guarantee a space after '.' and ','.
    """
    substitutions = [
        (r'-', ' '),                   # remove all hyphens
        (r'[^\w\s.,()%/]', ''),        # drop symbols, keep useful punctuation
        (r'\s+', ' '),                 # squash runs of whitespace to one space
        (r'\n+', '\n'),                # collapse blank lines (no-op after the previous step; kept for parity)
        (r'([.,])([^\s])', r'\1 \2'),  # ensure spacing after '.' and ','
    ]
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text.strip()
||||
|
||||
|
||||
|
@ -0,0 +1,120 @@
|
||||
import json |
||||
import re |
||||
from datetime import datetime, timedelta |
||||
|
||||
# Default number of days to schedule indefinitely recurring events (1 year) |
||||
DEFAULT_DURATION_DAYS = 365 |
||||
|
||||
# Function to assign a default time for general terms like "before breakfast", etc. |
||||
def assign_time(timing):
    """Map vague timing phrases ('after lunch', 'daily', ...) to clock times.

    Matching is case-insensitive; unrecognized phrases are returned
    unchanged, so explicit times like '10:30 AM' pass straight through.
    """
    defaults = {
        "random": "09:00 AM",
        "daily": "09:00 AM",
        "before breakfast": "07:00 AM",
        "after breakfast": "08:30 AM",
        "before lunch": "12:00 PM",
        "after lunch": "01:30 PM",
        "before dinner": "07:00 PM",
        "after dinner": "08:30 PM",
    }
    key = timing.lower()
    return defaults[key] if key in defaults else timing
||||
|
||||
# Function to extract the recurrence pattern |
||||
def get_recurrence_interval(timing):
    """Extract a recurrence from a timing phrase.

    Returns an interval in days for 'every alternate day' (2),
    'every X days' (X), 'once a week' (7), and 'daily'/'every day' (1);
    the string 'monthly' for 'once a month'; and None for anything else
    (i.e. not a recurring event).
    """
    phrase = timing.lower().strip()

    if "every alternate day" in phrase:
        return 2
    every_n = re.search(r"every (\d+) days", phrase)
    if every_n:
        return int(every_n.group(1))
    if "once a week" in phrase:
        return 7
    if "once a month" in phrase:
        return "monthly"
    if phrase in ("daily", "every day"):
        return 1
    return None
||||
|
||||
# Function to convert AM/PM time format to 24-hour format |
||||
def convert_to_24hr(time_str):
    """Turn a 12-hour time like '09:00 AM' into 24-hour 'HH:MM'."""
    parsed = datetime.strptime(time_str, "%I:%M %p")
    return parsed.strftime("%H:%M")
||||
|
||||
# Function to generate Google Calendar events |
||||
def format_calendar_events(processed_data):
    """Build Google-Calendar-style event dicts from structured prescription data.

    Args:
        processed_data: Dict with optional keys "medicines", "tests" and
            "follow_ups", as produced by the preprocessing step. Medicines
            become recurring timed events; tests and follow-ups become
            all-day events.

    Returns:
        List of event dicts ready for validation/conversion.
    """
    events = []
    start_date = datetime.today().date()

    # Medicines: recurring timed events.
    if "medicines" in processed_data:
        for med in processed_data["medicines"]:
            if med.get("name"):
                # BUG FIX: the original read med["timing"] directly on the
                # next line and raised KeyError when "timing" was missing,
                # despite defaulting it here. Read it once with the default.
                timing = med.get("timing", "09:00 AM")
                event_time = assign_time(timing)
                interval_days = get_recurrence_interval(timing)

                # If no interval, assume daily (default behavior)
                if interval_days is None:
                    interval_days = 1

                # Generate events for 1 year if no duration is given.
                # NOTE(review): the interval is added before the first event
                # is emitted, so the series starts one interval after today —
                # preserved from the original behavior.
                event_date = start_date
                for _ in range(365 if interval_days != "monthly" else 12):
                    if interval_days == "monthly":
                        # Jump to the first day of the next month.
                        event_date = (event_date.replace(day=1) + timedelta(days=32)).replace(day=1)
                    else:
                        event_date += timedelta(days=interval_days)

                    events.append({
                        "summary": f"Take {med['name']} ({med.get('dosage', 'Dosage not specified')})",
                        "start": {
                            "dateTime": f"{event_date.isoformat()}T{convert_to_24hr(event_time)}:00",
                            "timeZone": "Asia/Kolkata"
                        },
                        "end": {
                            # Same minute, second :59 — a sub-minute slot.
                            "dateTime": f"{event_date.isoformat()}T{convert_to_24hr(event_time)}:59",
                            "timeZone": "Asia/Kolkata"
                        }
                    })

    # Tests: all-day events keyed by 'dueDate' (not 'date').
    if "tests" in processed_data:
        for test in processed_data["tests"]:
            if test.get("name") and test.get("dueDate"):
                events.append({
                    "summary": f"Medical Test: {test['name']}",
                    "start": {"date": test["dueDate"]},
                    "end": {"date": test["dueDate"]},
                    "timeZone": "Asia/Kolkata"
                })

    # Follow-ups: all-day events.
    if "follow_ups" in processed_data:
        for follow_up in processed_data["follow_ups"]:
            if follow_up.get("date"):
                events.append({
                    "summary": "Doctor Follow-up Appointment",
                    "start": {"date": follow_up["date"]},
                    "end": {"date": follow_up["date"]},
                    "timeZone": "Asia/Kolkata"
                })

    return events
||||
|
||||
# Function to validate events before sending to Google Calendar |
||||
def validate_event(event):
    """Fill missing or None required fields with sensible defaults.

    Ensures 'summary', 'start' and 'end' exist; defaults are an untitled
    title and a 30-minute slot starting now (Asia/Kolkata). Mutates and
    returns the same dict.
    """
    now = datetime.today()
    defaults = {
        "summary": "Untitled Event",
        "start": {"dateTime": now.isoformat(), "timeZone": "Asia/Kolkata"},
        "end": {"dateTime": (now + timedelta(minutes=30)).isoformat(), "timeZone": "Asia/Kolkata"},
    }

    for key, fallback in defaults.items():
        if event.get(key) is None:
            event[key] = fallback

    return event
@ -0,0 +1,141 @@
|
||||
import os |
||||
from openai import OpenAI |
||||
from dotenv import load_dotenv |
||||
import json |
||||
from datetime import datetime, timedelta |
||||
|
||||
# Pull OPENAI_API_KEY (and friends) from a local .env file into the environment.
load_dotenv()

openai_api = os.getenv("OPENAI_API_KEY")  # NOTE(review): read but never used — OpenAI() reads the env var itself
# Cheaper text model; structuring already-extracted text needs no vision.
MODEL = "gpt-4o-mini"
openai = OpenAI()
||||
|
||||
system_prompt = """You are a medical assistant that processes prescription text. |
||||
Your goal is to extract medicines, tests, and follow-ups in a structured JSON format. |
||||
|
||||
### **Instructions:** |
||||
- Extract **medicines**, **dosages**, and **timings** if available. |
||||
- **Convert vague timings** into precise values: |
||||
- **Before breakfast** → `07:30 AM` |
||||
- **After lunch** → `02:00 PM` |
||||
- **Before dinner** → `07:00 PM` |
||||
- **After dinner** → `10:00 PM` |
||||
- **30 minutes before breakfast** → `07:00 AM` |
||||
- If **"daily"** is mentioned without a time, **assign a logical time** between **08:00 AM - 10:00 PM**. |
||||
- If the prescription says **"every alternate day"**, return `"interval": 2` instead of just `"daily"`. |
||||
|
||||
### **Tests & Follow-ups:** |
||||
- Extract **medical tests** and their required dates. |
||||
- Convert relative times (e.g., `"after 3 months"`) into **exact calendar dates**, using the prescription date. |
||||
- If the prescription date is missing, use today's date. |
||||
- Follow-up should **only be included if required**, not just for general check-ups. |
||||
|
||||
### **Output Format:** |
||||
Return **only valid JSON**, structured as follows: |
||||
|
||||
{ |
||||
"medicines": [ |
||||
{ |
||||
"name": "<Medicine Name>", |
||||
"dosage": "<Dosage>", |
||||
"timing": "<Time>", |
||||
"interval": <Interval in days (if applicable)> |
||||
} |
||||
], |
||||
"tests": [ |
||||
{ |
||||
"name": "<Test Name>", |
||||
"date": "<YYYY-MM-DD>" |
||||
} |
||||
], |
||||
"follow_ups": [ |
||||
{ |
||||
"date": "<YYYY-MM-DD>" |
||||
} |
||||
] |
||||
} |
||||
""" |
||||
|
||||
def clean_json_string(json_str):
    """Trim an LLM response down to its outermost {...} and parse it.

    Returns the parsed object, or None (after printing diagnostics) when
    the remaining text still is not valid JSON.
    """
    try:
        opening = json_str.find('{')
        closing = json_str.rfind('}') + 1
        # Slice out the candidate object when both braces were found.
        if opening >= 0 and closing > 0:
            json_str = json_str[opening:closing]

        json_str = json_str.strip()
        return json.loads(json_str)
    except json.JSONDecodeError as e:
        print(f"Failed to parse JSON. Raw response:\n{json_str}")
        print(f"Error: {str(e)}")
        return None
||||
|
||||
def preprocess_extracted_text(extracted_text):
    """Calls GPT-4o-mini to process prescription text into structured JSON.

    Returns a dict with "medicines", "tests" and "follow_ups" keys; on any
    API or parsing failure the lists are empty rather than raising.
    """
    empty_result = {
        "medicines": [],
        "tests": [],
        "follow_ups": []
    }

    try:
        response = openai.chat.completions.create(
            model=MODEL,
            messages=[
                {
                    "role": "system",
                    "content": system_prompt,
                },
                {
                    "role": "user",
                    "content": f"Process this prescription and return ONLY valid JSON:\n\n{extracted_text}"
                }
            ],
            temperature=0.3  # Lower temperature for more consistent JSON output
        )

        # Extract and parse the model's JSON payload.
        parsed_data = clean_json_string(response.choices[0].message.content)
        return empty_result if parsed_data is None else parsed_data

    except Exception as e:
        print(f"Error in API call or processing: {str(e)}")
        return empty_result
||||
|
||||
def process_dates(data, prescription_date=None):
    """Adjusts test dates and follow-up based on the prescription date or today's date.

    Entries in "tests"/"follow_ups" that carry "after_months" but no "date"
    get a concrete "date" (YYYY-MM-DD) computed as N * 30 days after the
    prescription date.

    Args:
        data: Structured dict with optional "tests" and "follow_ups" lists.
        prescription_date: Optional datetime.date anchor. Defaults to the
            historically hard-coded 2025-01-02 so existing behavior is
            preserved. NOTE(review): this should eventually be parsed from
            the prescription itself or default to today's date.

    Returns:
        The same dict with dates filled in; returned unchanged on error.
    """
    try:
        if prescription_date is None:
            # Previously hard-coded inline; kept as the default anchor date.
            prescription_date = datetime.strptime("02 JANUARY 2025", "%d %B %Y").date()

        def _resolve(entry):
            # 30 days ≈ one month — matches the original approximation.
            if isinstance(entry, dict) and "date" not in entry and "after_months" in entry:
                due = prescription_date + timedelta(days=entry["after_months"] * 30)
                entry["date"] = due.strftime("%Y-%m-%d")

        # Process test dates
        for test in data.get("tests", []):
            _resolve(test)

        # Process follow-up dates
        for follow_up in data.get("follow_ups", []):
            _resolve(follow_up)

        return data

    except Exception as e:
        print(f"Error processing dates: {str(e)}")
        return data
@ -0,0 +1,150 @@
|
||||
{ |
||||
"nbformat": 4, |
||||
"nbformat_minor": 0, |
||||
"metadata": { |
||||
"colab": { |
||||
"provenance": [], |
||||
"gpuType": "T4", |
||||
"authorship_tag": "ABX9TyPtAT7Yq5xd4vDcJEZtg69J" |
||||
}, |
||||
"kernelspec": { |
||||
"name": "python3", |
||||
"display_name": "Python 3" |
||||
}, |
||||
"language_info": { |
||||
"name": "python" |
||||
}, |
||||
"accelerator": "GPU" |
||||
}, |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# getting the latest transformers first, since this will require a restart\n", |
||||
"\n", |
||||
"!pip install git+https://github.com/huggingface/transformers.git" |
||||
], |
||||
"metadata": { |
||||
"id": "6gGKXU5RXORf" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import torch\n", |
||||
"from google.colab import userdata\n", |
||||
"from huggingface_hub import login\n", |
||||
"from transformers import AutoProcessor, AutoModelForImageTextToText\n", |
||||
"from google.colab import files" |
||||
], |
||||
"metadata": { |
||||
"id": "yCRrF4aiXPPo" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# logging in to HF\n", |
||||
"\n", |
||||
"hf_token = userdata.get('HF_TOKEN')\n", |
||||
"login(hf_token, add_to_git_credential=True)" |
||||
], |
||||
"metadata": { |
||||
"id": "AAlOQuCbXcrv" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": { |
||||
"id": "_RRVc2j2Vun-" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# this will start an input prompt for uploading local files\n", |
||||
"\n", |
||||
"uploaded = files.upload()\n", |
||||
"print(uploaded.keys()) # this will look sth like dict_keys([\"note2.jpg\"])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"'''\n", |
||||
"ChatGPT and Gemini explain the following part roughly like so:\n", |
||||
"The string contained in image_path is the key of the entry in the dictionary of uploaded files (see box above).\n", |
||||
"The value to that key contains the image in binary format.\n", |
||||
"The \"with open(image_path, \"wb\") as f\" part means: Create a new file \"note2.jpg\" on the server, and write to it in binary mode (\"wb\").\n", |
||||
"f.write(image) writes the binary image to that new file. \"note2.jpg\" aka image_path will now contain the image.\n", |
||||
"'''\n", |
||||
"\n", |
||||
"image_path = \"note2.jpg\" # update this string depending on the printout in the previous cell!\n", |
||||
"image = uploaded[image_path]\n", |
||||
"with open(image_path, \"wb\") as f:\n", |
||||
" f.write(image)" |
||||
], |
||||
"metadata": { |
||||
"id": "V_UAuSSkXBKh" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# from HF model instructions\n", |
||||
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", |
||||
"model = AutoModelForImageTextToText.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\", device_map=device)\n", |
||||
"processor = AutoProcessor.from_pretrained(\"stepfun-ai/GOT-OCR-2.0-hf\")" |
||||
], |
||||
"metadata": { |
||||
"id": "AiFP-mQtXrpV" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# also from HF documentation about this model, see https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf\n", |
||||
"\n", |
||||
"image = image_path\n", |
||||
"inputs = processor(image, return_tensors=\"pt\").to(device)\n", |
||||
"\n", |
||||
"ocr = model.generate(\n", |
||||
" **inputs,\n", |
||||
" do_sample=False,\n", |
||||
" tokenizer=processor.tokenizer,\n", |
||||
" stop_strings=\"<|im_end|>\",\n", |
||||
" max_new_tokens=4096,\n", |
||||
")" |
||||
], |
||||
"metadata": { |
||||
"id": "7Adr8HB_YNf5" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# prints out the recognized text. This can read my handwriting pretty well! And it works super quick on the free T4 GPU server here.\n", |
||||
"\n", |
||||
"print(processor.decode(ocr[0, inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True))" |
||||
], |
||||
"metadata": { |
||||
"id": "nRsRUIIuYdJ9" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
} |
||||
] |
||||
} |
Loading…
Reference in new issue