7 changed files with 1790 additions and 0 deletions
@@ -0,0 +1,87 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "44aba2a0-c6eb-4fc1-a5cc-0a8f8679dbb8",
"metadata": {},
"source": [
"## Michelin-star cook..."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4d58124-5e9a-4f5a-9e0a-ff74f43896a8",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "67dc3099-2ccc-4ee8-8ff2-0dbbe4ae2fcb",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are a professional chef in a Michelin-star restaurant. You will help me cook restaurant-style dishes using the ingredients I have left in my refrigerator. \\\n",
"You will provide detailed instructions with precise times and measurements in grams, and include calorie information for the raw ingredients, not the cooked dish. \\\n",
"Add the calorie information at the end. Your responses should be formatted in Markdown.\"\n",
"\n",
"user_prompt = \"\"\"\n",
"Help me with a recipe using the ingredients I have left in the refrigerator. I have spinach, eggs, pasta, rice, chicken, beef, carrots, potatoes, butter, milk, cheese, tomatoes, red peppers, and all spices in the pantry.\n",
"\"\"\"\n",
"\n",
"messages = [\n",
"    {\"role\": \"system\", \"content\": system_prompt},\n",
"    {\"role\": \"user\", \"content\": user_prompt},\n",
"]\n",
"\n",
"response = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=messages\n",
")\n",
"\n",
"# Print the result in Markdown format\n",
"pretty_response = Markdown(response.choices[0].message.content)\n",
"display(pretty_response)"
]
},
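{
"cell_type": "markdown",
"id": "streaming-note",
"metadata": {},
"source": [
"An optional sketch, not part of the original flow (the cell ids here are made up): the same request can stream the recipe token by token, reusing the `messages` list above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "streaming-demo",
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: stream the recipe as it is generated (assumes the cells above have run)\n",
"stream = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=messages,\n",
"    stream=True\n",
")\n",
"reply = \"\"\n",
"for chunk in stream:\n",
"    reply += chunk.choices[0].delta.content or \"\"\n",
"display(Markdown(reply))"
]
}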
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,63 @@
#!/usr/bin/env python

import os
import argparse
from dotenv import load_dotenv
from openai import OpenAI


def load_openai_key():
    # Load environment variables in a file called .env
    load_dotenv(override=True)
    api_key = os.getenv('OPENAI_API_KEY')

    # Check the key
    if not api_key:
        return "Error: No API key was found!"
    elif not api_key.startswith("sk-proj-"):
        return "Error: An API key was found, but it doesn't start with sk-proj-; please check you're using the right key"
    elif api_key.strip() != api_key:
        return "Error: An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them!"
    else:
        return "API key found and looks good so far!"


def ask_llm(client, model, user_prompt):
    system_prompt = """
    You are a writing assistant with an expertise in children's stories.
    Write a bedtime story inspired by the subject below.
    The story should have a beginning, middle, and end.
    The story should be appropriate for children ages 5-8 and have a positive message.
    I should be able to read the entire story in about 3 minutes.
    """
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "system", "content": system_prompt},
                  {"role": "user", "content": user_prompt}]
    )
    return response.choices[0].message.content


def main():
    parser = argparse.ArgumentParser(description="AI Bedtime Storyteller")
    parser.add_argument("provider", choices=["openai", "ollama"], help="AI provider to use")
    parser.add_argument("--model", help="Model to use for Ollama (required if provider is 'ollama')")
    parser.add_argument("subject", help="What do you want the story to be about?")

    args = parser.parse_args()
    if args.provider == "ollama" and not args.model:
        parser.error("--model is required when provider is 'ollama'")

    if args.provider == "openai":
        status = load_openai_key()
        print(status)
        if status.startswith("Error"):
            return
        client = OpenAI()
        model = "gpt-4o-mini"
    else:  # "ollama" - argparse's choices rules out anything else
        client = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
        model = args.model

    user_prompt = args.subject

    result = ask_llm(client, model, user_prompt)
    print("AI Response:", result)


if __name__ == "__main__":
    main()
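
# Example invocations (assuming this file is saved as bedtime_story.py and that
# an Ollama model such as llama3.2 is pulled locally - both are assumptions):
#   python bedtime_story.py openai "a dragon who learns to share"
#   python bedtime_story.py ollama --model llama3.2 "a dragon who learns to share"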
@@ -0,0 +1,567 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c79dc33e-1a3b-4601-a8f2-219b7a9b6d88",
"metadata": {},
"source": [
"# Company Brochure - Relevant Links and Custom Tone\n",
"\n",
"Using GPT to generate a company brochure, with the relevant-links functionality and the ability to choose the desired tone."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "e32f4aa7-6fc4-4dc9-8058-58e6a7f329c5",
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"\n",
"import os\n",
"import requests\n",
"import json\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d1d65a21-bbba-44ff-a2be-85bf2055a493",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key set and good to go.\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
"    print(\"OpenAI API Key set and good to go.\")\n",
"else:\n",
"    print(\"OpenAI API Key not set. :(\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "c5db63fe-5da8-496e-9b37-139598d600a7",
"metadata": {},
"outputs": [],
"source": [
"# Setting up the OpenAI object\n",
"\n",
"openai = OpenAI()\n",
"gpt_model = 'gpt-4o-mini'"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "535da52f-b280-48ce-aa8b-f82f9f9805d9",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
"    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"    \"\"\"\n",
"    A utility class to represent a Website that we have scraped, now with links\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, url):\n",
"        self.url = url\n",
"        response = requests.get(url, headers=headers)\n",
"        self.body = response.content\n",
"        soup = BeautifulSoup(self.body, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"        links = [link.get('href') for link in soup.find_all('a')]\n",
"        self.links = [link for link in links if link]\n",
"\n",
"    def get_contents(self):\n",
"        return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "8d5757c4-95f4-4038-8ed4-8c81da5112b0",
"metadata": {},
"outputs": [],
"source": [
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
"link_system_prompt += \"\"\"\n",
"{\n",
"    \"links\": [\n",
"        {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
"        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
"    ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d5fd31ac-7c81-454a-a1dc-4c58bd3db246",
"metadata": {},
"outputs": [],
"source": [
"def get_links_user_prompt(website):\n",
"    user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
"    user_prompt += \"please decide which of these are relevant web links for a brochure about the company; respond with the full https URL in JSON format. \\\n",
"Do not include Terms of Service, Privacy, or email links.\\n\"\n",
"    user_prompt += \"Links (some might be relative links):\\n\"\n",
"    user_prompt += \"\\n\".join(website.links)\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e8b67492-1ba4-4aad-a588-39116128fa18",
"metadata": {},
"outputs": [],
"source": [
"def gpt_get_links(url):\n",
"    website = Website(url)\n",
"    response = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": link_system_prompt},\n",
"            {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
"        ],\n",
"        response_format={\"type\": \"json_object\"}\n",
"    )\n",
"    result = response.choices[0].message.content\n",
"    return json.loads(result)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e8846e7a-ace2-487e-a0a8-fccb389f2eb9",
"metadata": {},
"outputs": [],
"source": [
"# This function uses the get_contents method in the Website class, plus the GPT-selected relevant links, to gather details from all pages.\n",
"\n",
"def get_all_details(url):\n",
"    result = \"Landing page:\\n\"\n",
"    result += Website(url).get_contents()\n",
"    links = gpt_get_links(url)\n",
"    print(\"Found links:\", links)\n",
"    for link in links[\"links\"]:\n",
"        result += f\"\\n\\n{link['type']}\\n\"\n",
"        result += Website(link[\"url\"]).get_contents()\n",
"    return result"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "18b42319-8342-4b9c-bef6-8b72acf92ab3",
"metadata": {},
"outputs": [],
"source": [
"def get_brochure_user_prompt(company_name, url):\n",
"    user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
"    user_prompt += f\"Here are the contents of its landing page and other relevant pages; \\\n",
"use this information to build a short brochure of the company in markdown.\\n\"\n",
"    user_prompt += get_all_details(url)\n",
"    user_prompt = user_prompt[:5_000]  # Truncate if more than 5,000 characters\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "d7748293-a616-41de-93cb-89f65cc5c73d",
"metadata": {},
"outputs": [],
"source": [
"# Let's create a call that streams back results\n",
"# If you'd like a refresher on Generators (the \"yield\" keyword),\n",
"# please take a look at the Intermediate Python notebook in the week1 folder.\n",
"\n",
"def stream_brochure(company_name, url, tone):\n",
"\n",
"    system_message = f\"You are an assistant that analyzes the content of several relevant pages from a company website \\\n",
"and creates a short brochure about the company for prospective customers, investors, and recruits. \\\n",
"Include details of company culture, customers and careers/jobs if you have the information. \\\n",
"Respond in markdown, and use a {tone.lower()} tone throughout the brochure.\"\n",
"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
"    ]\n",
"    stream = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=messages,\n",
"        stream=True\n",
"    )\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        result += chunk.choices[0].delta.content or \"\"\n",
"        yield result"
]
},
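{
"cell_type": "markdown",
"id": "stream-demo-note",
"metadata": {},
"source": [
"A quick sketch of calling the generator directly, without the Gradio UI below; the company name, URL and tone here are only illustrative, and the cell ids are made up."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "stream-demo-cell",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: consume the generator outside Gradio - each yield is the brochure so far\n",
"for partial in stream_brochure(\"Hugging Face\", \"https://huggingface.co\", \"witty\"):\n",
"    pass  # keep only the final accumulated text\n",
"display(Markdown(partial))"
]
},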
{
"cell_type": "code",
"execution_count": 11,
"id": "15222832-06e0-4452-a8e1-59b9b1755488",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7860\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'company page', 'url': 'https://www.snowflake.com/en/company/overview/about-snowflake/'}, {'type': 'company leadership page', 'url': 'https://www.snowflake.com/en/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'company ESG page', 'url': 'https://www.snowflake.com/en/company/overview/esg/'}, {'type': 'company ventures page', 'url': 'https://www.snowflake.com/en/company/overview/snowflake-ventures/'}, {'type': 'end data disparity page', 'url': 'https://www.snowflake.com/en/company/overview/end-data-disparity/'}]}\n",
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'about page', 'url': 'https://www.snowflake.com/company/overview/about-snowflake/'}, {'type': 'leadership page', 'url': 'https://www.snowflake.com/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'investor relations', 'url': 'https://investors.snowflake.com/overview/default.aspx'}, {'type': 'ESG page', 'url': 'https://www.snowflake.com/company/overview/esg/'}, {'type': 'snowflake ventures', 'url': 'https://www.snowflake.com/company/overview/snowflake-ventures/'}, {'type': 'end data disparity', 'url': 'https://www.snowflake.com/company/overview/end-data-disparity/'}]}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n",
"requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n",
"[urllib3/requests/gradio traceback frames omitted; the identical SSL failure was logged twice]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.petrofac.com/who-we-are/'}, {'type': 'what we do page', 'url': 'https://www.petrofac.com/who-we-are/what-we-do/'}, {'type': 'careers page', 'url': 'https://www.petrofac.com/careers/'}, {'type': 'our structure page', 'url': 'https://www.petrofac.com/who-we-are/our-structure/'}, {'type': 'energy transition page', 'url': 'https://www.petrofac.com/who-we-are/energy-transition/'}, {'type': 'sustainability and ESG page', 'url': 'https://www.petrofac.com/who-we-are/sustainability-and-esg/'}, {'type': 'investor relations page', 'url': 'https://www.petrofac.com/investors/'}, {'type': 'services page', 'url': 'https://www.petrofac.com/services/'}, {'type': 'where we operate page', 'url': 'https://www.petrofac.com/where-we-operate/'}]}\n"
]
}
],
"source": [
"view = gr.Interface(\n",
"    fn=stream_brochure,\n",
"    inputs=[\n",
"        gr.Textbox(label=\"Company name:\"),\n",
"        gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
"        gr.Textbox(label=\"Tone:\")],\n",
"    outputs=[gr.Markdown(label=\"Brochure:\")],\n",
"    flagging_mode=\"never\"\n",
")\n",
"view.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70d6398c-21dd-44f8-ba7d-0204414dffa0",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@@ -0,0 +1,78 @@
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer, AutoProcessor, pipeline
from huggingface_hub import login
import os

# Use the secret stored in the Hugging Face space
token = os.getenv("HF_TOKEN")
login(token=token)

# Whisper model optimization
WHISPER_MODEL = "openai/whisper-tiny"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained(WHISPER_MODEL)

transcriber = pipeline(
    "automatic-speech-recognition",
    model=WHISPER_MODEL,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    device=0 if torch.cuda.is_available() else "cpu",
)


# Function to transcribe audio & generate minutes
def process_audio(audio_file):
    if audio_file is None:
        return "Error: No audio provided!"

    # Transcribe audio
    transcript = transcriber(audio_file)["text"]

    # LLaMA model optimization
    LLAMA = "meta-llama/Llama-3.2-3B-Instruct"
    llama_quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4"
    )

    tokenizer = AutoTokenizer.from_pretrained(LLAMA)
    tokenizer.pad_token = tokenizer.eos_token
    llama_model = AutoModelForCausalLM.from_pretrained(
        LLAMA,
        quantization_config=llama_quant_config if torch.cuda.is_available() else None,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto"
    )

    # Generate meeting minutes
    system_message = "You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown."
    user_prompt = f"Below is an extract transcript of a meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\n{transcript}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]

    inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(DEVICE)
    streamer = TextStreamer(tokenizer)
    outputs = llama_model.generate(inputs, max_new_tokens=2000, streamer=streamer)

    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Gradio interface
interface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"),
    outputs="text",
    title="Meeting Minutes Generator",
    description="Upload or record an audio file to get structured meeting minutes in Markdown.",
)

# Launch app
interface.launch()
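
# A sketch of running this outside a Space (the filename app.py is an assumption;
# HF_TOKEN is read from the environment, so export it or prefix the command):
#   HF_TOKEN=hf_xxx python app.py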
@@ -0,0 +1,283 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import documents exported from Evernote to a vectorstore\n",
"### Use OpenAI file search with the Responses API\n",
"#### Prerequisite steps\n",
"* exported the notes from your Evernote notebook as html\n",
"* converted the notes further to md-files and removed broken image links (use python/AI)\n",
"* named the files with the note titles\n",
"\n",
"Files are in one folder.\n",
"\n",
"##### Query ChromaDB vectorstore\n",
"I first tried to accomplish this task with RAG, like the example by https://github.com/ed-donner/llm_engineering/commits?author=dinorrusso.\n",
"\n",
"I thought this would be a trivial task, but it was not 😃 That example uses Ollama running locally.\n",
"Even though the retriever had the information required, it was dropped from the answer.\n",
"\n",
"I then tried Chroma + OpenAI. After several attempts I succeeded in creating a vectorstore and querying it. That's it for this time.\n",
"\n",
"##### OpenAI vector store, see the bottom of the notebook\n",
"One attempt was to use OpenAI's file-search tool, which seemed pretty straightforward.\n",
"The con: loading files did not always work. The code is left in as a reference."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"from dotenv import load_dotenv\n",
"import gradio as gr\n",
"import openai\n",
"import chromadb\n",
"from chromadb.config import Settings\n",
"import os"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Load files to vectorstore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"openai.api_key = os.environ['OPENAI_API_KEY']\n",
"\n",
"def chunk_text(text, max_tokens=2000):\n",
"    # Note: counts characters as a rough proxy for tokens\n",
"    words = text.split()\n",
"    chunks = []\n",
"    current_chunk = []\n",
"    current_length = 0\n",
"\n",
"    for word in words:\n",
"        current_length += len(word) + 1  # +1 for the space\n",
"        if current_length > max_tokens:\n",
"            chunks.append(\" \".join(current_chunk))\n",
"            current_chunk = [word]\n",
"            current_length = len(word) + 1\n",
"        else:\n",
"            current_chunk.append(word)\n",
"\n",
"    if current_chunk:\n",
"        chunks.append(\" \".join(current_chunk))\n",
"\n",
"    return chunks\n",
"\n",
"chroma_client = chromadb.Client()\n",
"\n",
"# Create or get the existing collection\n",
"collection_name = \"EverNotes\"\n",
"\n",
"try:\n",
"    existing_collection = chroma_client.get_collection(name=collection_name)\n",
"    if existing_collection.count() > 0:\n",
"        chroma_client.delete_collection(name=collection_name)\n",
"except Exception:\n",
"    print(f\"Collection {collection_name} does not exist. Creating a new one.\")\n",
"\n",
"# Create a collection in ChromaDB\n",
"collection = chroma_client.get_or_create_collection(name=collection_name)\n",
"\n",
"# Define your data; it should look like this:\n",
"# documents = [\"OpenAI is revolutionizing AI.\", \"ChromaDB makes embedding storage easy.\"]\n",
"# metadata = [{\"id\": 1}, {\"id\": 2}]\n",
"\n",
"folder_path = os.getenv('EVERNOTE_EXPORT')\n",
"documents = []\n",
"\n",
"for root, dirs, files in os.walk(folder_path):\n",
"    for file in files:\n",
"        if file.endswith('.md'):  # Change this to the file extension you need\n",
"            with open(os.path.join(root, file), 'r') as f:\n",
"                documents.append(f.read())\n",
"\n",
"metadata = [{\"id\": i + 1} for i in range(len(documents))]\n",
"\n",
"# Generate embeddings using OpenAI\n",
"def get_embedding(text, model=\"text-embedding-ada-002\"):\n",
"    response = openai.embeddings.create(input=text, model=model)\n",
"    return response.data[0].embedding\n",
"\n",
"# Add documents and embeddings to ChromaDB in chunks\n",
"for doc, meta in zip(documents, metadata):\n",
"    chunks = chunk_text(doc)\n",
"    for j, chunk in enumerate(chunks):\n",
"        embedding = get_embedding(chunk)\n",
"        collection.add(\n",
"            documents=[chunk],\n",
"            embeddings=[embedding],\n",
"            metadatas=[meta],\n",
"            ids=[f\"{meta['id']}-{j}\"]  # unique id per chunk, not just per note\n",
"        )\n"
]
},
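{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of `chunk_text` on a made-up string (illustrative only): with a tiny `max_tokens` the word list is split whenever the running character count passes the limit."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: chunk a short sample text with a tiny limit\n",
"sample = \"Fitting the Shimano speed hub 7 takes patience and the right cone spanner\"\n",
"for i, c in enumerate(chunk_text(sample, max_tokens=20)):\n",
"    print(i, repr(c))"
]
},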
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Query ChromaDB"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"query_text = \"Is there a video for Fitting the Shimano speed hub 7\"\n",
"query_embedding = get_embedding(query_text)\n",
"\n",
"results = collection.query(\n",
"    query_embeddings=[query_embedding],\n",
"    n_results=2\n",
")\n",
"\n",
"print(\"Query Results:\", results)"
]
},
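{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of the answering step that the RAG attempt above was aiming for: stuff the retrieved chunks into a chat completion so the model answers from the notes. The model choice and prompt wording here are assumptions, not part of the original flow."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: answer the query using the retrieved chunks as context\n",
"context = \"\\n\\n\".join(results[\"documents\"][0])\n",
"completion = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",  # assumed model choice\n",
"    messages=[\n",
"        {\"role\": \"system\", \"content\": \"Answer using only the provided notes.\"},\n",
"        {\"role\": \"user\", \"content\": f\"Notes:\\n{context}\\n\\nQuestion: {query_text}\"}\n",
"    ]\n",
")\n",
"print(completion.choices[0].message.content)"
]
},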
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Gradio interface"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Function to query ChromaDB\n",
"def query_chromadb(query_text):\n",
"    query_embedding = get_embedding(query_text)\n",
"    results = collection.query(\n",
"        query_embeddings=[query_embedding],\n",
"        n_results=2\n",
"    )\n",
"    return results\n",
"\n",
"# Gradio interface\n",
"def gradio_interface(query_text):\n",
"    results = query_chromadb(query_text)\n",
"    return results\n",
"\n",
"# Create Gradio app\n",
"iface = gr.Interface(\n",
"    fn=gradio_interface,\n",
"    inputs=\"text\",\n",
"    outputs=\"text\",\n",
"    title=\"ChromaDB Query Interface\",\n",
"    description=\"Enter your query to search the ChromaDB collection.\"\n",
")\n",
"\n",
"iface.launch()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Below is the OpenAI file-search variant, which had some failures in file uploads."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import glob\n",
"folder_path = os.environ['EVERNOTE_EXPORT']\n",
"# Keep only the .md files (glob already returns full paths)\n",
"file_paths = glob.glob(os.path.join(folder_path, '*.md'))\n",
"file_streams = [open(path, 'rb') for path in file_paths]\n",
"\n",
"# Create vector store\n",
"vector_store = openai.vector_stores.create(\n",
"    name=\"Evernote notes\",\n",
")\n",
"\n",
"# Batch upload limit: you can upload up to 100 files in a single batch\n",
"# https://community.openai.com/t/max-100-files-in-vector-store/729876/4\n",
"batch_size = 90\n",
"for i in range(0, len(file_streams), batch_size):\n",
"    batch = file_streams[i:i + batch_size]\n",
"    file_batch = openai.vector_stores.file_batches.upload_and_poll(\n",
"        vector_store_id=vector_store.id,\n",
"        files=batch\n",
"    )\n",
"    print(file_batch.status)\n",
"    print(file_batch.file_counts)\n",
"\n",
"# There can be some failures in the file counts, e.g.:\n",
"# \"FileCounts(cancelled=0, completed=89, failed=1, in_progress=0, total=90)\"\n",
"# Usually about 1% of uploads fail; no fix found for that yet."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"response = openai.responses.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    input=\"Is there a video for Fitting the Shimano speed hub 7?\",\n",
"    tools=[{\n",
"        \"type\": \"file_search\",\n",
"        \"vector_store_ids\": [vector_store.id]\n",
"    }],\n",
"    include=None\n",
")\n",
"print(response)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
@ -0,0 +1,359 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "c25c6e94-f3de-4367-b2bf-269ba7160977", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## An Expert Knowledge Worker Question-Answering Agent using RAG" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "15169580-cf11-4dee-8ec7-3a4ef59b19ee", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Aims\n", |
||||||
|
"- Reads README.md files and loads data using TextLoader\n", |
||||||
|
"- Splits into chunks using CharacterTextSplitter\n", |
||||||
|
"- Converts chunks into vector embeddings and creates a datastore\n", |
||||||
|
"- 2D and 3D visualisations\n", |
||||||
|
"- Langchain to set up a conversation retrieval chain" |
||||||
|
] |
||||||
|
}, |
{
"cell_type": "code",
"execution_count": null,
"id": "051cf881-357d-406b-8eae-1610651e40f1",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import glob\n",
"from dotenv import load_dotenv\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ccfd403a-5bdb-4a8c-b3fd-d47ae79e43f7",
"metadata": {},
"outputs": [],
"source": [
"# imports for langchain, plotly and Chroma\n",
"\n",
"from langchain.document_loaders import DirectoryLoader, TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.schema import Document\n",
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
"from langchain.embeddings import HuggingFaceEmbeddings\n",
"from langchain_chroma import Chroma\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.chains import ConversationalRetrievalChain\n",
"import numpy as np\n",
"from sklearn.manifold import TSNE\n",
"import plotly.graph_objects as go\n",
"import plotly.express as px\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d853868-d2f6-43e1-b27c-b8e91d06b724",
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"gpt-4o-mini\"\n",
"db_name = \"vector_db\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f152fc3b-0bf4-4d51-948f-95da1ebc030a",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "24e621ac-df06-4af6-a60d-a9ed7adb884a",
"metadata": {},
"outputs": [],
"source": [
"# Read in documents using LangChain's loaders\n",
"\n",
"folder = \"my-knowledge-base/\"\n",
"text_loader_kwargs = {'autodetect_encoding': True}\n",
"\n",
"loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
"folder_docs = loader.load()\n",
"\n",
"# Use the base filename (without extension) as metadata for each document\n",
"for doc in folder_docs:\n",
"    filename_md = os.path.basename(doc.metadata[\"source\"])\n",
"    filename, _ = os.path.splitext(filename_md)\n",
"    doc.metadata[\"filename\"] = filename\n",
"\n",
"documents = folder_docs\n",
"\n",
"text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)\n",
"chunks = text_splitter.split_documents(documents)\n",
"\n",
"print(f\"Total number of chunks: {len(chunks)}\")\n",
"print(f\"Files found: {set(doc.metadata['filename'] for doc in documents)}\")"
]
},
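A note on `CharacterTextSplitter` semantics: it first splits on a separator (`"\n\n"` by default) and only then merges pieces back up toward `chunk_size`, so chunks can exceed 400 characters when a single paragraph is longer than the limit, and `chunk_overlap=200` means consecutive chunks share up to half their text. A minimal standalone check (the sample text is made up):

```python
# Minimal check of CharacterTextSplitter behaviour on synthetic text.
from langchain.text_splitter import CharacterTextSplitter

sample = "\n\n".join(f"Paragraph {i}: " + "word " * 40 for i in range(5))
splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)
for i, chunk in enumerate(splitter.split_text(sample)):
    print(i, len(chunk), repr(chunk[:40]))
```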
{
"cell_type": "code",
"execution_count": null,
"id": "f02f08ee-5ade-4f79-a500-045a8f1a532f",
"metadata": {},
"outputs": [],
"source": [
"# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n",
"\n",
"embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n",
"\n",
"# Delete if already exists\n",
"\n",
"if os.path.exists(db_name):\n",
"    Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n",
"\n",
"# Create vectorstore\n",
"\n",
"vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n",
"print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")"
]
},
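Before wiring the store into a chain, retrieval can be sanity-checked directly; `similarity_search_with_score` is part of the LangChain Chroma API (the query string here is illustrative, and the scores are distances, so lower means closer):

```python
# Sanity-check retrieval straight against the vector store.
query = "What is this knowledge base about?"  # illustrative query
for doc, score in vectorstore.similarity_search_with_score(query, k=3):
    print(f"{score:.3f}  {doc.metadata['filename']}: {doc.page_content[:60]}...")
```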
{
"cell_type": "code",
"execution_count": null,
"id": "7f665f4d-ccb1-43fb-b901-040117925732",
"metadata": {},
"outputs": [],
"source": [
"# Let's investigate the vectors\n",
"\n",
"collection = vectorstore._collection\n",
"count = collection.count()\n",
"\n",
"sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n",
"dimensions = len(sample_embedding)\n",
"print(f\"There are {count:,} vectors with {dimensions:,} dimensions in the vector store\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6208a971-e8b7-48bc-be7a-6dcb82967fd2",
"metadata": {},
"outputs": [],
"source": [
"# Pre-work: pull the vectors, documents and metadata out of the collection\n",
"\n",
"result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n",
"vectors = np.array(result['embeddings'])\n",
"documents = result['documents']\n",
"metadatas = result['metadatas']\n",
"filenames = [metadata['filename'] for metadata in metadatas]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eb27bc8a-453b-4b19-84b4-dc495bb0e544",
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"\n",
"def random_color():\n",
"    return f\"rgb({random.randint(0,255)},{random.randint(0,255)},{random.randint(0,255)})\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78db67e5-ef10-4581-b8ac-3e0281ceba45",
"metadata": {},
"outputs": [],
"source": [
"def show_embeddings_2d(result):\n",
"    vectors = np.array(result['embeddings'])\n",
"    documents = result['documents']\n",
"    metadatas = result['metadatas']\n",
"    filenames = [metadata['filename'] for metadata in metadatas]\n",
"    filenames_unique = sorted(set(filenames))\n",
"\n",
"    # color assignment\n",
"    color_map = {name: random_color() for name in filenames_unique}\n",
"    colors = [color_map[name] for name in filenames]\n",
"\n",
"    tsne = TSNE(n_components=2, random_state=42, perplexity=4)\n",
"    reduced_vectors = tsne.fit_transform(vectors)\n",
"\n",
"    # Create the 2D scatter plot\n",
"    fig = go.Figure(data=[go.Scatter(\n",
"        x=reduced_vectors[:, 0],\n",
"        y=reduced_vectors[:, 1],\n",
"        mode='markers',\n",
"        marker=dict(size=5, color=colors, opacity=0.8),\n",
"        text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n",
"        hoverinfo='text'\n",
"    )])\n",
"\n",
"    fig.update_layout(\n",
"        title='2D Chroma Vector Store Visualization',\n",
"        xaxis_title='x',\n",
"        yaxis_title='y',\n",
"        width=800,\n",
"        height=600,\n",
"        margin=dict(r=20, b=10, l=10, t=40)\n",
"    )\n",
"\n",
"    fig.show()"
]
},
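One caveat with the hard-coded `perplexity=4`: scikit-learn requires perplexity to be strictly less than the number of samples, so `fit_transform` raises on very small knowledge bases. A defensive variant (a sketch; `safe_tsne` is a hypothetical helper, not in the notebook):

```python
# Hypothetical helper: clamp perplexity so t-SNE also works on tiny stores.
from sklearn.manifold import TSNE

def safe_tsne(vectors, n_components=2, random_state=42):
    perplexity = min(4, max(1, len(vectors) - 1))  # must be < n_samples
    return TSNE(n_components=n_components, random_state=random_state,
                perplexity=perplexity).fit_transform(vectors)
```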
{
"cell_type": "code",
"execution_count": null,
"id": "2c250166-cb5b-4a75-8981-fae2d6dfe509",
"metadata": {},
"outputs": [],
"source": [
"show_embeddings_2d(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3b290e38-0800-4453-b664-7a7622ff5ed2",
"metadata": {},
"outputs": [],
"source": [
"def show_embeddings_3d(result):\n",
"    vectors = np.array(result['embeddings'])\n",
"    documents = result['documents']\n",
"    metadatas = result['metadatas']\n",
"    filenames = [metadata['filename'] for metadata in metadatas]\n",
"    filenames_unique = sorted(set(filenames))\n",
"\n",
"    # color assignment\n",
"    color_map = {name: random_color() for name in filenames_unique}\n",
"    colors = [color_map[name] for name in filenames]\n",
"\n",
"    tsne = TSNE(n_components=3, random_state=42)\n",
"    reduced_vectors = tsne.fit_transform(vectors)\n",
"\n",
"    fig = go.Figure(data=[go.Scatter3d(\n",
"        x=reduced_vectors[:, 0],\n",
"        y=reduced_vectors[:, 1],\n",
"        z=reduced_vectors[:, 2],\n",
"        mode='markers',\n",
"        marker=dict(size=5, color=colors, opacity=0.8),\n",
"        text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n",
"        hoverinfo='text'\n",
"    )])\n",
"\n",
"    fig.update_layout(\n",
"        title='3D Chroma Vector Store Visualization',\n",
"        scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n",
"        width=900,\n",
"        height=700,\n",
"        margin=dict(r=20, b=10, l=10, t=40)\n",
"    )\n",
"\n",
"    fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45d1d034-2503-4176-b1e4-f248e31c4770",
"metadata": {},
"outputs": [],
"source": [
"show_embeddings_3d(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e79946a1-f93a-4b3a-8d19-deef40dec223",
"metadata": {},
"outputs": [],
"source": [
"# create a new Chat with OpenAI\n",
"llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n",
"\n",
"# set up the conversation memory for the chat\n",
"memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
"\n",
"# the retriever is an abstraction over the VectorStore that will be used during RAG\n",
"retriever = vectorstore.as_retriever(search_kwargs={\"k\": 50})\n",
"\n",
"# putting it together: set up the conversation chain with the gpt-4o-mini LLM, the vector store and memory\n",
"conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)"
]
},
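`ConversationBufferMemory` simply accumulates the raw turn history, which the chain condenses into a standalone question before each retrieval; note that with `k=50` and 400-character chunks, each call also places roughly 20k characters of retrieved context into the prompt. The buffer can be inspected after a couple of calls (the questions are illustrative):

```python
# Watch the conversation memory accumulate across calls (illustrative questions).
conversation_chain.invoke({"question": "What topics does the knowledge base cover?"})
conversation_chain.invoke({"question": "Tell me more about the first one."})
for message in memory.chat_memory.messages:
    print(f"{message.type}: {message.content[:80]}")
```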
{
"cell_type": "code",
"execution_count": null,
"id": "59f90c85-c113-4482-8574-8a728ef25459",
"metadata": {},
"outputs": [],
"source": [
"def chat(question, history):\n",
"    result = conversation_chain.invoke({\"question\": question})\n",
"    return result[\"answer\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0520a8ff-01a4-4fa6-9dc8-57da87272edc",
"metadata": {},
"outputs": [],
"source": [
"view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b4949b17-cd9c-4bff-bd5b-0f80df72e7dc",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@ -0,0 +1,353 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d13be0fd-db15-4ab1-860a-b00257051339",
"metadata": {},
"source": [
"## Gradio UI for Markdown-Based Q&A with Visualization"
]
},
{
"cell_type": "markdown",
"id": "bc63fbdb-66a9-4c10-8dbd-11476b5e2d21",
"metadata": {},
"source": [
"This interface enables users to:\n",
"- Upload Markdown files for processing\n",
"- Visualize similarity between document chunks in 2D and 3D using embeddings\n",
"- Ask questions and receive RAG-enabled responses\n",
"- Maintain conversation context for better question answering\n",
"- Clear chat history when required for fresh sessions\n",
"- Store and retrieve embeddings using ChromaDB\n",
"\n",
"It integrates LangChain, ChromaDB, and OpenAI to process, store, and retrieve information efficiently."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "91da28d8-8e29-44b7-a62a-a3a109753727",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e47f670a-e2cb-4700-95d0-e59e440677a1",
"metadata": {},
"outputs": [],
"source": [
"# imports for langchain, plotly and Chroma\n",
"\n",
"from langchain.document_loaders import DirectoryLoader, TextLoader\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.schema import Document\n",
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n",
"from langchain.embeddings import HuggingFaceEmbeddings\n",
"from langchain_chroma import Chroma\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.chains import ConversationalRetrievalChain\n",
"import numpy as np\n",
"from sklearn.manifold import TSNE\n",
"import plotly.graph_objects as go\n",
"import plotly.express as px\n",
"import matplotlib.pyplot as plt\n",
"from random import randint\n",
"import shutil"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "362d4976-2553-4ed8-8fbb-49806145cad1",
"metadata": {},
"outputs": [],
"source": [
"!pip install --upgrade gradio"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "968b6e96-557e-439f-b2f1-942c05168641",
"metadata": {},
"outputs": [],
"source": [
"MODEL = \"gpt-4o-mini\"\n",
"db_name = \"vector_db\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "537f66de-6abf-4b34-8e05-6b9a9df8ae82",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "246c1c1b-fcfa-4f4c-b99c-024598751361",
"metadata": {},
"outputs": [],
"source": [
"folder = \"my-knowledge-base/\"\n",
"db_name = \"vectorstore_db\"\n",
"\n",
"def process_files(files):\n",
"    os.makedirs(folder, exist_ok=True)\n",
"\n",
"    # Copy the uploads into the knowledge-base folder\n",
"    processed_files = []\n",
"    for file in files:\n",
"        file_path = os.path.join(folder, os.path.basename(file))\n",
"        shutil.copy(file, file_path)\n",
"        processed_files.append(os.path.basename(file))\n",
"\n",
"    # Load documents using LangChain's DirectoryLoader\n",
"    text_loader_kwargs = {'autodetect_encoding': True}\n",
"    loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n",
"    folder_docs = loader.load()\n",
"\n",
"    # Assign filenames as metadata\n",
"    for doc in folder_docs:\n",
"        filename_md = os.path.basename(doc.metadata[\"source\"])\n",
"        filename, _ = os.path.splitext(filename_md)\n",
"        doc.metadata[\"filename\"] = filename\n",
"\n",
"    documents = folder_docs\n",
"\n",
"    # Split documents into chunks\n",
"    text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)\n",
"    chunks = text_splitter.split_documents(documents)\n",
"\n",
"    # Initialize embeddings\n",
"    embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n",
"\n",
"    # Delete previous vectorstore\n",
"    if os.path.exists(db_name):\n",
"        Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n",
"\n",
"    # Store in ChromaDB\n",
"    vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n",
"\n",
"    # Retrieve results\n",
"    collection = vectorstore._collection\n",
"    result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n",
"\n",
"    # Build the conversation chain over the fresh vectorstore\n",
"    llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n",
"    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n",
"    retriever = vectorstore.as_retriever(search_kwargs={\"k\": 35})\n",
"    global conversation_chain\n",
"    conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)\n",
"\n",
"    processed_text = \"**Processed Files:**\\n\\n\" + \"\\n\".join(f\"- {file}\" for file in processed_files)\n",
"    return result, processed_text"
]
},
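`process_files` can also be exercised outside the UI, since `shutil.copy` accepts plain paths just like the temp-file paths Gradio hands it. A sketch (the sample path is hypothetical and must exist on disk):

```python
# Driving process_files without the UI; the sample path is hypothetical.
result, summary = process_files(["./samples/getting-started.md"])
print(summary)                                   # "**Processed Files:** ..." markdown
print(f"{len(result['documents'])} chunks embedded")
```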
{
"cell_type": "code",
"execution_count": null,
"id": "48678d3a-0ab2-4aa4-aa9e-4160c6a9cb24",
"metadata": {},
"outputs": [],
"source": [
"def random_color():\n",
"    return f\"rgb({randint(0,255)},{randint(0,255)},{randint(0,255)})\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6caed889-9bb4-42ad-b1c2-da051aefc802",
"metadata": {},
"outputs": [],
"source": [
"def show_embeddings_2d(result):\n",
"    vectors = np.array(result['embeddings'])\n",
"    documents = result['documents']\n",
"    metadatas = result['metadatas']\n",
"    filenames = [metadata['filename'] for metadata in metadatas]\n",
"    filenames_unique = sorted(set(filenames))\n",
"\n",
"    # color assignment\n",
"    color_map = {name: random_color() for name in filenames_unique}\n",
"    colors = [color_map[name] for name in filenames]\n",
"\n",
"    tsne = TSNE(n_components=2, random_state=42, perplexity=4)\n",
"    reduced_vectors = tsne.fit_transform(vectors)\n",
"\n",
"    # Create the 2D scatter plot\n",
"    fig = go.Figure(data=[go.Scatter(\n",
"        x=reduced_vectors[:, 0],\n",
"        y=reduced_vectors[:, 1],\n",
"        mode='markers',\n",
"        marker=dict(size=5, color=colors, opacity=0.8),\n",
"        text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n",
"        hoverinfo='text'\n",
"    )])\n",
"\n",
"    fig.update_layout(\n",
"        title='2D Chroma Vector Store Visualization',\n",
"        xaxis_title='x',\n",
"        yaxis_title='y',\n",
"        width=800,\n",
"        height=600,\n",
"        margin=dict(r=20, b=10, l=10, t=40)\n",
"    )\n",
"\n",
"    return fig"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de993495-c8cd-4313-a6bb-7d27494ecc13",
"metadata": {},
"outputs": [],
"source": [
"def show_embeddings_3d(result):\n",
"    vectors = np.array(result['embeddings'])\n",
"    documents = result['documents']\n",
"    metadatas = result['metadatas']\n",
"    filenames = [metadata['filename'] for metadata in metadatas]\n",
"    filenames_unique = sorted(set(filenames))\n",
"\n",
"    # color assignment\n",
"    color_map = {name: random_color() for name in filenames_unique}\n",
"    colors = [color_map[name] for name in filenames]\n",
"\n",
"    tsne = TSNE(n_components=3, random_state=42)\n",
"    reduced_vectors = tsne.fit_transform(vectors)\n",
"\n",
"    fig = go.Figure(data=[go.Scatter3d(\n",
"        x=reduced_vectors[:, 0],\n",
"        y=reduced_vectors[:, 1],\n",
"        z=reduced_vectors[:, 2],\n",
"        mode='markers',\n",
"        marker=dict(size=5, color=colors, opacity=0.8),\n",
"        text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n",
"        hoverinfo='text'\n",
"    )])\n",
"\n",
"    fig.update_layout(\n",
"        title='3D Chroma Vector Store Visualization',\n",
"        scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n",
"        width=900,\n",
"        height=700,\n",
"        margin=dict(r=20, b=10, l=10, t=40)\n",
"    )\n",
"\n",
"    return fig"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b7bf62b-c559-4e97-8135-48cd8d97a40e",
"metadata": {},
"outputs": [],
"source": [
"def chat(question, history=None):\n",
"    result = conversation_chain.invoke({\"question\": question})\n",
"    return result[\"answer\"]\n",
"\n",
"def visualise_data(result):\n",
"    fig_2d = show_embeddings_2d(result)\n",
"    fig_3d = show_embeddings_3d(result)\n",
"    return fig_2d, fig_3d"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "99217109-fbee-4269-81c7-001e6f768a72",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
".btn {background-color: #1d53d1;}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e1429ea1-1d9f-4be6-b270-01997864c642",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks(css=css) as ui:\n",
"    gr.Markdown(\"# Markdown-Based Q&A with Visualization\")\n",
"    with gr.Row():\n",
"        file_input = gr.Files(file_types=[\".md\"], label=\"Upload Markdown Files\")\n",
"        with gr.Column(scale=1):\n",
"            processed_output = gr.Markdown(\"Progress\")\n",
"    with gr.Row():\n",
"        process_btn = gr.Button(\"Process Files\", elem_classes=[\"btn\"])\n",
"    with gr.Row():\n",
"        question = gr.Textbox(label=\"Chat\", lines=10)\n",
"        answer = gr.Markdown(label=\"Response\")\n",
"    with gr.Row():\n",
"        question_btn = gr.Button(\"Ask a Question\", elem_classes=[\"btn\"])\n",
"        clear_btn = gr.Button(\"Clear Output\", elem_classes=[\"btn\"])\n",
"    with gr.Row():\n",
"        plot_2d = gr.Plot(label=\"2D Visualization\")\n",
"        plot_3d = gr.Plot(label=\"3D Visualization\")\n",
"    with gr.Row():\n",
"        visualise_btn = gr.Button(\"Visualise Data\", elem_classes=[\"btn\"])\n",
"\n",
"    result = gr.State([])\n",
"    # Wire up the buttons: clear output, process files, chat, and visualise\n",
"    clear_btn.click(fn=lambda: (\"\", \"\"), inputs=[], outputs=[question, answer])\n",
"    process_btn.click(process_files, inputs=[file_input], outputs=[result, processed_output])\n",
"    question_btn.click(chat, inputs=[question], outputs=[answer])\n",
"    visualise_btn.click(visualise_data, inputs=[result], outputs=[plot_2d, plot_3d])\n",
"\n",
"# Launch Gradio app\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d3686048-ac29-4df1-b816-e58996913ef1",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}