101 changed files with 20707 additions and 829 deletions
@ -0,0 +1,240 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "9964872b-225d-4ced-93e4-fc5b279ec2ed", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Webpage English summarizer with user inputs (url, ollama-based LLM) " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4e49d399-d18c-4c91-8abc-cf3289e11e2f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"# from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n", |
||||
"import ollama, time\n", |
||||
"from tqdm import tqdm" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "46e7d809-248d-41b8-80e1-36b210041581", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define system prompt.\n", |
||||
"\n", |
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||
"and provides a detailed summary, ignoring text that might be navigation related. \\\n", |
||||
"Respond in markdown, in English.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e8bf237f-591f-4c32-9415-5d5d4e2522b8", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||
"\n", |
||||
"def user_prompt_for(website):\n", |
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||
"please provide a detailed summary of this website in markdown. \\\n", |
||||
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||
" user_prompt += website.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7d39ee6d-c670-41ba-a0b8-debd55bda8e3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# See how this function creates exactly the format above\n", |
||||
"\n", |
||||
"def messages_for(website):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "43e28ff5-2def-4a47-acdd-2e06c0666956", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Constants\n", |
||||
"\n", |
||||
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n", |
||||
"HEADERS = {\"Content-Type\": \"application/json\"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "32f4f481-81a3-479d-817b-4e754d9af46d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = HEADERS\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" \"\"\"\n", |
||||
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||
" \"\"\"\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f81cfd17-8208-4192-a59f-485ff3ea74e4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And now: call the ollama API wrapper and return the relevant component of the response\n", |
||||
"\n", |
||||
"def summarize(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL,\n", |
||||
" messages = messages_for(website)\n", |
||||
" )\n", |
||||
" return response['message']['content']" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7a9eedc6-2183-473d-84ca-b10d40e2a1e6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Ask the user the name of the url address\n", |
||||
"\n", |
||||
"url= str(input(\"\"\"\n", |
||||
"Please provide a valid url address:\n", |
||||
"https://\"\"\"))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5d012de2-0ef2-43db-9f51-fc7f989c3642", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Ask the user to select a valid model\n", |
||||
"\n", |
||||
"MODEL= str(input(\"\"\"\n", |
||||
"Please select a LLM:\n", |
||||
"(examples: llama3.2, deepseek-r1:1.5b)\n", |
||||
"\"\"\"))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1ac8c02e-4a62-448b-a231-8c6f65891811", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's just make sure the model is loaded\n", |
||||
"\n", |
||||
"!ollama pull {MODEL}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0544541f-11a8-4eb7-8eb6-bc032ed6d0d1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print('url: https://{0}\\nModel= {1}'.format(url, MODEL))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "45518950-f2c9-43af-b897-4fe8fe48dfd8", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"summary = summarize('https://'+ url)\n", |
||||
"for summ in tqdm(summary):\n", |
||||
" time.sleep(0.01)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "02c0c15e-216d-47c7-843d-ac27af02820b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display(Markdown(summary))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "985a3689-5827-4b15-b8d5-276f9b292afd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,273 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fad31e32-2e42-42ae-ae63-c15d90292839", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# First Project\n", |
||||
"Ollama -> Summary\n", |
||||
"transformers -> \"facebook/m2m100_418M\" for translation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5fb79a20-a455-4d27-91a1-91958af786c1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install transformers datasets torch\n", |
||||
"!pip install huggingface_hub" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e95ac7f2-5192-4f83-acf3-61df30cd3109", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"import requests\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"import json\n", |
||||
"import ollama" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "12276d74-0e79-4e66-9135-1c9d1a80b943", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"class Website:\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
"\n", |
||||
"huggingface_url = \"https://huggingface.co/learn/ml-for-3d-course\"\n", |
||||
"huggingface_website = Website(huggingface_url)\n", |
||||
"\n", |
||||
"huggingface_data = {\n", |
||||
" \"title\": huggingface_website.title,\n", |
||||
" \"text\": huggingface_website.text\n", |
||||
"}\n", |
||||
"print(huggingface_data)\n", |
||||
"\n", |
||||
"with open('ml_for_3d_course_data.json', 'w') as f:\n", |
||||
" json.dump(huggingface_data, f)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7d74c85c-3e09-4514-bde4-4cafc4910c52", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# huggingface_data 'text' value\n", |
||||
"huggingface_text = huggingface_data['text']\n", |
||||
"\n", |
||||
"# Summary\n", |
||||
"response_summary = ollama.chat(model=\"llama3.2:latest\", messages=[{\"role\": \"user\", \"content\": f\"Summarize the following text: {huggingface_text}\"}])\n", |
||||
"print(response_summary)\n", |
||||
"\n", |
||||
"# print summary\n", |
||||
"summary_huggingface_text = response_summary.message['content']\n", |
||||
"print(\"Summary Text:\", summary_huggingface_text)\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d13764d5-cb76-46c5-bbe6-d132b31a9ea6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# HuggingFace Translation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "08405038-4115-487f-9efc-de58572453c1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"class Website:\n", |
||||
" url: str\n", |
||||
" title: str\n", |
||||
" text: str\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
"\n", |
||||
"url = \"https://huggingface.co/learn/ml-for-3d-course\"\n", |
||||
"website = Website(url)\n", |
||||
"print(website.title) \n", |
||||
"print(website.text[:1000])\n", |
||||
"\n", |
||||
"data = {\n", |
||||
" \"title\": website.title,\n", |
||||
" \"text\": website.text\n", |
||||
"}\n", |
||||
"\n", |
||||
"with open('ml_for_3d_course_data.json', 'w') as f:\n", |
||||
" json.dump(data, f)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0632352f-4b16-4125-83bf-f3cc3aabd659", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(data)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a85f8625-725d-4d7f-8cb7-8da4276f81cf", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install sacremoses" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c800cea4-f4a4-4e41-9637-31ff11afb256", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import json\n", |
||||
"from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer\n", |
||||
"\n", |
||||
"# Load the M2M100 model and tokenizer\n", |
||||
"model_name = \"facebook/m2m100_418M\"\n", |
||||
"model = M2M100ForConditionalGeneration.from_pretrained(model_name)\n", |
||||
"tokenizer = M2M100Tokenizer.from_pretrained(model_name)\n", |
||||
"\n", |
||||
"# Load the saved JSON file\n", |
||||
"with open('ml_for_3d_course_data.json', 'r') as f:\n", |
||||
" data = json.load(f)\n", |
||||
"\n", |
||||
"# Extract text from the loaded data\n", |
||||
"text = data[\"text\"]\n", |
||||
"\n", |
||||
"# Set the source language to English and target language to Korean\n", |
||||
"source_lang = \"en\"\n", |
||||
"target_lang = \"ko\"\n", |
||||
"\n", |
||||
"# Set the language for tokenizer (important for M2M100)\n", |
||||
"tokenizer.src_lang = source_lang\n", |
||||
"tokenizer.tgt_lang = target_lang\n", |
||||
"\n", |
||||
"# Split text into smaller chunks if it's too large\n", |
||||
"# This step ensures we don't exceed the model's maximum length (512 tokens)\n", |
||||
"max_input_length = 512\n", |
||||
"chunks = [text[i:i+max_input_length] for i in range(0, len(text), max_input_length)]\n", |
||||
"\n", |
||||
"print(chunks)\n", |
||||
"# Initialize a list to hold the translated text\n", |
||||
"translated_chunks = []\n", |
||||
"\n", |
||||
"# Iterate through each chunk and translate it\n", |
||||
"for chunk in chunks:\n", |
||||
" # Tokenize the chunk\n", |
||||
" encoded = tokenizer(chunk, return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n", |
||||
"\n", |
||||
" # Generate translation from the model, forcing the output to be in Korean\n", |
||||
" generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(target_lang), max_length=512)\n", |
||||
"\n", |
||||
" # Decode the translated tokens to text\n", |
||||
" translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]\n", |
||||
" translated_chunks.append(translated_text)\n", |
||||
"\n", |
||||
"# Combine all translated chunks back together\n", |
||||
"final_translated_text = ' '.join(translated_chunks)\n", |
||||
"print(\"Translated Text:\", final_translated_text)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ffe0f264-a588-422f-a6e1-b60504d1e02c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import json\n", |
||||
"import requests\n", |
||||
"\n", |
||||
"# Ollama API URL 설정\n", |
||||
"ollama_url = \"http://localhost:11411/v1/models/facebook/m2m100_418M/generate\"\n", |
||||
"\n", |
||||
"# 저장된 JSON 파일 로드\n", |
||||
"with open('ml_for_3d_course_data.json', 'r') as f:\n", |
||||
" data = json.load(f)\n", |
||||
"\n", |
||||
"# 텍스트 추출\n", |
||||
"course_text = data[\"text\"]\n", |
||||
"\n", |
||||
"# 번역할 소스 언어 및 타겟 언어 설정\n", |
||||
"source_language = \"en\"\n", |
||||
"target_language = \"ko\"\n", |
||||
"\n", |
||||
"# 데이터 준비\n", |
||||
"payload = {\n", |
||||
" \"input_text\": course_text,\n", |
||||
" \"src_lang\": source_language,\n", |
||||
" \"tgt_lang\": target_language\n", |
||||
"}\n", |
||||
"\n", |
||||
"# API 호출\n", |
||||
"response = requests.post(ollama_url, json=payload)\n", |
||||
"\n", |
||||
"# 응답 확인\n", |
||||
"if response.status_code == 200:\n", |
||||
" translated_course_text = response.json().get(\"translated_text\", \"Translation failed\")\n", |
||||
" print(\"Translated Course Text:\", translated_course_text)\n", |
||||
"else:\n", |
||||
" print(f\"Error {response.status_code}: {response.text}\")\n" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,611 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# YOUR FIRST LAB\n", |
||||
"### Please read this section. This is valuable to get you prepared, even if it's a long read -- it's important stuff.\n", |
||||
"\n", |
||||
"## Your first Frontier LLM Project\n", |
||||
"\n", |
||||
"Let's build a useful LLM solution - in a matter of minutes.\n", |
||||
"\n", |
||||
"By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", |
||||
"\n", |
||||
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", |
||||
"\n", |
||||
"Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", |
||||
"\n", |
||||
"## If you're new to Jupyter Lab\n", |
||||
"\n", |
||||
"Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", |
||||
"\n", |
||||
"I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", |
||||
"\n", |
||||
"## If you're new to the Command Line\n", |
||||
"\n", |
||||
"Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", |
||||
"\n", |
||||
"## If you'd prefer to work in IDEs\n", |
||||
"\n", |
||||
"If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n", |
||||
"If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", |
||||
"\n", |
||||
"## If you'd like to brush up your Python\n", |
||||
"\n", |
||||
"I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", |
||||
"`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", |
||||
"\n", |
||||
"## I am here to help\n", |
||||
"\n", |
||||
"If you have any problems at all, please do reach out. \n", |
||||
"I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n", |
||||
"And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done 😂 \n", |
||||
"\n", |
||||
"## More troubleshooting\n", |
||||
"\n", |
||||
"Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", |
||||
"\n", |
||||
"## If this is old hat!\n", |
||||
"\n", |
||||
"If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n", |
||||
"\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Please read - important note</h2>\n", |
||||
" <span style=\"color:#900;\">The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, <b>after</b> watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#f71;\">Treat these labs as a resource</h2>\n", |
||||
" <span style=\"color:#f71;\">I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business value of these exercises</h2>\n", |
||||
" <span style=\"color:#181;\">A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Connecting to OpenAI\n", |
||||
"\n", |
||||
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", |
||||
"\n", |
||||
"## Troubleshooting if you have problems:\n", |
||||
"\n", |
||||
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", |
||||
"\n", |
||||
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", |
||||
"\n", |
||||
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", |
||||
"\n", |
||||
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", |
||||
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Let's make a quick call to a Frontier model to get started, as a preview!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", |
||||
"\n", |
||||
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", |
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", |
||||
"print(response.choices[0].message.content)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "2aa190e5-cb31-456a-96cc-db109919cd78", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## OK onwards with our first project" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c5e793b2-6775-426a-a139-4848291d0463", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" \"\"\"\n", |
||||
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||
" \"\"\"\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's try one out. Change the website and add print statements to follow along.\n", |
||||
"\n", |
||||
"ed = Website(\"https://edwarddonner.com\")\n", |
||||
"print(ed.title)\n", |
||||
"print(ed.text)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Types of prompts\n", |
||||
"\n", |
||||
"You may know this already - but if not, you will get very familiar with it!\n", |
||||
"\n", |
||||
"Models like GPT4o have been trained to receive instructions in a particular way.\n", |
||||
"\n", |
||||
"They expect to receive:\n", |
||||
"\n", |
||||
"**A system prompt** that tells them what task they are performing and what tone they should use\n", |
||||
"\n", |
||||
"**A user prompt** -- the conversation starter that they should reply to" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||
"\n", |
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||
"and provides a short summary, ignoring text that might be navigation related. \\\n", |
||||
"Respond in markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||
"\n", |
||||
"def user_prompt_for(website):\n", |
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||
"please provide a short summary of this website in markdown. \\\n", |
||||
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||
" user_prompt += website.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(user_prompt_for(ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Messages\n", |
||||
"\n", |
||||
"The API from OpenAI expects to receive messages in a particular structure.\n", |
||||
"Many of the other APIs share this structure:\n", |
||||
"\n", |
||||
"```\n", |
||||
"[\n", |
||||
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n", |
||||
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n", |
||||
"]\n", |
||||
"```\n", |
||||
"\n", |
||||
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"messages = [\n", |
||||
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", |
||||
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", |
||||
"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# To give you a preview -- calling OpenAI with system and user messages:\n", |
||||
"\n", |
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", |
||||
"print(response.choices[0].message.content)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## And now let's build useful messages for GPT-4o-mini, using a function" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# See how this function creates exactly the format above\n", |
||||
"\n", |
||||
"def messages_for(website):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Try this out, and then try for a few more websites\n", |
||||
"\n", |
||||
"messages_for(ed)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Time to bring it together - the API for OpenAI is very simple!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And now: call the OpenAI API. You will get very familiar with this!\n", |
||||
"\n", |
||||
"def summarize(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages_for(website)\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"summarize(\"https://edwarddonner.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function to display this nicely in the Jupyter output, using markdown\n", |
||||
"\n", |
||||
"def display_summary(url):\n", |
||||
" summary = summarize(url)\n", |
||||
" display(Markdown(summary))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3018853a-445f-41ff-9560-d925d1774b2f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://edwarddonner.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Let's try more websites\n", |
||||
"\n", |
||||
"Note that this will only work on websites that can be scraped using this simplistic approach.\n", |
||||
"\n", |
||||
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", |
||||
"\n", |
||||
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", |
||||
"\n", |
||||
"But many websites will work just fine!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "45d83403-a24c-44b5-84ac-961449b4008f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://cnn.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "75e9fd40-b354-4341-991e-863ef2e59db7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", |
||||
"\n", |
||||
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>\n", |
||||
"\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n", |
||||
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Step 1: Create your prompts\n", |
||||
"\n", |
||||
"system_prompt = \"You are a head chef of a Michelin-star restaurant who has a diverse skillset \\\n", |
||||
"and loves to teach new and interesting recipes for home chefs. Given input of several ingredients, \\\n", |
||||
"provide step-by-step instructions for what could be cooked for any cuisine of your choice. Respond in markdown.\"\n", |
||||
"\n", |
||||
"user_prompt = \"\"\"\n", |
||||
"You are a Michelin-starred head chef with a passion for teaching home chefs. \n", |
||||
"I have the following ingredients: \n", |
||||
"\n", |
||||
"**[Chicken breast, Bell peppers, cherry tomatoes, spinach, Basmati rice,\n", |
||||
"Garlic, basil, black pepper, smoked paprika]** \n", |
||||
"\n", |
||||
"Can you provide a step-by-step recipe using these ingredients? You can choose any cuisine that best fits them. \n", |
||||
"Please include cooking times, techniques, and any chef tips for enhancing flavors. \n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"# Step 2: Make the messages list\n", |
||||
"\n", |
||||
"messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
" ]\n", |
||||
"\n", |
||||
"# Step 3: Call OpenAI\n", |
||||
"\n", |
||||
"response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages\n", |
||||
" )\n", |
||||
"\n", |
||||
"\n", |
||||
"\n", |
||||
"# Step 4: print the result\n", |
||||
"def display_summary(summary):\n", |
||||
" display(Markdown(summary))\n", |
||||
"display_summary(response.choices[0].message.content)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## An extra exercise for those who enjoy web scraping\n", |
||||
"\n", |
||||
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Sharing your code\n", |
||||
"\n", |
||||
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", |
||||
"\n", |
||||
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", |
||||
"\n", |
||||
"Here are good instructions courtesy of an AI friend: \n", |
||||
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f4484fcf-8b39-4c3f-9674-37970ed71988", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,152 @@
|
||||
{ |
||||
"nbformat": 4, |
||||
"nbformat_minor": 0, |
||||
"metadata": { |
||||
"colab": { |
||||
"provenance": [] |
||||
}, |
||||
"kernelspec": { |
||||
"name": "python3", |
||||
"display_name": "Python 3" |
||||
}, |
||||
"language_info": { |
||||
"name": "python" |
||||
} |
||||
}, |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"source": [ |
||||
"# Getting MOM from call transcripts" |
||||
], |
||||
"metadata": { |
||||
"id": "99Z21wE7xpKS" |
||||
} |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"source": [ |
||||
"Import necessary libraries" |
||||
], |
||||
"metadata": { |
||||
"id": "YZMeexE8M_Pp" |
||||
} |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n" |
||||
], |
||||
"metadata": { |
||||
"id": "u5DCVg0Mxj5T" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": { |
||||
"id": "i0V11JQ2az-C" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"#The below code can be uncommented if using a .env file\n", |
||||
"\n", |
||||
"#from dotenv import load_dotenv\n", |
||||
"#load_dotenv(override=True)\n", |
||||
"#api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"#I am using google colab to import api_key\n", |
||||
"from google.colab import userdata\n", |
||||
"api_key=userdata.get('gemini_api')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||
"elif not api_key.startswith(\"AIza\"):\n", |
||||
" print(\"An API key was found, but it doesn't start with AIza (the expected prefix for Google Gemini API keys); please check you're using the right key - see troubleshooting notebook\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# A class to represent a Transcript\n", |
||||
"from pathlib import Path\n", |
||||
"class Transcript:\n", |
||||
" def __init__(self, file_path):\n", |
||||
" self.file_path=file_path\n", |
||||
" self.content=Path(file_path).read_text(encoding='utf-8')\n" |
||||
], |
||||
"metadata": { |
||||
"id": "j6UTsnTEyWZ-" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"# Source of the text file -\"https://raw.githubusercontent.com/GeminiLn/EarningsCall_Dataset/refs/heads/master/3M%20Company_20170425/Text.txt\"\n", |
||||
"path = '/content/Text.txt' # Specify the path of file you want to use - format should be .txt\n", |
||||
"t=Transcript(path)\n" |
||||
], |
||||
"metadata": { |
||||
"id": "hquePU_mzZ7s" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"\n", |
||||
"system_prompt = \"You are expert at taking Meeting Notes & given the below transcript , create an MOM (Minutes of meeting)\"" |
||||
], |
||||
"metadata": { |
||||
"id": "ex5DB7M8L7KT" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"from google import genai\n", |
||||
"from google.genai import types\n", |
||||
"\n", |
||||
"client = genai.Client(api_key=api_key)\n", |
||||
"\n", |
||||
"response = client.models.generate_content(\n", |
||||
" model=\"gemini-2.0-flash\",\n", |
||||
" config=types.GenerateContentConfig(\n", |
||||
" system_instruction=system_prompt,\n", |
||||
" max_output_tokens=500,\n", |
||||
" temperature=0.1\n", |
||||
" ),\n", |
||||
" contents=t.content,\n", |
||||
")\n", |
||||
"\n", |
||||
"print(response.text)" |
||||
], |
||||
"metadata": { |
||||
"id": "wcpJ34qfMKmV" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
} |
||||
] |
||||
} |
@ -0,0 +1,87 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "44aba2a0-c6eb-4fc1-a5cc-0a8f8679dbb8", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Michelin-star cook..." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d4d58124-5e9a-4f5a-9e0a-ff74f43896a8", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "67dc3099-2ccc-4ee8-8ff2-0dbbe4ae2fcb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are a professional chef in a Michelin-star restaurant. You will help me cook restaurant-style dishes using the ingredients I have left in my refrigerator.\\\n", |
||||
"You will provide detailed instructions with precise times and measurements in grams and include calorie information for raw ingredients, not cooked ones.\\\n", |
||||
"Add the caloric information at the end. Your responses should be formatted in Markdown.\"\n", |
||||
"\n", |
||||
"user_prompt = \"\"\"\n", |
||||
"Help me with a recipe using the ingredients I have left in the refrigerator. I have spinach, eggs, pasta, rice, chicken, beef, carrots, potatoes, butter, milk, cheese, tomatoes, red peppers, and all spices in the pantry.\\n\\n\n", |
||||
"\"\"\"\n", |
||||
"\n", |
||||
"messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt},\n", |
||||
"]\n", |
||||
" \n", |
||||
"response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages\n", |
||||
" )\n", |
||||
"\n", |
||||
"# Step 4: print the result in markdown format\n", |
||||
"pretty_response = Markdown(response.choices[0].message.content)\n", |
||||
"display(pretty_response)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,127 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0a512c2a-55e7-40e1-ab17-88b7034ca09a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Imports\n", |
||||
"import openai\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"from IPython.display import Markdown, display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1aa8dd82-6b5e-4dbd-a2ee-8367e796a51f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - head over to the troubleshooting notebook!\")\n", |
||||
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||
" print(\"An API key was found, but it doesn't start sk-proj... make sure you're using the right key (Check troubleshooting notebook)\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like white space was found in beginning or end. (Check troubleshooting notebook)\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2acd579b-846c-4aa6-ba6c-1cc1a5a2eeb6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Input the system prompt\n", |
||||
"system_prompt = \"\"\"You are a top-notch AI music expert that has knowledge of all genres, songs, and artists. You need to google search lyrics. You have the following rules:\\\n", |
||||
"1. Carefully break down what type of recommendation the user wants and the context.\\\n", |
||||
"2. If asked to recommend genres similar to a song or artists please identify the top 3 genres.\\\n", |
||||
"3. If asked to recommend artists from songs or genres then recommend the top 5 artists.\n", |
||||
"4. If asked to recommend songs from genres or artist than recommend the top 10 songs.\n", |
||||
"5. If asked for a general recommendation give them the top 5 songs based off of context.\\\n", |
||||
"6. Be flexible and adaptable with recommendations and consider the context the user might ask.\n", |
||||
"7. always respond in markdown.\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3c1cf212-538c-4e9a-8da5-337bd7b6197c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# music recommender function\n", |
||||
"def music_recommender(user_prompt):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||
" ]\n", |
||||
" \n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=\"gpt-4\",\n", |
||||
" messages=messages,\n", |
||||
" max_tokens=300\n", |
||||
" )\n", |
||||
" \n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4f277561-af8b-4715-90e7-6ebaadeb15d0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# User prompt (Change this to fit your needs!)\n", |
||||
"user_prompt = \"Can you recommend me songs from Taylor Swift\"\n", |
||||
"\n", |
||||
"# Example usage\n", |
||||
"response = music_recommender(user_prompt)\n", |
||||
"display(Markdown(response))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bb869d36-de14-4e46-9087-223d6b257efa", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,444 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "it1JLoxrSqO1", |
||||
"metadata": { |
||||
"id": "it1JLoxrSqO1" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install openai python-docx python-dotenv gradio openpyxl" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "950a084a-7f92-4669-af62-f07cb121da56", |
||||
"metadata": { |
||||
"id": "950a084a-7f92-4669-af62-f07cb121da56" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"#from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"from docx import Document" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d0548135-ef16-4102-a55a-cea888a51c29", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import pandas as pd\n", |
||||
"import re\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d", |
||||
"metadata": { |
||||
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"class ReqDoc:\n", |
||||
" def __init__(self, file_path):\n", |
||||
" self.file_path = file_path\n", |
||||
"\n", |
||||
" def extract(self):\n", |
||||
" \"\"\"\n", |
||||
" Reads the content of a .docx file and returns the paragraphs as a list of strings.\n", |
||||
" \"\"\"\n", |
||||
" try:\n", |
||||
" # Check if the file exists\n", |
||||
" if not os.path.exists(self.file_path):\n", |
||||
" raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n", |
||||
"\n", |
||||
" # Attempt to open and read the document\n", |
||||
" doc = Document(self.file_path)\n", |
||||
" text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n", |
||||
" return text\n", |
||||
"\n", |
||||
" except FileNotFoundError as fnf_error:\n", |
||||
" print(fnf_error)\n", |
||||
" return None\n", |
||||
" except Exception as e:\n", |
||||
" print(f\"An error occurred: {e}\")\n", |
||||
" return None\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9", |
||||
"metadata": { |
||||
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Initialize and constants\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n", |
||||
" print(\"API key looks good!\")\n", |
||||
"else:\n", |
||||
" print(\"There might be a problem with your API key. Please check!\")\n", |
||||
" \n", |
||||
"MODEL = 'gpt-4o-mini'\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb", |
||||
"metadata": { |
||||
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Set up system prompt for extracting just the requirements from the document\n", |
||||
"\n", |
||||
"req_doc_system_prompt = \"You are provided with a complete requirements specifications document. \\\n", |
||||
"You are able to decide which content from that document are related to actual requirements, identify each requirement as \\\n", |
||||
"functional or non-functional and list them all.\\n\"\n", |
||||
"req_doc_system_prompt += \"If the document is empty or does not contain requirements, or if you cannot extract them, please respond as such.\\\n", |
||||
"Do not make up your own requirements. \\n\"\n", |
||||
"req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"req_doc_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"requirements\": [\n", |
||||
" {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n", |
||||
" {\"RequirementNo\": \"FR-02\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n", |
||||
" {\"RequirementNo\": \"NFR-01\", \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n", |
||||
" {\"RequirementNo\": \"NFR-02\", \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "20460e45-c1b7-4dc4-ab07-932235c19895", |
||||
"metadata": { |
||||
"id": "20460e45-c1b7-4dc4-ab07-932235c19895" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Set up user prompt, sending in the requirements doc as input and calling the ReqDoc.extract function. Key to note here is the explicit instructions to\n", |
||||
"#respond in JSON format.\n", |
||||
"\n", |
||||
"def req_doc_user_prompt(doc):\n", |
||||
" user_prompt = \"Here is the contents from a requirement document.\\n\"\n", |
||||
" user_prompt += f\"{doc.extract()} \\n\"\n", |
||||
" user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n", |
||||
"paragraphs such as Approvers, table of contents and similar sections which are not really requirements.\\\n", |
||||
"You must respond in a JSON format\"\n", |
||||
" user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n", |
||||
" user_prompt = user_prompt[:25_000] # Truncate if more than 25,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891", |
||||
"metadata": { |
||||
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Function to call chatgpt-4o-mini model with the user and system prompts set above and returning the json formatted result obtained from chatgpt\n", |
||||
"def get_requirements(doc):\n", |
||||
" reqdoc = ReqDoc(doc)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": req_doc_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" return json.loads(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e", |
||||
"metadata": { |
||||
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Uncomment and run this if you want to see the extracted requriements in json format.\n", |
||||
"#get_requirements(\"reqdoc.docx\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1fe8618c-1dfe-4030-bad8-405731294c93", |
||||
"metadata": { |
||||
"id": "1fe8618c-1dfe-4030-bad8-405731294c93" |
||||
}, |
||||
"source": [ |
||||
"### Next, we will make another call to gpt-4o-mini" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b", |
||||
"metadata": { |
||||
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Set up system prompt to ask for test cases in table format\n", |
||||
"system_prompt = \"You are an assistant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \\\n", |
||||
"You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n", |
||||
"The table will contain the following columns. \\\n", |
||||
"1.S No\\\n", |
||||
"2.Requirement No\\\n", |
||||
"3.Requirement Description\\\n", |
||||
"4.Test Case ID\\\n", |
||||
"5.Test case summary\\\n", |
||||
"6.Test case description\\\n", |
||||
"7.Success criteria \\n\"\n", |
||||
"system_prompt += \"If you are provided with an empty list, ask for a proper requirement doc\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5", |
||||
"metadata": { |
||||
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Set up user prompt passing in the req doc file. This in turn will call the get_requirements function, which will make a call to chatgpt.\n", |
||||
"\n", |
||||
"def get_testcase_user_prompt(reqdoc):\n", |
||||
" user_prompt = \"You are looking at the following list of requirements. \\n\"\n", |
||||
" user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n", |
||||
" user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n", |
||||
" user_prompt += user_prompt[:25000]\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5b2a2b46-9d9c-416c-b189-3007b4d26d76", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#This is the 2nd call to chatgpt to get test cases. display(Markdown) will take care of producing a neatly formatted table output.\n", |
||||
"def create_testcase_doc_gradio(response, is_response_ready, is_cleared, file_input):\n", |
||||
" if is_cleared or file_input == None: # Prevent OpenAI call if \"Clear\" was clicked\n", |
||||
" return \"\", False\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_testcase_user_prompt(file_input)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" #Modified for Gradio\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" #print(result)\n", |
||||
" yield result, False" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2bb96a11-063e-4b20-9880-71fa9ea4d3f7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n", |
||||
"force_dark_mode = \"\"\"\n", |
||||
"function refresh() {\n", |
||||
" const url = new URL(window.location);\n", |
||||
" if (url.searchParams.get('__theme') !== 'dark') {\n", |
||||
" url.searchParams.set('__theme', 'dark');\n", |
||||
" window.location.href = url.href;\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5c81c766-9613-4614-b88d-410654672b89", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def show_or_hide_save_button(response, is_response_ready, is_cleared):\n", |
||||
" if is_cleared or response == None:\n", |
||||
" return \"\", False\n", |
||||
" table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n", |
||||
" table_match = re.search(table_pattern, response)\n", |
||||
" if table_match:\n", |
||||
" return response, True #(response, is_response_ready)\n", |
||||
" else:\n", |
||||
" return response, False #(response, is_response_ready)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a5f5d8e7-d29c-4f40-8d57-a9911bb7c47e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def extract_table_from_markdown(response):\n", |
||||
" # Regular expression to match Markdown tables\n", |
||||
" table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n", |
||||
" table_match = re.search(table_pattern, response)\n", |
||||
"\n", |
||||
" if table_match:\n", |
||||
" table_data = table_match.group(0)\n", |
||||
" # Process the table into a format pandas can read\n", |
||||
" rows = table_data.strip().split(\"\\n\")\n", |
||||
" data = [row.split(\"|\")[1:-1] for row in rows] # Split columns by '|'\n", |
||||
"\n", |
||||
" # Convert to DataFrame\n", |
||||
" df = pd.DataFrame(data[1:], columns=data[0]) # First row is the header\n", |
||||
"\n", |
||||
" # Save to Excel\n", |
||||
" output_file = \"test_cases.xlsx\"\n", |
||||
" df.to_excel(output_file, index=False)\n", |
||||
"\n", |
||||
" return output_file\n", |
||||
" else:\n", |
||||
" return None" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c1380b11-3e28-40de-ab1a-93a5fd73cf81", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def extract_and_save_button(response, is_cleared):\n", |
||||
" if is_cleared:\n", |
||||
" return None # Do nothing if the file was cleared\n", |
||||
" # This function will be triggered when the user clicks \"Save as Excel\"\n", |
||||
" output_file = extract_table_from_markdown(response)\n", |
||||
" if output_file:\n", |
||||
" return output_file\n", |
||||
" else:\n", |
||||
" return \"No table found in the provided input.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3a532b42-9f81-4c75-8be4-e40d621a6b35", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Gradio interface\n", |
||||
"with gr.Blocks(js=force_dark_mode) as demo:\n", |
||||
" gr.HTML(\"<h2 style='text-align: center; color: white;'>📄 Test case automation</h2>\")\n", |
||||
" with gr.Row():\n", |
||||
" file_input = gr.File(label=\"Upload your requirements docx file\", file_types=[\".docx\"])\n", |
||||
" with gr.Row():\n", |
||||
" response = gr.Markdown()\n", |
||||
" # Button to save the table as Excel file (optional)\n", |
||||
" save_button = gr.Button(\"Download Table as Excel\", visible=False)\n", |
||||
" file_output = gr.File(label=\"Download Excel File\", visible=False) \n", |
||||
" # State variable to track if response is ready\n", |
||||
" is_response_ready = gr.State(False)\n", |
||||
" with gr.Row():\n", |
||||
" clear_button = gr.Button(\"Clear\")\n", |
||||
" # State variable to track if clear button is clicked\n", |
||||
" is_cleared = gr.State(False)\n", |
||||
"\n", |
||||
" # Function to show \"Processing...\" message\n", |
||||
" def show_processing(is_cleared, file_input):\n", |
||||
" if is_cleared or file_input==None:\n", |
||||
" return None, False, is_cleared, file_input # Do nothing if the file was cleared\n", |
||||
" #return gr.HTML(\"<h6 style='text-align: left; color: #ffffffff;'>⌛ Processing your file... Please wait!</h6>\"), False, is_cleared, file_input\n", |
||||
" return \"⌛ Processing your file... Please wait!\", False, is_cleared, file_input\n", |
||||
" \n", |
||||
" # Trigger response only if the file was uploaded and not cleared\n", |
||||
" file_input.change(\n", |
||||
" lambda _: False, # Directly set is_cleared to False\n", |
||||
" inputs=[file_input],\n", |
||||
" outputs=[is_cleared]\n", |
||||
" ).then(\n", |
||||
" show_processing, inputs=[is_cleared, file_input], outputs=[response, is_response_ready, is_cleared, file_input]\n", |
||||
" ).then(\n", |
||||
" create_testcase_doc_gradio, inputs=[response, is_response_ready, is_cleared, file_input], outputs=[response, is_response_ready]\n", |
||||
" ).then(\n", |
||||
" show_or_hide_save_button, inputs=[response, is_response_ready, is_cleared], outputs=[response, is_response_ready]\n", |
||||
" ).then(\n", |
||||
" lambda _, ready: (gr.update(visible=ready), gr.update(visible=ready)), inputs=[response, is_response_ready], outputs=[save_button,file_output])\n", |
||||
"\n", |
||||
" #.then() passes the previous function outputs as inputs to the next function\n", |
||||
"\n", |
||||
" # Button action to extract and save table as an Excel file\n", |
||||
" save_button.click(extract_and_save_button, inputs=[response, is_cleared], outputs=file_output)\n", |
||||
" \n", |
||||
" # Clear button resets both file and output while setting is_cleared to True\n", |
||||
" clear_button.click(lambda: (None, None, None, True), inputs=None, outputs=[file_input, file_output, response, is_cleared]) \n", |
||||
"\n", |
||||
"# Launch Gradio app\n", |
||||
"demo.launch(share=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cd5314b2-ee91-49bd-9d40-558775d44382", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"colab": { |
||||
"provenance": [] |
||||
}, |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,632 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# YOUR FIRST LAB\n", |
||||
"## Please read this. This is super-critical to get you prepared; there's no fluff here!\n", |
||||
"\n", |
||||
"## Your first Frontier LLM Project\n", |
||||
"\n", |
||||
"Let's build a useful LLM solution - in a matter of minutes.\n", |
||||
"\n", |
||||
"By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", |
||||
"\n", |
||||
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", |
||||
"\n", |
||||
"Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", |
||||
"\n", |
||||
"## If you're new to Jupyter Lab\n", |
||||
"\n", |
||||
"Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", |
||||
"\n", |
||||
"I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", |
||||
"\n", |
||||
"## If you're new to the Command Line\n", |
||||
"\n", |
||||
"Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n", |
||||
"Linux people, something tells me you could teach _me_ a thing or two about the command line!\n", |
||||
"\n", |
||||
"## If you'd prefer to work in IDEs\n", |
||||
"\n", |
||||
"If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n", |
||||
"If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n", |
||||
"\n", |
||||
"## If you'd like to brush up your Python\n", |
||||
"\n", |
||||
"I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n", |
||||
"`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n", |
||||
"\n", |
||||
"## I am here to help\n", |
||||
"\n", |
||||
"If you have any problems at all, please do reach out. \n", |
||||
"I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n", |
||||
"And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done 😂 \n", |
||||
"\n", |
||||
"## More troubleshooting\n", |
||||
"\n", |
||||
"Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n", |
||||
"\n", |
||||
"## If this is old hat!\n", |
||||
"\n", |
||||
"If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n", |
||||
"\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Please read - important note</h2>\n", |
||||
" <span style=\"color:#900;\">The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, <b>after</b> watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#f71;\">Treat these labs as a resource</h2>\n", |
||||
" <span style=\"color:#f71;\">I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business value of these exercises</h2>\n", |
||||
" <span style=\"color:#181;\">A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Connecting to OpenAI\n", |
||||
"\n", |
||||
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", |
||||
"\n", |
||||
"## Troubleshooting if you have problems:\n", |
||||
"\n", |
||||
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", |
||||
"\n", |
||||
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", |
||||
"\n", |
||||
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", |
||||
"\n", |
||||
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import httpx\n", |
||||
"openai = OpenAI(http_client=httpx.Client(verify=False))\n", |
||||
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", |
||||
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Let's make a quick call to a Frontier model to get started, as a preview!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", |
||||
"\n", |
||||
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", |
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", |
||||
"print(response.choices[0].message.content)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "2aa190e5-cb31-456a-96cc-db109919cd78", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## OK onwards with our first project" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c5e793b2-6775-426a-a139-4848291d0463", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" \"\"\"\n", |
||||
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||
" \"\"\"\n", |
||||
" self.url = url\n", |
||||
" requests.packages.urllib3.disable_warnings()\n", |
||||
" response = requests.get(url, headers=headers, verify=False)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's try one out. Change the website and add print statements to follow along.\n", |
||||
"ed = Website(\"http://edwarddonner.com\")\n", |
||||
"print(ed.title)\n", |
||||
"print(ed.text)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Types of prompts\n", |
||||
"\n", |
||||
"You may know this already - but if not, you will get very familiar with it!\n", |
||||
"\n", |
||||
"Models like GPT4o have been trained to receive instructions in a particular way.\n", |
||||
"\n", |
||||
"They expect to receive:\n", |
||||
"\n", |
||||
"**A system prompt** that tells them what task they are performing and what tone they should use\n", |
||||
"\n", |
||||
"**A user prompt** -- the conversation starter that they should reply to" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||
"\n", |
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||
"and provides a short summary, ignoring text that might be navigation related. \\\n", |
||||
"Respond in markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||
"\n", |
||||
"def user_prompt_for(website):\n", |
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||
"please provide a short summary of this website in markdown. \\\n", |
||||
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||
" user_prompt += website.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(user_prompt_for(ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Messages\n", |
||||
"\n", |
||||
"The API from OpenAI expects to receive messages in a particular structure.\n", |
||||
"Many of the other APIs share this structure:\n", |
||||
"\n", |
||||
"```\n", |
||||
"[\n", |
||||
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n", |
||||
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n", |
||||
"]\n", |
||||
"```\n", |
||||
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"messages = [\n", |
||||
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", |
||||
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", |
||||
"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# To give you a preview -- calling OpenAI with system and user messages:\n", |
||||
"\n", |
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", |
||||
"print(response.choices[0].message.content)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## And now let's build useful messages for GPT-4o-mini, using a function" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# See how this function creates exactly the format above\n", |
||||
"\n", |
||||
"def messages_for(website):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Try this out, and then try for a few more websites\n", |
||||
"\n", |
||||
"messages_for(ed)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Time to bring it together - the API for OpenAI is very simple!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And now: call the OpenAI API. You will get very familiar with this!\n", |
||||
"\n", |
||||
"def summarize(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages_for(website)\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"summarize(\"https://edwarddonner.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function to display this nicely in the Jupyter output, using markdown\n", |
||||
"\n", |
||||
"def display_summary(url):\n", |
||||
" summary = summarize(url)\n", |
||||
" display(Markdown(summary))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3018853a-445f-41ff-9560-d925d1774b2f", |
||||
"metadata": { |
||||
"scrolled": true |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://edwarddonner.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://openai.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Let's try more websites\n", |
||||
"\n", |
||||
"Note that this will only work on websites that can be scraped using this simplistic approach.\n", |
||||
"\n", |
||||
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", |
||||
"\n", |
||||
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", |
||||
"\n", |
||||
"But many websites will work just fine!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "45d83403-a24c-44b5-84ac-961449b4008f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://cnn.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "75e9fd40-b354-4341-991e-863ef2e59db7", |
||||
"metadata": { |
||||
"scrolled": true |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_summary(\"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", |
||||
"\n", |
||||
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>\n", |
||||
"\n", |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n", |
||||
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A small exercise to feed the llm with image alt text and return a funny tweet.\n", |
||||
"\n", |
||||
"# Step 1: Create your prompts\n", |
||||
"import json\n", |
||||
"system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n", |
||||
"class website:\n", |
||||
" def __init__(self,url):\n", |
||||
" self.url = url\n", |
||||
" requests.packages.urllib3.disable_warnings()\n", |
||||
" response = requests.get(url, headers=headers, verify=False)\n", |
||||
" html_content = response.content\n", |
||||
" soup = BeautifulSoup(html_content, 'html.parser')\n", |
||||
" image_tags = soup.find_all('img')\n", |
||||
" self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n", |
||||
" self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n", |
||||
"\n", |
||||
"                # Restricting to the first 4 images (FIXME: comment previously said 3, and range(4) raises IndexError if the page has fewer than 4 images).\n", |
||||
" if self.image_urls:\n", |
||||
" self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n", |
||||
" else:\n", |
||||
" self.images = {}\n", |
||||
" \n", |
||||
"\n", |
||||
"def user_prompt_for(website):\n", |
||||
" user_prompt = f\"Following are images with their alt-text:\"\n", |
||||
" user_prompt += json.dumps(website.images)\n", |
||||
" user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n", |
||||
" return user_prompt\n", |
||||
"\n", |
||||
"\n", |
||||
"# Step 2: Make the messages list\n", |
||||
"page = website(\"https://www.pexels.com/\")\n", |
||||
"user_prompt = user_prompt_for(page)\n", |
||||
"messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n", |
||||
"\n", |
||||
"# Step 3: Call OpenAI\n", |
||||
"response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages\n", |
||||
" )\n", |
||||
"\n", |
||||
"# Step 4: print the result\n", |
||||
"display(Markdown((response.choices[0].message.content)))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## An extra exercise for those who enjoy web scraping\n", |
||||
"\n", |
||||
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Sharing your code\n", |
||||
"\n", |
||||
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", |
||||
"\n", |
||||
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", |
||||
"\n", |
||||
"Here are good instructions courtesy of an AI friend: \n", |
||||
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f4484fcf-8b39-4c3f-9674-37970ed71988", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,148 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f38e9ebb-453d-4b40-84f6-bc3e9bf4d7ef", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"import ollama\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"# constants\n", |
||||
"\n", |
||||
"MODEL_GPT = 'gpt-4o-mini'\n", |
||||
"MODEL_LLAMA = 'llama3.2'\n", |
||||
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n", |
||||
"HEADERS = {\"Content-Type\": \"application/json\"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f367c5bb-80a2-4d78-8f27-823f5dafe7c0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# set up environment\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# System prompt for the AI technical LLM and Python tutor.\n", |
||||
"\n", |
||||
"system_prompt = \"You are an EXPERT in AI, LLMS and Python \\\n", |
||||
"Provide the answer with example ALLWAYS when necessary. \\\n", |
||||
"If you do not know the answer just say 'I don't know the answer' \\\n", |
||||
"Respond in markdown in Spanish.\"\n", |
||||
"\n", |
||||
"# messages\n", |
||||
"def messages_for(question):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": question}\n", |
||||
" ]\n", |
||||
"\n", |
||||
"# here is the question; type over this to ask something new\n", |
||||
"\n", |
||||
"question = \"\"\"\n", |
||||
"Please explain what this code does and why:\n", |
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", |
||||
"\"\"\"\n", |
||||
"question = question[:5_000] # Truncate if more than 5,000 characters" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a90d726d-d494-401f-9cd6-0260f5c781e0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# METHODS TO DISPLAY\n", |
||||
"def display_summary_ollama(question):\n", |
||||
" response = ollama.chat(\n", |
||||
" model = MODEL_LLAMA,\n", |
||||
" messages = messages_for(question)\n", |
||||
" ) \n", |
||||
" summary = response['message']['content']\n", |
||||
" display(Markdown(summary))\n", |
||||
"\n", |
||||
"def display_summary_gpt(question):\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model = MODEL_GPT,\n", |
||||
" messages = messages_for(question),\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n", |
||||
" \n", |
||||
"def display_summary(llm, question):\n", |
||||
" if llm.startswith(\"llama3.2\"):\n", |
||||
" display_summary_ollama(question)\n", |
||||
" else:\n", |
||||
" display_summary_gpt(question)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4e993b6d-8fee-43f3-9e36-f86701a5cc57", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get gpt-4o-mini to answer, with streaming\n", |
||||
"\n", |
||||
"display_summary(MODEL_GPT, question)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "31f6283a-ee57-415e-9a57-83d07261b7f9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get Llama 3.2 to answer\n", |
||||
"\n", |
||||
"display_summary(MODEL_LLAMA, question)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,464 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# MY !FIRST LAB\n", |
||||
"\n", |
||||
"### Script will take a stackoverflow issue and summarize it as a technical tutorial. \n", |
||||
"\n", |
||||
"Example links to use: \n", |
||||
" \n", |
||||
"https://stackoverflow.com/questions/14220321/how-do-i-return-the-response-from-an-asynchronous-call \n", |
||||
"https://stackoverflow.com/questions/60174/how-can-i-prevent-sql-injection-in-php\n", |
||||
"https://stackoverflow.com/questions/1732348/regex-match-open-tags-except-xhtml-self-contained-tags\n", |
||||
"\n", |
||||
"*Note: Issues must be answered preferebly by a lot of users.*\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"id": "e2fd67f3-6441-4fee-b19c-7c91e6188348", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"website = 'https://stackoverflow.com/questions/60174/how-can-i-prevent-sql-injection-in-php'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# Check the key\n", |
||||
"\n", |
||||
"if not api_key:\n", |
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||
"elif api_key.strip() != api_key:\n", |
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||
"else:\n", |
||||
" print(\"API key found and looks good so far!\")\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", |
||||
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "c5e793b2-6775-426a-a139-4848291d0463", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" \"\"\"\n", |
||||
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||
" \"\"\"\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"mysql - How can I prevent SQL injection in PHP? - Stack Overflow\n", |
||||
"Skip to main content\n", |
||||
"Stack Overflow\n", |
||||
"About\n", |
||||
"Products\n", |
||||
"OverflowAI\n", |
||||
"Stack Overflow for Teams\n", |
||||
"Where developers & technologists share private knowledge with c\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Let's try one out. Change the website and add print statements to follow along.\n", |
||||
"\n", |
||||
"website_content = Website(website)\n", |
||||
"print(website_content.title[:100])\n", |
||||
"print(website_content.text[:150])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Types of prompts\n", |
||||
"\n", |
||||
"You may know this already - but if not, you will get very familiar with it!\n", |
||||
"\n", |
||||
"Models like GPT4o have been trained to receive instructions in a particular way.\n", |
||||
"\n", |
||||
"They expect to receive:\n", |
||||
"\n", |
||||
"**A system prompt** that tells them what task they are performing and what tone they should use\n", |
||||
"\n", |
||||
"**A user prompt** -- the conversation starter that they should reply to" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "268cb127-ec40-4016-9436-94a1ae10a1c6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||
"\n", |
||||
"system_prompt = \"You are a technical writer that analyzes the contents of a stackoverflow website issue containing a question and answer \\\n", |
||||
"and provides a summary in the form of a technical tutorial , ignoring text that might be navigation related. \\\n", |
||||
"Respond in markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||
"\n", |
||||
"def user_prompt_for(website):\n", |
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||
" user_prompt += f\"\"\" \n", |
||||
"\n", |
||||
" You are looking at a website titled {website_content.title}\n", |
||||
"\n", |
||||
" Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n", |
||||
" \n", |
||||
" {website_content.text}\n", |
||||
"\n", |
||||
"\n", |
||||
" The tutorial should include an introduction, problem statement, solution steps, and conclusion.\n", |
||||
" Tutrial should be in markdown format.\n", |
||||
" \"\"\"\n", |
||||
" user_prompt += website.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"You are looking at a website titled mysql - How can I prevent SQL injection in PHP? - Stack Overflow \n", |
||||
"\n", |
||||
" You are looking at a website titled mysql - How can I prevent SQL injection in PHP? - Stack Overflow\n", |
||||
"\n", |
||||
" Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n", |
||||
"\n", |
||||
" Skip to main content\n", |
||||
"Stack Overflow\n", |
||||
"About\n", |
||||
"Products\n", |
||||
"OverflowAI\n", |
||||
"Stack Overflow for Teams\n", |
||||
"Where developers & technologists share private knowledge with coworkers\n", |
||||
"Advertising & Talent\n", |
||||
"Reach devs & t\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"print(user_prompt_for(website_content)[:500])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Messages\n", |
||||
"\n", |
||||
"The API from OpenAI expects to receive messages in a particular structure.\n", |
||||
"Many of the other APIs share this structure:\n", |
||||
"\n", |
||||
"```\n", |
||||
"[\n", |
||||
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n", |
||||
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n", |
||||
"]\n", |
||||
"```\n", |
||||
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## And now let's build useful messages for GPT-4o-mini, using a function" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 11, |
||||
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# See how this function creates exactly the format above\n", |
||||
"\n", |
||||
"def messages_for(website):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Time to bring it together - the API for OpenAI is very simple!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 12, |
||||
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And now: call the OpenAI API. You will get very familiar with this!\n", |
||||
"\n", |
||||
"def summarize(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages_for(website)\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A function to display this nicely in the Jupyter output, using markdown\n", |
||||
"\n", |
||||
"def display_summary(url):\n", |
||||
" summary = summarize(url)\n", |
||||
" display(Markdown(summary))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 14, |
||||
"id": "0a6970cc-bed8-4759-a312-3b81236c2f4e", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"```markdown\n", |
||||
"# How to Prevent SQL Injection in PHP\n", |
||||
"\n", |
||||
"## Introduction\n", |
||||
"SQL injection is a serious security vulnerability that can allow an attacker to interfere with the queries that your application makes to the database. By exploiting this vulnerability, an attacker can gain unauthorized access to sensitive data, manipulate data, and even execute administrative operations on the database. This tutorial will guide you on how to prevent SQL injection in your PHP applications through various best practices.\n", |
||||
"\n", |
||||
"## Problem Statement\n", |
||||
"Consider the following PHP code that is vulnerable to SQL injection:\n", |
||||
"\n", |
||||
"```php\n", |
||||
"$unsafe_variable = $_POST['user_input']; \n", |
||||
"mysql_query(\"INSERT INTO `table` (`column`) VALUES ('$unsafe_variable')\");\n", |
||||
"```\n", |
||||
"\n", |
||||
"If a user were to input something like `value'); DROP TABLE table;--`, the query would become:\n", |
||||
"\n", |
||||
"```sql\n", |
||||
"INSERT INTO `table` (`column`) VALUES('value'); DROP TABLE table;--');\n", |
||||
"```\n", |
||||
"\n", |
||||
"This inserts an unwanted SQL command leading to disastrous effects on the database.\n", |
||||
"\n", |
||||
"## Solution Steps\n", |
||||
"\n", |
||||
"### 1. Use Prepared Statements\n", |
||||
"The best method to prevent SQL injection is to use prepared statements with parameterized queries. This separates SQL logic from data, ensuring that user input is treated as data, not executable code.\n", |
||||
"\n", |
||||
"#### Using PDO\n", |
||||
"Here's how to use PDO in PHP:\n", |
||||
"\n", |
||||
"```php\n", |
||||
"$dsn = 'mysql:dbname=dbtest;host=127.0.0.1;charset=utf8mb4';\n", |
||||
"$dbConnection = new PDO($dsn, 'user', 'password');\n", |
||||
"$dbConnection->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);\n", |
||||
"$dbConnection->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);\n", |
||||
"\n", |
||||
"$stmt = $dbConnection->prepare('SELECT * FROM users WHERE name = :name');\n", |
||||
"$stmt->execute(['name' => $name]);\n", |
||||
"\n", |
||||
"foreach ($stmt as $row) {\n", |
||||
" // Process row\n", |
||||
"}\n", |
||||
"```\n", |
||||
"\n", |
||||
"#### Using MySQLi\n", |
||||
"If you're using MySQLi, the syntax is slightly different:\n", |
||||
"\n", |
||||
"```php\n", |
||||
"$dbConnection = new mysqli('127.0.0.1', 'username', 'password', 'test');\n", |
||||
"$dbConnection->set_charset('utf8mb4');\n", |
||||
"\n", |
||||
"$stmt = $dbConnection->prepare('SELECT * FROM users WHERE name = ?');\n", |
||||
"$stmt->bind_param('s', $name); // 's' stands for string\n", |
||||
"$stmt->execute();\n", |
||||
"$result = $stmt->get_result();\n", |
||||
"\n", |
||||
"while ($row = $result->fetch_assoc()) {\n", |
||||
" // Process row\n", |
||||
"}\n", |
||||
"```\n", |
||||
"\n", |
||||
"### 2. Properly Configure the Database Connection\n", |
||||
"When using PDO, ensure that emulated prepared statements are disabled. This is essential for real prepared statements to take effect.\n", |
||||
"\n", |
||||
"Example configuration:\n", |
||||
"```php\n", |
||||
"$dbConnection->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);\n", |
||||
"```\n", |
||||
"\n", |
||||
"### 3. Validate Input Data\n", |
||||
"In addition to using prepared statements, you should validate and sanitize user inputs. Implementing whitelist validation can help by ensuring only expected values are processed.\n", |
||||
"\n", |
||||
"For example, if you expect a sorting direction:\n", |
||||
"```php\n", |
||||
"$dir = !empty($_GET['dir']) && $_GET['dir'] === 'DESC' ? 'DESC' : 'ASC';\n", |
||||
"```\n", |
||||
"\n", |
||||
"### 4. Limit Database Permissions\n", |
||||
"Restrict database user permissions to the minimum required for their role. For example, a user who only needs to read data should not have permissions to delete or alter it.\n", |
||||
"\n", |
||||
"```sql\n", |
||||
"GRANT SELECT ON database TO 'username'@'localhost';\n", |
||||
"```\n", |
||||
"\n", |
||||
"### 5. Regularly Update Your Codebase\n", |
||||
"Keep libraries and the PHP version you are using up-to-date. Deprecated functions and libraries often contain vulnerabilities that can be exploited.\n", |
||||
"\n", |
||||
"## Conclusion\n", |
||||
"Preventing SQL injection in PHP applications requires a proactive approach. Using prepared statements ensures user input is handled securely, while validating data and limiting permissions fortifies your application against potential attacks. By implementing these best practices, you can significantly reduce the risk of SQL injection vulnerabilities in your applications.\n", |
||||
"\n", |
||||
"For more in-depth information on SQL injection prevention techniques, consult the [OWASP SQL Injection Prevention Cheat Sheet](https://owasp.org/www-community/attacks/SQL_Injection).\n", |
||||
"```" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"display_summary(website)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python |
||||
|
||||
import os |
||||
import argparse |
||||
from dotenv import load_dotenv |
||||
from openai import OpenAI |
||||
|
||||
def load_openai_key(): |
||||
# Load environment variables in a file called .env |
||||
load_dotenv(override=True) |
||||
api_key = os.getenv('OPENAI_API_KEY') |
||||
|
||||
# Check the key |
||||
if not api_key: |
||||
return "Error: No API key was found!" |
||||
elif not api_key.startswith("sk-proj-"): |
||||
return "Error: An API key was found, but it doesn't start sk-proj-; please check you're using the right key" |
||||
elif api_key.strip() != api_key: |
||||
return "Error: An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them!" |
||||
else: |
||||
return "API key found and looks good so far!" |
||||
|
||||
def ask_llm(client, model, user_prompt): |
||||
system_prompt = """ |
||||
you are a writing assistant with an expertise in children's stories. |
||||
Write a bedtime story inspired by the subject below. |
||||
The story should have a begining, middle, and end. |
||||
The story shoukd be appropriate for children ages 5-8 and have a positive message. |
||||
I should be able to read the entire story in about 3 minutes |
||||
""" |
||||
response = client.chat.completions.create( |
||||
model = model, |
||||
messages = [ {"role": "system", "content": system_prompt}, |
||||
{"role": "user", "content": user_prompt}] |
||||
) |
||||
return response.choices[0].message.content |
||||
|
||||
def main(): |
||||
parser = argparse.ArgumentParser(description="AI Bedtime Storyteller") |
||||
parser.add_argument("provider", choices=["openai", "ollama"], help="AI provider to use") |
||||
parser.add_argument("--model", help="Model to use for Ollama (required if provider is 'ollama')", required="ollama" in parser.parse_known_args()[0].provider) |
||||
parser.add_argument("subject", help="What do you want the story to be about?") |
||||
|
||||
args = parser.parse_args() |
||||
|
||||
if args.provider == "openai": |
||||
load_openai_key() |
||||
client = OpenAI() |
||||
model = "gpt-4o-mini" |
||||
elif args.provider == "ollama": |
||||
client = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama') |
||||
model = args.model |
||||
else: |
||||
return "Error: invalid provider!" |
||||
|
||||
user_prompt = args.subject |
||||
|
||||
result = ask_llm(client, model, user_prompt) |
||||
print("AI Response:", result) |
||||
|
||||
if __name__ == "__main__": |
||||
main() |
||||
|
@ -0,0 +1,180 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# End of week 1 exercise\n", |
||||
"\n", |
||||
"To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", |
||||
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c1070317-3ed9-4659-abe3-828943230e03", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"import ollama" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# constants\n", |
||||
"MODEL_GPT = 'gpt-4o-mini'\n", |
||||
"MODEL_LLAMA = 'llama3.2'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# set up environment\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", |
||||
" print(\"API key looks good so far\")\n", |
||||
"else:\n", |
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
||||
"\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are provided with a technical question. \\\n", |
||||
"You are answering by providing a quick explanation and giving some examples.\\n\"\n", |
||||
"\n", |
||||
"# here is the question; type over this to ask something new\n", |
||||
"question = \"\"\"\n", |
||||
"Please explain what this code does and why:\n", |
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get gpt-4o-mini to answer, with streaming\n", |
||||
"def get_answer_gpt():\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL_GPT,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": question}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
"\n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get Llama 3.2 to answer\n", |
||||
"def get_answer_ollama():\n", |
||||
" stream = ollama.generate(\n", |
||||
" MODEL_LLAMA,\n", |
||||
" question,\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk['response'] or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4a859eb1-23fa-40dd-ba91-b35084433a00", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_answer_gpt()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1c73f046-da3a-49a5-8a74-4b8a86a9032a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_answer_ollama()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bea20f33-a710-44ab-9a4d-856db05e4201", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
File diff suppressed because one or more lines are too long
@ -0,0 +1,460 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "e71d7ff9-c27a-4602-9230-856626b1de07", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Company Brochure Generator UI\n", |
||||
"Generates a brochure for a company website, after scraping the website and pages linked with that page, based on the provided company URL. \n", |
||||
"Enables users to \n", |
||||
"- Choose a model type (Llama 3.2, Claude, GPT)\n", |
||||
"- Choose the tone preference\n", |
||||
"- Choose the target audience" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "de9b59b9-8673-42e7-8849-62fe30f56711", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Imports, Keys, Instantiation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 38, |
||||
"id": "39fd7fed-b215-4037-bd6e-7e1af1b83897", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 15, |
||||
"id": "0bf24357-1d77-4721-9d5a-f99827b2158c", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"OpenAI API Key exists and begins sk-proj-\n", |
||||
"Anthropic API Key exists and begins sk-ant-\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 20, |
||||
"id": "1afc12e1-02c1-4394-b589-19cd08d2a8bb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define models\n", |
||||
"CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n", |
||||
"GPT_MODEL = \"gpt-4o-mini\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 17, |
||||
"id": "d5d79a69-0a39-4ab4-aaf8-bc591bce0536", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Creating instances\n", |
||||
"claude = anthropic.Anthropic()\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1d3369bc-b751-4f4d-a288-d7d81c384e67", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Web Scraper" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 8, |
||||
"id": "fafe1074-fbf4-47cc-80dc-34413a447977", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"id": "41c1f1af-ae20-423b-bf7c-efd7f8c2751b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"link_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"link_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
"    {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"id": "eb537563-e393-47ca-9af2-a8ea7393edd9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email or social media links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 36, |
||||
"id": "033568d2-3f1a-43ac-a288-7a65b4ea86a5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=GPT_MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" return json.loads(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 12, |
||||
"id": "d8f316ac-f0b1-42d9-88a8-0a61fcb0023d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links(url)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" print(f\"Processing {link['url']}...\")\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "016e065a-ac5a-48c0-bc4b-e916e9801384", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### System Message" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 18, |
||||
"id": "ed1c6068-5f4f-47a7-ab97-738dfb94e057", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n", |
||||
"and creates a short brochure about the company for prospective customers, investors and recruits. \\\n", |
||||
"You are also provided with the tone, and the target audience. Provide an appropriate answer. Respond in markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6d4f594c-927d-440f-8aae-33cfeb9c445c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### LLM Call Functions" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 40, |
||||
"id": "5b6a0379-3465-4c04-a553-4e4cdb9064b9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_gpt(prompt,company_name,url):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"user\", \"content\": prompt},\n", |
||||
" {\"role\":\"system\",\"content\":system_message}\n", |
||||
" ]\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=GPT_MODEL,\n", |
||||
" messages=messages,\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" yield result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 21, |
||||
"id": "a2194e1d-4e99-4127-9515-aa9353382bc6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_claude(prompt):\n", |
||||
" result = claude.messages.stream(\n", |
||||
" model=CLAUDE_MODEL,\n", |
||||
" max_tokens=1000,\n", |
||||
" temperature=0.7,\n", |
||||
" system=system_message,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"user\", \"content\": prompt},\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" response = \"\"\n", |
||||
" with result as stream:\n", |
||||
" for text in stream.text_stream:\n", |
||||
" response += text or \"\"\n", |
||||
" yield response" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "64adf26c-33b2-4589-8df6-dc5d6da71420", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Brochure Creation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"id": "8192f39f-508b-4592-a075-767db68672b3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 32, |
||||
"id": "8aebfabe-4d51-4ee7-a9d2-5a379e9427cb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_brochure(company_name, url,model,tone,target):\n", |
||||
" print('create brochure function called')\n", |
||||
" prompt = f\"Please generate a company brochure for {company_name}.\"\n", |
||||
" prompt += f\"Use a {tone} tone; and target content at {target}\"\n", |
||||
" prompt += get_brochure_user_prompt(company_name,url)\n", |
||||
" \n", |
||||
" if model == \"GPT\":\n", |
||||
" result = stream_gpt(prompt,company_name,url)\n", |
||||
" elif model==\"Claude\":\n", |
||||
"        result = stream_claude(prompt)\n", |
||||
" else:\n", |
||||
" raise ValueError(\"Unknown model\")\n", |
||||
" yield from result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c5f4f97b-c9d0-4d4c-8b02-e6209ba2549c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Putting it all together : Gradio UI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 22, |
||||
"id": "33162303-9b49-46fe-a8e0-0d01be45685b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"force_dark_mode = \"\"\"\n", |
||||
"function refresh() {\n", |
||||
" const url = new URL(window.location);\n", |
||||
" if (url.searchParams.get('__theme') !== 'dark') {\n", |
||||
" url.searchParams.set('__theme', 'dark');\n", |
||||
" window.location.href = url.href;\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 41, |
||||
"id": "47ab9a41-cecd-4c21-bd68-4a15966b80c4", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7877\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7877/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 41, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.vellum.ai/'}, {'type': 'careers page', 'url': 'https://www.vellum.ai/careers'}]}\n", |
||||
"Processing https://www.vellum.ai/...\n", |
||||
"Processing https://www.vellum.ai/careers...\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"gr.Interface(\n", |
||||
" fn=create_brochure,\n", |
||||
" inputs=[\n", |
||||
" gr.Textbox(label='Company Name:'),\n", |
||||
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", |
||||
" gr.Dropdown(['GPT','Claude'],label='Select Model:'),\n", |
||||
" gr.Dropdown(['Formal','Casual','Persuasive','Informative','Conversational'],label='Select Tone:'),\n", |
||||
" gr.Dropdown(['Businesses','General Public','Students','Investors','Customers'],label='Select Target Audience:'),\n", |
||||
" ],\n", |
||||
" outputs = [gr.Markdown(label='Brochure')],\n", |
||||
" flagging_mode = 'never',\n", |
||||
" js = force_dark_mode\n", |
||||
").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2b923b09-6738-450a-9035-2c8d1bb9cae6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,567 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c79dc33e-1a3b-4601-a8f2-219b7a9b6d88", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Company Brochure - Relevant Links and Custom Tone\n", |
||||
"\n", |
||||
"Using GPT to generate a company brochure with the relevant links functionality and the ability to choose the desired tone." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"id": "e32f4aa7-6fc4-4dc9-8058-58e6a7f329c5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "d1d65a21-bbba-44ff-a2be-85bf2055a493", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"OpenAI API Key set and good to go.\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(\"OpenAI API Key set and good to go.\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set. :(\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "c5db63fe-5da8-496e-9b37-139598d600a7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Setting up the OpenAI object\n", |
||||
"\n", |
||||
"openai = OpenAI()\n", |
||||
"gpt_model = 'gpt-4o-mini'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "535da52f-b280-48ce-aa8b-f82f9f9805d9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"# Some websites need you to use proper headers when fetching them:\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "8d5757c4-95f4-4038-8ed4-8c81da5112b0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"link_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"link_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
"    {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"id": "d5fd31ac-7c81-454a-a1dc-4c58bd3db246", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"id": "e8b67492-1ba4-4aad-a588-39116128fa18", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def gpt_get_links(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model= gpt_model,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" return json.loads(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 8, |
||||
"id": "e8846e7a-ace2-487e-a0a8-fccb389f2eb9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# This function uses the get_contents method in the Website class as well as GPT to find relevant links.\n", |
||||
"\n", |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = gpt_get_links(url)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"id": "18b42319-8342-4b9c-bef6-8b72acf92ab3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; \\\n", |
||||
" use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" \n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"id": "d7748293-a616-41de-93cb-89f65cc5c73d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's create a call that streams back results\n", |
||||
"# If you'd like a refresher on Generators (the \"yield\" keyword),\n", |
||||
"# Please take a look at the Intermediate Python notebook in week1 folder.\n", |
||||
"\n", |
||||
"def stream_brochure(company_name, url, tone):\n", |
||||
"\n", |
||||
" system_message = f\"You are an assistant that analyzes the content of several relevant pages from a company website \\\n", |
||||
" and creates a short brochure about the company for prospective customers, investors, and recruits. \\\n", |
||||
" Include details of company culture, customers and careers/jobs if you have the information. \\\n", |
||||
" Respond in markdown, and use a {tone.lower()} tone throughout the brochure.\"\n", |
||||
"\n", |
||||
" \n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ]\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=gpt_model,\n", |
||||
" messages=messages,\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" yield result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 11, |
||||
"id": "15222832-06e0-4452-a8e1-59b9b1755488", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7860\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 11, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'company page', 'url': 'https://www.snowflake.com/en/company/overview/about-snowflake/'}, {'type': 'company leadership page', 'url': 'https://www.snowflake.com/en/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'company ESG page', 'url': 'https://www.snowflake.com/en/company/overview/esg/'}, {'type': 'company ventures page', 'url': 'https://www.snowflake.com/en/company/overview/snowflake-ventures/'}, {'type': 'end data disparity page', 'url': 'https://www.snowflake.com/en/company/overview/end-data-disparity/'}]}\n", |
||||
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'about page', 'url': 'https://www.snowflake.com/company/overview/about-snowflake/'}, {'type': 'leadership page', 'url': 'https://www.snowflake.com/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'investor relations', 'url': 'https://investors.snowflake.com/overview/default.aspx'}, {'type': 'ESG page', 'url': 'https://www.snowflake.com/company/overview/esg/'}, {'type': 'snowflake ventures', 'url': 'https://www.snowflake.com/company/overview/snowflake-ventures/'}, {'type': 'end data disparity', 'url': 'https://www.snowflake.com/company/overview/end-data-disparity/'}]}\n" |
||||
] |
||||
}, |
||||
{ |
||||
"name": "stderr", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 464, in _make_request\n", |
||||
" self._validate_conn(conn)\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1093, in _validate_conn\n", |
||||
" conn.connect()\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 741, in connect\n", |
||||
" sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 920, in _ssl_wrap_socket_and_match_hostname\n", |
||||
" ssl_sock = ssl_wrap_socket(\n", |
||||
" ^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 460, in ssl_wrap_socket\n", |
||||
" ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 504, in _ssl_wrap_socket_impl\n", |
||||
" return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 517, in wrap_socket\n", |
||||
" return self.sslsocket_class._create(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1104, in _create\n", |
||||
" self.do_handshake()\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1382, in do_handshake\n", |
||||
" self._sslobj.do_handshake()\n", |
||||
"ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", |
||||
"\n", |
||||
"During handling of the above exception, another exception occurred:\n", |
||||
"\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n", |
||||
" response = self._make_request(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 488, in _make_request\n", |
||||
" raise new_e\n", |
||||
"urllib3.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", |
||||
"\n", |
||||
"The above exception was the direct cause of the following exception:\n", |
||||
"\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 667, in send\n", |
||||
" resp = conn.urlopen(\n", |
||||
" ^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 841, in urlopen\n", |
||||
" retries = retries.increment(\n", |
||||
" ^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/retry.py\", line 519, in increment\n", |
||||
" raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
"urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n", |
||||
"\n", |
||||
"During handling of the above exception, another exception occurred:\n", |
||||
"\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n", |
||||
" response = await route_utils.call_process_api(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n", |
||||
" output = await app.get_blocks().process_api(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2103, in process_api\n", |
||||
" result = await self.call_function(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1662, in call_function\n", |
||||
" prediction = await utils.async_iteration(iterator)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 735, in async_iteration\n", |
||||
" return await anext(iterator)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 729, in __anext__\n", |
||||
" return await anyio.to_thread.run_sync(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n", |
||||
" return await get_async_backend().run_sync_in_worker_thread(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n", |
||||
" return await future\n", |
||||
" ^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n", |
||||
" result = context.run(func, *args)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 712, in run_sync_iterator_async\n", |
||||
" return next(iterator)\n", |
||||
" ^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 873, in gen_wrapper\n", |
||||
" response = next(iterator)\n", |
||||
" ^^^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/601932735.py\", line 15, in stream_brochure\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/3764629295.py\", line 6, in get_brochure_user_prompt\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/2913862724.py\", line 5, in get_all_details\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" ^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/1579423502.py\", line 15, in __init__\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 73, in get\n", |
||||
" return request(\"get\", url, params=params, **kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 59, in request\n", |
||||
" return session.request(method=method, url=url, **kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n", |
||||
" resp = self.send(prep, **send_kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n", |
||||
" r = adapter.send(request, **kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 698, in send\n", |
||||
" raise SSLError(e, request=request)\n", |
||||
"requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 464, in _make_request\n", |
||||
" self._validate_conn(conn)\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1093, in _validate_conn\n", |
||||
" conn.connect()\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 741, in connect\n", |
||||
" sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 920, in _ssl_wrap_socket_and_match_hostname\n", |
||||
" ssl_sock = ssl_wrap_socket(\n", |
||||
" ^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 460, in ssl_wrap_socket\n", |
||||
" ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 504, in _ssl_wrap_socket_impl\n", |
||||
" return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 517, in wrap_socket\n", |
||||
" return self.sslsocket_class._create(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1104, in _create\n", |
||||
" self.do_handshake()\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1382, in do_handshake\n", |
||||
" self._sslobj.do_handshake()\n", |
||||
"ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", |
||||
"\n", |
||||
"During handling of the above exception, another exception occurred:\n", |
||||
"\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n", |
||||
" response = self._make_request(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 488, in _make_request\n", |
||||
" raise new_e\n", |
||||
"urllib3.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n", |
||||
"\n", |
||||
"The above exception was the direct cause of the following exception:\n", |
||||
"\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 667, in send\n", |
||||
" resp = conn.urlopen(\n", |
||||
" ^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 841, in urlopen\n", |
||||
" retries = retries.increment(\n", |
||||
" ^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/retry.py\", line 519, in increment\n", |
||||
" raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
"urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n", |
||||
"\n", |
||||
"During handling of the above exception, another exception occurred:\n", |
||||
"\n", |
||||
"Traceback (most recent call last):\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n", |
||||
" response = await route_utils.call_process_api(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n", |
||||
" output = await app.get_blocks().process_api(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2103, in process_api\n", |
||||
" result = await self.call_function(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1662, in call_function\n", |
||||
" prediction = await utils.async_iteration(iterator)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 735, in async_iteration\n", |
||||
" return await anext(iterator)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 729, in __anext__\n", |
||||
" return await anyio.to_thread.run_sync(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n", |
||||
" return await get_async_backend().run_sync_in_worker_thread(\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n", |
||||
" return await future\n", |
||||
" ^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n", |
||||
" result = context.run(func, *args)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 712, in run_sync_iterator_async\n", |
||||
" return next(iterator)\n", |
||||
" ^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 873, in gen_wrapper\n", |
||||
" response = next(iterator)\n", |
||||
" ^^^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/601932735.py\", line 15, in stream_brochure\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/3764629295.py\", line 6, in get_brochure_user_prompt\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/2913862724.py\", line 5, in get_all_details\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" ^^^^^^^^^^^^\n", |
||||
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/1579423502.py\", line 15, in __init__\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 73, in get\n", |
||||
" return request(\"get\", url, params=params, **kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 59, in request\n", |
||||
" return session.request(method=method, url=url, **kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n", |
||||
" resp = self.send(prep, **send_kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n", |
||||
" r = adapter.send(request, **kwargs)\n", |
||||
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", |
||||
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 698, in send\n", |
||||
" raise SSLError(e, request=request)\n", |
||||
"requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n" |
||||
] |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.petrofac.com/who-we-are/'}, {'type': 'what we do page', 'url': 'https://www.petrofac.com/who-we-are/what-we-do/'}, {'type': 'careers page', 'url': 'https://www.petrofac.com/careers/'}, {'type': 'our structure page', 'url': 'https://www.petrofac.com/who-we-are/our-structure/'}, {'type': 'energy transition page', 'url': 'https://www.petrofac.com/who-we-are/energy-transition/'}, {'type': 'sustainability and ESG page', 'url': 'https://www.petrofac.com/who-we-are/sustainability-and-esg/'}, {'type': 'investor relations page', 'url': 'https://www.petrofac.com/investors/'}, {'type': 'services page', 'url': 'https://www.petrofac.com/services/'}, {'type': 'where we operate page', 'url': 'https://www.petrofac.com/where-we-operate/'}]}\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"view = gr.Interface(\n", |
||||
" fn=stream_brochure,\n", |
||||
" inputs=[\n", |
||||
" gr.Textbox(label=\"Company name:\"),\n", |
||||
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", |
||||
" gr.Textbox(label=\"Tone:\")],\n", |
||||
" outputs=[gr.Markdown(label=\"Brochure:\")],\n", |
||||
" flagging_mode=\"never\"\n", |
||||
")\n", |
||||
"view.launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "70d6398c-21dd-44f8-ba7d-0204414dffa0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,251 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "5d799d2a-6e58-4a83-b17a-dbbc40efdc39", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Project - Course Booking AI Assistant\n", |
||||
"AI Customer Support Bot that \n", |
||||
"- Returns Prices\n", |
||||
"- Books Tickets\n", |
||||
"- Adds Information to Text File" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b1ad9acd-a702-48a3-8ff5-d536bcac8030", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "74adab0c-99b3-46cd-a79f-320a3e74138a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Initialization\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8d3240a4-99c1-4c07-acaa-ecbb69ffd2e4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful assistant for an Online Course Platform called StudyAI. \"\n", |
||||
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so. \"\n", |
||||
"system_message += \"If you are given a partial name, for example 'discrete' instead of 'discrete structures' \\\n", |
||||
"ask the user if they meant to say 'discrete structures', and then display the price. The user may also use \\\n", |
||||
"acronyms like 'PF' instead of programming fundamentals or 'OOP' to mean 'Object oriented programming'. \\\n", |
||||
"Clarify what the user means and then proceed as directed.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9a1b8d5f-f893-477b-8396-ff7d697eb0c3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"course_prices = {\"programming fundamentals\": \"$19\", \"discrete structures\": \"$39\", \"operating systems\": \"$24\", \"object oriented programming\": \"$39\"}\n", |
||||
"\n", |
||||
"def get_course_price(course):\n", |
||||
" print(f\"Tool get_course_price called for {course}\")\n", |
||||
" course = course.lower()\n", |
||||
" return course_prices.get(course, \"Unknown\")\n", |
||||
"\n", |
||||
"def enroll_in_course(course):\n", |
||||
"    print(f'Tool enroll_in_course called for {course}')\n", |
||||
" course_price = get_course_price(course)\n", |
||||
" if course_price != 'Unknown':\n", |
||||
" with open('enrolled_courses.txt', 'a') as file: \n", |
||||
" file.write(course + \"\\n\")\n", |
||||
" return 'Successfully enrolled in course'\n", |
||||
" else:\n", |
||||
" return 'Enrollment failed, no such course available'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "330d2b94-a8c5-4967-ace7-15d2cd52d7ae", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_course_price('graph theory')\n", |
||||
"get_course_price('discrete structures')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5bb65830-fab8-45a7-bf43-7e52186915a0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"price_function = {\n", |
||||
" \"name\": \"get_course_price\",\n", |
||||
" \"description\": \"Get the price of a course. Call this whenever you need to know the course price, for example when a customer asks 'How much is a ticket for this course?'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"course\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The course that the customer wants to purchase\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"course\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}\n", |
||||
"\n", |
||||
"enroll_function = {\n", |
||||
" \"name\": \"enroll_in_course\",\n", |
||||
" \"description\":\"Get the success status of course enrollment. Call whenever a customer wants to enroll in a course\\\n", |
||||
" for example, if they say 'I want to purchase this course' or 'I want to enroll in this course'\",\n", |
||||
" \"parameters\":{\n", |
||||
" \"type\":\"object\",\n", |
||||
" \"properties\":{\n", |
||||
" \"course\":{\n", |
||||
" \"type\":\"string\",\n", |
||||
" \"description\": \"The course that the customer wants to purchase\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"course\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" } \n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "08af86b9-3aaa-4b6b-bf7c-ee668ba1cbfe", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"tools = [\n", |
||||
" {\"type\":\"function\",\"function\":price_function},\n", |
||||
" {\"type\":\"function\",\"function\":enroll_function}\n", |
||||
"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "482efc34-ff1f-4146-9570-58b4d59c3b2f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(message,history):\n", |
||||
" messages = [{\"role\":\"system\",\"content\":system_message}] + history + [{\"role\":\"user\",\"content\":message}]\n", |
||||
" response = openai.chat.completions.create(model=MODEL,messages=messages,tools=tools)\n", |
||||
"\n", |
||||
" if response.choices[0].finish_reason == \"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" messages.append(message)\n", |
||||
" for tool_call in message.tool_calls:\n", |
||||
" messages.append(handle_tool_call(tool_call))\n", |
||||
" response = openai.chat.completions.create(model=MODEL,messages=messages)\n", |
||||
"\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f725b4fb-d477-4d7d-80b5-5d70e1b25a86", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# We have to write that function handle_tool_call:\n", |
||||
"\n", |
||||
"def handle_tool_call(tool_call):\n", |
||||
" function = tool_call.function.name\n", |
||||
" arguments = json.loads(tool_call.function.arguments)\n", |
||||
" match function:\n", |
||||
" case 'get_course_price':\n", |
||||
" course = arguments.get('course')\n", |
||||
" price = get_course_price(course)\n", |
||||
" return {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": json.dumps({\"course\": course,\"price\": price}),\n", |
||||
" \"tool_call_id\": tool_call.id\n", |
||||
" }\n", |
||||
" case 'enroll_in_course':\n", |
||||
" course = arguments.get('course')\n", |
||||
" status = enroll_in_course(course)\n", |
||||
" return {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": json.dumps({\"course\": course, \"status\": status}),\n", |
||||
" \"tool_call_id\": tool_call.id\n", |
||||
" }\n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c446272a-9ce1-4ffd-9bc8-483d782810b4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat,type=\"messages\").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1fe714a3-f793-4c3b-b5aa-6c81b82aea1b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,218 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "e063b35e-5598-4084-b255-89956bfedaac", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Models an interaction between Llama 3.2 and Claude 3.5 Haiku" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4f534359-cdb4-4441-aa66-d6700fa4d6a5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import anthropic\n", |
||||
"import ollama" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3bdff240-9118-4061-9369-585c4d4ce0a7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ff110b3f-3986-4fd8-a0b1-fd4b51133a8d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Connect to Anthropic\n", |
||||
"\n", |
||||
"claude = anthropic.Anthropic()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e6e596c6-6307-49c1-a29f-5c4e88f8d34d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Download the llama3.2:1b model for local execution.\n", |
||||
"!ollama pull llama3.2:1b" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "633b6892-6d04-40cb-8b61-196fc754b00c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Define models\n", |
||||
"CLAUDE_MODEL = \"claude-3-5-haiku-latest\"\n", |
||||
"LLAMA_MODEL = \"llama3.2:1b\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a699a809-e3d3-4392-94bd-e2f80a5aec60", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"claude_system = \"You are a chatbot designed as a study tutor for undergraduate students. \\\n", |
||||
"You explain information and key technical terms related to the subject in a succinct yet \\\n", |
||||
"comprehensive manner. You may use tables, formatting and other visuals to help create \\\n", |
||||
"'cheat-sheets' of sorts.\"\n", |
||||
"\n", |
||||
"llama_system = \"You are a chatbot designed to ask questions about different topics related to \\\n", |
||||
"computer vision. You are meant to simulate a student, not teacher. Act as if you have no \\\n", |
||||
"prior knowledge\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bdb049d8-130b-42dd-aaab-29c09e3e2347", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"llama_messages = [\"Hi\"]\n", |
||||
"claude_messages = [\"Hello\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c158f31c-5e8b-48a4-9980-6b280393800b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_llama():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": llama_system}]\n", |
||||
" for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": llama_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": claude_msg})\n", |
||||
" response = ollama.chat(model=LLAMA_MODEL, messages=messages)\n", |
||||
" return response['message']['content']\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d803c5a2-df54-427a-9b80-8e9dd04ee36d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_claude():\n", |
||||
" messages = []\n", |
||||
" for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n", |
||||
" messages.append({\"role\": \"user\", \"content\": llama_msg})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": llama_messages[-1]})\n", |
||||
" message = claude.messages.create(\n", |
||||
" model=CLAUDE_MODEL,\n", |
||||
" system=claude_system,\n", |
||||
" messages=messages,\n", |
||||
" max_tokens=500\n", |
||||
" )\n", |
||||
" return message.content[0].text" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a23794bb-0f36-4f91-aa28-24b876203a36", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"call_llama()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7f5c3e2f-a1bb-403b-b6b5-944a10d93305", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"call_claude()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3d6eb874-1c8f-47d8-a9f1-2e0fe197ae83", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"llama_messages = [\"Hi\"]\n", |
||||
"claude_messages = [\"Hello there, what would you like to learn today?\"]\n", |
||||
"\n", |
||||
"print(f'Llama 3.2:\n{llama_messages[0]}')\n", |
||||
"print(f'Claude:\\n{claude_messages[0]}')\n", |
||||
"\n", |
||||
"for _ in range(5):\n", |
||||
" llama_next = call_llama()\n", |
||||
" print(f'Llama 3.2:\\n{llama_next}')\n", |
||||
" llama_messages.append(llama_next)\n", |
||||
" \n", |
||||
" claude_next = call_claude()\n", |
||||
" print(f'Claude 3.5 Haiku:\\n{claude_next}')\n", |
||||
" claude_messages.append(claude_next)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d1e651ad-85c8-45c7-ba83-f7c689080d6b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,242 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Welcome to Week 2!\n", |
||||
"\n", |
||||
"## Frontier Model APIs\n", |
||||
"\n", |
||||
"In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n", |
||||
"\n", |
||||
"Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"from IPython.display import Markdown, display, update_display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# import for google\n", |
||||
"# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n", |
||||
"# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n", |
||||
"\n", |
||||
"import google.generativeai" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"# Print the key prefixes to help with any debugging\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")\n", |
||||
"\n", |
||||
"if google_api_key:\n", |
||||
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"Google API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Connect to OpenAI, Anthropic\n", |
||||
"\n", |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"claude = anthropic.Anthropic()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# This is the set up code for Gemini\n", |
||||
"# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n", |
||||
"google.generativeai.configure()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## An adversarial conversation between Chatbots.\n", |
||||
"\n", |
||||
"### What if two chatbots get into a self-referential conversation that goes on a long time? In my first test, \n", |
||||
"### they eventually forgot the topic and ended up repeating polite nothings to each other. In another test,\n", |
||||
"### they converged on a result and ended by exchanging nearly identical statements.\n", |
||||
"\n", |
||||
"### Warning: Think before you dial up the number of iterations too high. Being a student, I don't know at what \n", |
||||
"### point the chat becomes too costly or what models can do this without becoming overloaded. Maybe Ed can advise if he sees this.\n", |
||||
"\n", |
||||
"## Two chatbots edit an essay about cars. One keeps trying to make it longer every time; the other keeps making it \n", |
||||
"## shorter.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"\n", |
||||
"# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", |
||||
"# We're using cheap versions of models so the costs will be minimal\n", |
||||
"\n", |
||||
"gpt_model = \"gpt-4o-mini\"\n", |
||||
"claude_model = \"claude-3-haiku-20240307\"\n", |
||||
"\n", |
||||
"\n", |
||||
"gpt_system = \"This is a description of a car; \\\n", |
||||
"rephrase the description while adding one detail. Don't include comments that aren't part of the car description.\"\n", |
||||
"\n", |
||||
"claude_system = \"This is a description of a car; \\\n", |
||||
"repeat the description in slightly shorter form. You may remove some details if desired. Don't include comments that aren't part of the car description. Maximum reply length 125 words.\"\n", |
||||
"\n", |
||||
"\n", |
||||
"gpt_messages = [\"Hi there\"]\n", |
||||
"claude_messages = [\"Hi\"] \n", |
||||
"\n", |
||||
"\n", |
||||
"def call_gpt():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", |
||||
" for gpt, claude in zip(gpt_messages, claude_messages):\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": claude})\n", |
||||
" completion = openai.chat.completions.create(\n", |
||||
" model=gpt_model,\n", |
||||
" messages=messages\n", |
||||
" )\n", |
||||
" return completion.choices[0].message.content\n", |
||||
"\n", |
||||
"reply = call_gpt()\n", |
||||
"print('\\nGPT: ', reply)\n", |
||||
"\n", |
||||
"def call_claude():\n", |
||||
" messages = []\n", |
||||
" for gpt, claude_message in zip(gpt_messages, claude_messages):\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt})\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", |
||||
" message = claude.messages.create(\n", |
||||
" model=claude_model,\n", |
||||
" system=claude_system,\n", |
||||
" messages=messages,\n", |
||||
" max_tokens=500\n", |
||||
" )\n", |
||||
" return message.content[0].text\n", |
||||
"\n", |
||||
"\n", |
||||
"reply = call_claude()\n", |
||||
"print('\\nGPT: ', reply)\n", |
||||
"\n", |
||||
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", |
||||
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "9fbce0da", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Here's the iterative loop. Important change: Unlike the original example, we don't repeat the entire conversation to make the input longer and longer.\n", |
||||
"### Instead, we use pop() to remove the oldest messages." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1f41d586", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"\n", |
||||
"for i in range(35):\n", |
||||
" gpt_next = call_gpt()\n", |
||||
" print(f\"GPT:\\n{gpt_next}\\n\")\n", |
||||
" if len(gpt_messages) > 6:\n", |
||||
" gpt_messages.pop(0)\n", |
||||
" gpt_messages.pop(0)\n", |
||||
" gpt_messages.append(gpt_next)\n", |
||||
" \n", |
||||
" claude_next = call_claude()\n", |
||||
" print(f\"Claude:\\n{claude_next}\\n\")\n", |
||||
" if len(claude_messages) > 6:\n", |
||||
" claude_messages.pop(0)\n", |
||||
" claude_messages.pop(0)\n", |
||||
" claude_messages.append(claude_next)\n", |
||||
"\n", |
||||
"print('Done!')\n", |
||||
"\n" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.12.4" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,142 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d18a61ce-bbd4-491c-ab2e-8b352f9af844", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### An AI Chatbot that teaches students programming using GPT API" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c658ac85-6087-4a2c-b23f-1b92c17f0db3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr\n", |
||||
"import anthropic" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "46df0488-f874-41e0-a6a4-9a64aa7be53c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables \n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
" \n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7eadc218-5b10-4174-bf26-575361640524", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e7484731-ac84-405a-a688-6e81d139c5ce", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful programming study assistant\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "54e82f5a-993f-4a95-9d9d-caf35dbc4e76", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||
"\n", |
||||
" print(\"History is:\")\n", |
||||
" print(history)\n", |
||||
" print(\"And messages is:\")\n", |
||||
" print(messages)\n", |
||||
"\n", |
||||
" stream = openai.chat.completions.create(model='gpt-4o-mini', messages=messages, stream=True)\n", |
||||
"\n", |
||||
" response = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" yield response" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5941ed67-e2a7-41bc-a8a3-079e9f1fdb64", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e8fcfe68-bbf6-4058-acc9-0230c96608c2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message += \"Whenever the user talks about a topic that is not connected to programmming,\\\n", |
||||
"nudge them in the right direction by stating that you are here to help with programming. Encourage \\\n", |
||||
"the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge \\\n", |
||||
"if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone. Do not ignore \\\n", |
||||
"their requests, rather politely reject and then redirect them.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "090e7d49-fcbf-4715-b120-8d7aa91d165f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,275 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Project - Airline AI Assistant\n", |
||||
"\n", |
||||
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Initialization\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and be\\\\gins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n", |
||||
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n", |
||||
"# MODEL = \"llama3.2\"\n", |
||||
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", |
||||
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", |
||||
"\n", |
||||
"def chat(message, history):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message}\n", |
||||
" ] + history + [\n", |
||||
" {\"role\": \"user\", \"content\": message}\n", |
||||
" ]\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" return response.choices[0].message.content\n", |
||||
"\n", |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Tools\n", |
||||
"\n", |
||||
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n", |
||||
"\n", |
||||
"With tools, you can write a function, and have the LLM call that function as part of its response.\n", |
||||
"\n", |
||||
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n", |
||||
"\n", |
||||
"Well, kinda." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's start by making a useful function\n", |
||||
"\n", |
||||
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", |
||||
"\n", |
||||
"def get_ticket_price(destination_city):\n", |
||||
" print(f\"Tool get_ticket_price called for {destination_city}\")\n", |
||||
" city = destination_city.lower()\n", |
||||
" return ticket_prices.get(city, \"Unknown\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_ticket_price(\"Berlin\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# There's a particular dictionary structure that's required to describe our function:\n", |
||||
"\n", |
||||
"price_function = {\n", |
||||
" \"name\": \"get_ticket_price\",\n", |
||||
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"destination_city\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The city that the customer wants to travel to\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"destination_city\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And this is included in a list of tools:\n", |
||||
"\n", |
||||
"tools = [{\"type\": \"function\", \"function\": price_function}]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Getting OpenAI to use our Tool\n", |
||||
"\n", |
||||
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n", |
||||
"\n", |
||||
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n", |
||||
"\n", |
||||
"Here's how the new chat function looks:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ad32321f-083a-4462-a6d6-7bb3b0f5d10a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# We have to write that function handle_tool_call:\n", |
||||
"\n", |
||||
"def handle_tool_call(message): \n", |
||||
" responses = []\n", |
||||
" for tool_call in message.tool_calls: \n", |
||||
" if tool_call.function.name == \"get_ticket_price\":\n", |
||||
" arguments = json.loads(tool_call.function.arguments)\n", |
||||
" city = arguments.get('destination_city')\n", |
||||
" price = get_ticket_price(city)\n", |
||||
" response = {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", |
||||
" \"tool_call_id\": tool_call.id\n", |
||||
" }\n", |
||||
" responses.append(response)\n", |
||||
" return responses" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(message, history):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message}\n", |
||||
" ] + history + [\n", |
||||
" {\"role\": \"user\", \"content\": message}\n", |
||||
" ]\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", |
||||
"\n", |
||||
" # Tool usage\n", |
||||
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" responses = handle_tool_call(message)\n", |
||||
" messages.append(message) # That's the assistant asking us to run a tool\n", |
||||
" for response in responses:\n", |
||||
" messages.append(response) # That's the result of the tool calls\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" \n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8dc18486-4d6b-4cbf-a6b8-16d08d7c4f54", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.13.2" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,701 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ec4f6b32-46e9-429a-a3cd-521ff5418493", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Occasio - Event Management Assistant" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import json\n", |
||||
"import time\n", |
||||
"import pprint\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"import google.generativeai as genai\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"# Print the key prefixes to help with any debugging\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||
"\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"if anthropic_api_key:\n", |
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||
"else:\n", |
||||
" print(\"Anthropic API Key not set\")\n", |
||||
"\n", |
||||
"if google_api_key:\n", |
||||
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"Google API Key not set\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8b501508-0082-47be-9903-52ff1c243486", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Connect to OpenAI, Anthropic and Google and assign a model for each\n", |
||||
"\n", |
||||
"openai = OpenAI()\n", |
||||
"OPENAI_MODEL = \"gpt-4o-mini\"\n", |
||||
"\n", |
||||
"claude = anthropic.Anthropic()\n", |
||||
"ANTHROPIC_MODEL = \"claude-3-haiku-20240307\"\n", |
||||
"\n", |
||||
"genai.configure()\n", |
||||
"GOOGLE_MODEL = \"gemini-2.0-flash\"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are called \\\"EventAI\\\", a virtual assistant for an Elementary school called Eagle Elementary School. You can help users by giving \\\n", |
||||
"them details of upcoming shcool events like event name, description, location etc. \"\n", |
||||
"#system_message += \"Introduce yourself with a warm welcome message on your first response ONLY.\"\n", |
||||
"system_message += \"Give short, courteous answers, no more than 2 sentences. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so. Do not make up your own event details information\"\n", |
||||
"system_message += \"You might be asked to list the questions asked by the user so far. In that situation, based on the conversation history provided to you, \\\n", |
||||
"list the questions and respond\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Some imports for handling images\n", |
||||
"\n", |
||||
"import base64\n", |
||||
"from io import BytesIO\n", |
||||
"from PIL import Image" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def artist(event_text):\n", |
||||
" image_response = openai.images.generate(\n", |
||||
" model=\"dall-e-3\",\n", |
||||
" prompt=f\"An image representing an {event_text}, showing typical activities that happen for that {event_text}, in a vibrant pop-art style that elementary school kids will like\",\n", |
||||
" size=\"1024x1024\",\n", |
||||
" n=1,\n", |
||||
" response_format=\"b64_json\",\n", |
||||
" )\n", |
||||
" image_base64 = image_response.data[0].b64_json\n", |
||||
" image_data = base64.b64decode(image_base64)\n", |
||||
" return Image.open(BytesIO(image_data))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d104b96a-02ca-4159-82fe-88e0452aa479", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import base64\n", |
||||
"from io import BytesIO\n", |
||||
"from PIL import Image\n", |
||||
"from IPython.display import Audio, display\n", |
||||
"\n", |
||||
"def talker(message):\n", |
||||
" response = openai.audio.speech.create(\n", |
||||
" model=\"tts-1\",\n", |
||||
" voice=\"onyx\",\n", |
||||
" input=message)\n", |
||||
"\n", |
||||
" audio_stream = BytesIO(response.content)\n", |
||||
" output_filename = \"output_audio.mp3\"\n", |
||||
" with open(output_filename, \"wb\") as f:\n", |
||||
" f.write(audio_stream.read())\n", |
||||
"\n", |
||||
" # Play the generated audio\n", |
||||
" display(Audio(output_filename, autoplay=True))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f0428a74-4daa-4b0d-b25a-219a35f39f55", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"school_events = [\n", |
||||
" {\n", |
||||
" \"event_id\": \"pta\",\n", |
||||
" \"name\": \"Parent Teachers Meeting (PTA/PTM)\",\n", |
||||
" \"description\": \"Parent teachers meeting (PTA/PTM) to discuss students' progress.\",\n", |
||||
" \"date_time\": \"Apr 1st, 2025 11 AM\",\n", |
||||
" \"location\" : \"Glove Annexure Hall\"\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"event_id\": \"read aloud\",\n", |
||||
" \"name\": \"Read Aloud to your class/Reading to your class\",\n", |
||||
" \"description\": \"Kids can bring their favorite book and read it to their class.\",\n", |
||||
" \"date_time\": \"Apr 15th, 2025 1 PM\",\n", |
||||
" \"location\": \"Classroom\"\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"event_id\": \"100 days of school\",\n", |
||||
" \"name\": \"Celebrating 100 days of school. Dress up time for kids\",\n", |
||||
" \"description\": \"Kids can dress up as old people and celebrate the milestone with their teachers.\",\n", |
||||
" \"date_time\": \"May 15th, 2025 11 AM\",\n", |
||||
" \"location\": \"Classroom\"\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"event_id\": \"Book fair\",\n", |
||||
" \"name\": \"Scholastic book fair\",\n", |
||||
" \"description\": \"Kids can purchase their favorite scholastic books.\",\n", |
||||
" \"date_time\": \"Jun 22nd, 2025 10:30 AM\",\n", |
||||
" \"location\": \"Library\"\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"event_id\": \"Halloween\",\n", |
||||
" \"name\": \"Halloween\",\n", |
||||
" \"description\": \"Kids can dress up as their favorite characters\",\n", |
||||
" \"date_time\": \"Oct 31st, 2025\",\n", |
||||
" \"location\": \"Classroom\"\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"event_id\": \"Movie Night\",\n", |
||||
" \"name\": \"Movie Night\",\n", |
||||
" \"description\": \"A popular and kids centric movie will be played. Kids and families are welcome.\",\n", |
||||
" \"date_time\": \"May 3rd, 2025\",\n", |
||||
" \"location\": \"Main auditorium\"\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"event_id\": \"Intruder Drill\",\n", |
||||
" \"name\": \"Intruder Drill\",\n", |
||||
" \"description\": \"State mandated monthly intruder drill to prepare staff and students with necessary safety skills in times of a crisis\",\n", |
||||
" \"date_time\": \"May 3rd, 2025\",\n", |
||||
" \"location\": \"Main auditorium\"\n", |
||||
" }\n", |
||||
"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b7027eec-e522-49c1-af59-56a82f9d3be8", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_event_details(query):\n", |
||||
" search_words = query.lower().split() \n", |
||||
" for event in school_events:\n", |
||||
" event_text = event['name'].lower() + ' ' + event['description'].lower()\n", |
||||
" if all(word in event_text for word in search_words):\n", |
||||
" return event\n", |
||||
" return None" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Tools\n", |
||||
"\n", |
||||
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n", |
||||
"\n", |
||||
"With tools, you can write a function, and have the LLM call that function as part of its response.\n", |
||||
"\n", |
||||
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n", |
||||
"\n", |
||||
"Well, kinda." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "68e96b54-b891-4e7b-a6bc-17693dc99970", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# for claude\n", |
||||
"tools_claude = [\n", |
||||
" {\n", |
||||
" \"name\": \"get_event_details\",\n", |
||||
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n", |
||||
"'When is the pta meeting scheduled?\",\n", |
||||
" \"input_schema\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"event_text\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The event keyword that the user wants to get details on\"\n", |
||||
" }\n", |
||||
" },\n", |
||||
" \"required\": [\"event_text\"]\n", |
||||
" }\n", |
||||
"}\n", |
||||
"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "636188d2-7e7a-48a0-9f04-f3813c7dc323", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# For GPT\n", |
||||
"events_function_gpt = {\n", |
||||
" \"name\": \"get_event_details\",\n", |
||||
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n", |
||||
" 'When is the pta meeting scheduled?'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"event_text\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The event keyword that the user wants to get details on\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"event_text\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "605684f8-ed02-4cc9-8a16-012533b601cb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And this is included in a list of tools:\n", |
||||
"tools_gpt = [{\"type\": \"function\", \"function\": events_function_gpt}]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4ac5a34c-a630-449a-9d46-669daace799c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Gemini function declaration structure\n", |
||||
"gemini_event_details = [{\n", |
||||
" \"name\": \"get_event_details\",\n", |
||||
" \"description\":\"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks 'When is the pta meeting scheduled?'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"event_text\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The event keyword that the user wants details on\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"event_text\"],\n", |
||||
" },\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"name\": \"get_event_test\",\n", |
||||
" \"description\":\"This is a test function to validate if the function call picks up the right function if there are multiple functions.\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"event_text\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The event keyword that the user wants details on\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"event_text\"],\n", |
||||
" },\n", |
||||
" }\n", |
||||
"]\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c6331113-63b0-4712-94bb-f363422a8441", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat_claude(history):\n", |
||||
" print(f\"\\nhistory is {history}\\n\")\n", |
||||
" #Claude doesnt take any other key value pair other than role and content. Hence filtering only those key value pairs\n", |
||||
" history_claude = list({\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in history if \"role\" in msg and \"content\" in msg)\n", |
||||
" #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]\n", |
||||
" #messages = history\n", |
||||
" message = claude.messages.create(\n", |
||||
" model=ANTHROPIC_MODEL,\n", |
||||
" max_tokens=1000,\n", |
||||
" temperature=0.7,\n", |
||||
" system=system_message,\n", |
||||
" messages=history_claude,\n", |
||||
" tools=tools_claude\n", |
||||
" )\n", |
||||
" image = None\n", |
||||
" print(f\"Claude's message is \\n {pprint.pprint(message)}\\n\")\n", |
||||
" try: \n", |
||||
" if message.stop_reason == \"tool_use\":\n", |
||||
" tool_use = next(block for block in message.content if block.type == \"tool_use\")\n", |
||||
" event_text = tool_use.input.get('event_text')\n", |
||||
" image = artist(event_text)\n", |
||||
" tool_result = handle_tool_call(event_text)\n", |
||||
" #tool_result = handle_tool_call(tool_use, \"Claude\")\n", |
||||
" \n", |
||||
" print(f\"Tool Result: {tool_result}\")\n", |
||||
" \n", |
||||
" response = claude.messages.stream(\n", |
||||
" model=ANTHROPIC_MODEL,\n", |
||||
" max_tokens=4096,\n", |
||||
" system=system_message,\n", |
||||
" messages=[\n", |
||||
" {\n", |
||||
" \"role\": \"user\", \n", |
||||
" \"content\": [\n", |
||||
" {\n", |
||||
" \"type\": \"text\",\n", |
||||
" \"text\": history[-1].get('content')\n", |
||||
" }\n", |
||||
" ]\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"role\": \"assistant\", \n", |
||||
" \"content\": message.content\n", |
||||
" },\n", |
||||
" {\n", |
||||
" \"role\": \"user\",\n", |
||||
" \"content\": [\n", |
||||
" {\n", |
||||
" \"type\": \"tool_result\",\n", |
||||
" \"tool_use_id\": tool_use.id,\n", |
||||
" \"content\": tool_result,\n", |
||||
" }\n", |
||||
" ],\n", |
||||
" },\n", |
||||
" ],\n", |
||||
" tools=tools_claude\n", |
||||
" )\n", |
||||
" result = \"\"\n", |
||||
" with response as stream:\n", |
||||
" for text in stream.text_stream:\n", |
||||
" result += text or \"\"\n", |
||||
" yield result, None\n", |
||||
" talker(result)\n", |
||||
" #image= artist(tool_input.get('event_text'))\n", |
||||
" yield result, image\n", |
||||
" else:\n", |
||||
" response = next((block.text for block in message.content if hasattr(block, \"text\")), None,)\n", |
||||
" chunk_size=30\n", |
||||
" for i in range(0, len(response), chunk_size):\n", |
||||
" yield response[:i + chunk_size], None\n", |
||||
" time.sleep(0.05) #Simulate streaming delay\n", |
||||
" talker(response)\n", |
||||
" #image= artist(tool_input.get('event_text'))\n", |
||||
" yield response, None\n", |
||||
" except Exception as e:\n", |
||||
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", |
||||
" print(e)\n", |
||||
" yield error_message, None\n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9915ae05-5d52-4fdc-a3ea-18f050a79bd3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat_gpt(history):\n", |
||||
" print(f\"\\nhistory is {history}\\n\")\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", |
||||
" response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)\n", |
||||
" image = None\n", |
||||
" try:\n", |
||||
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||
" message = response.choices[0].message\n", |
||||
" tool = message.tool_calls[0]\n", |
||||
" arguments = json.loads(tool.function.arguments)\n", |
||||
" event_text = arguments.get('event_text')\n", |
||||
" image = artist(event_text)\n", |
||||
" event_json = handle_tool_call(event_text)\n", |
||||
" tool_output = {\n", |
||||
" \"role\": \"tool\",\n", |
||||
" \"content\": event_json,\n", |
||||
" \"tool_call_id\": tool.id\n", |
||||
" }\n", |
||||
" messages.append(message)\n", |
||||
" messages.append(tool_output)\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=OPENAI_MODEL,\n", |
||||
" messages=messages,\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" yield result, None\n", |
||||
" talker(result)\n", |
||||
" yield result, image\n", |
||||
" else: \n", |
||||
" reply = response.choices[0].message.content\n", |
||||
" chunk_size=30\n", |
||||
" for i in range(0, len(reply), chunk_size):\n", |
||||
" yield reply[:i + chunk_size], None\n", |
||||
" time.sleep(0.05)\n", |
||||
" talker(reply)\n", |
||||
" #image= artist(\"No such event\")\n", |
||||
" yield reply, None\n", |
||||
" except Exception as e:\n", |
||||
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", |
||||
" print(e)\n", |
||||
" yield error_message, None" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "30fa3de9-5b55-4bb6-93ea-a13fc09d38c1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat_gemini(history):\n", |
||||
" print(f\"\\nhistory is {history}\\n\")\n", |
||||
" history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format\n", |
||||
" else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is\n", |
||||
" else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc\n", |
||||
" \n", |
||||
" print(f\"\\nhistory_gemini is {history_gemini}\\n\")\n", |
||||
" model = genai.GenerativeModel(\n", |
||||
" model_name=GOOGLE_MODEL,\n", |
||||
" system_instruction=system_message\n", |
||||
" )\n", |
||||
" response = model.generate_content(\n", |
||||
" contents = history_gemini,\n", |
||||
" #contents = contents,\n", |
||||
" tools = [{\n", |
||||
" 'function_declarations': gemini_event_details,\n", |
||||
" }],\n", |
||||
" )\n", |
||||
" #print(f\"response is {response}\")\n", |
||||
"\n", |
||||
" image = None\n", |
||||
" try:\n", |
||||
" # Check if the model wants to use a tool\n", |
||||
" if response.candidates[0].content.parts[0].function_call:\n", |
||||
" function_call = response.candidates[0].content.parts[0].function_call\n", |
||||
" event_text = function_call.args.get(\"event_text\")\n", |
||||
" image = artist(event_text)\n", |
||||
" tool_result = handle_tool_call(event_text)\n", |
||||
" \n", |
||||
" print(f\"\\ntool_result is {tool_result}\\n\")\n", |
||||
" stream = model.generate_content(\n", |
||||
" \"Based on this information `\" + tool_result + \"`, extract the details of the event and provide the event details to the user\",\n", |
||||
" stream=True \n", |
||||
" )\n", |
||||
" #print(f\"\\nSecond response is {stream}\\n\")\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.candidates[0].content.parts[0].text or \"\"\n", |
||||
" #print(f\"REsult is \\n{result}\\n\")\n", |
||||
" yield result, None\n", |
||||
" talker(result) \n", |
||||
" yield result, image\n", |
||||
" #print(f\"REsult is \\n{result}\\n\")\n", |
||||
" else: \n", |
||||
" reply = response.text\n", |
||||
" chunk_size=30\n", |
||||
" for i in range(0, len(reply), chunk_size):\n", |
||||
" yield reply[:i + chunk_size], None\n", |
||||
" time.sleep(0.05)\n", |
||||
" talker(reply)\n", |
||||
" #image= artist(\"No such event\")\n", |
||||
" yield reply, None\n", |
||||
" \n", |
||||
" except Exception as e:\n", |
||||
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", |
||||
" print(e)\n", |
||||
" yield error_message, None\n", |
||||
" \n", |
||||
"\n", |
||||
" \n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "570fffb2-a054-4217-89ae-8b6f4630e383", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_and_process_model_responses(fn_name, chatbot):#, response, image):\n", |
||||
" response = \"\"\n", |
||||
" image = None\n", |
||||
" for response, image in fn_name(chatbot):\n", |
||||
" if chatbot and chatbot[-1][\"role\"] == \"assistant\": \n", |
||||
" chatbot[-1][\"content\"] = response # Update the last message\n", |
||||
" else:\n", |
||||
" chatbot.append({\"role\": \"assistant\", \"content\": response}) # First assistant message\n", |
||||
" #print(chatbot)\n", |
||||
" yield chatbot, image # Stream updated history to UI\n", |
||||
" \n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "32a6ccce-44fa-49a7-bd1a-08c70002771c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def handle_tool_call(event_text):\n", |
||||
" print(f\"event text is {event_text}\")\n", |
||||
" event_found = get_event_details(event_text)\n", |
||||
" print(f\"event_found is {event_found}\")\n", |
||||
" \n", |
||||
" if event_found:\n", |
||||
" response = json.dumps({\"name\": event_found['name'],\"description\": event_found['description'], \"when\": event_found['date_time'], \"where\": event_found['location']})\n", |
||||
" else: \n", |
||||
" response = json.dumps({\"event\": f\"Sorry, there is no schedule currently for {event_text}\"})\n", |
||||
" return response \n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4eaaaf9e-64b9-4d0b-9931-388cee8ea21d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def process_chosen_model(chatbot, model):\n", |
||||
" if model == 'GPT':\n", |
||||
" for chatbot, image in call_and_process_model_responses(chat_gpt, chatbot):\n", |
||||
" yield chatbot, image\n", |
||||
" elif model == 'Claude': \n", |
||||
" for chatbot, image in call_and_process_model_responses(chat_claude, chatbot):\n", |
||||
" yield chatbot, image\n", |
||||
" else:\n", |
||||
" #for Gemini, the content is to be replaced with parts.\n", |
||||
" for chatbot, image in call_and_process_model_responses(chat_gemini, chatbot):\n", |
||||
" yield chatbot, image\n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "627f6d49-5376-4f1d-8071-f2e96fd6e78b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# More involved Gradio code as we're not using the preset Chat interface!\n", |
||||
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", |
||||
"\n", |
||||
"with gr.Blocks(css=\"\"\"\n", |
||||
" select.gr-box { \n", |
||||
" appearance: auto !important; \n", |
||||
" -webkit-appearance: auto !important; \n", |
||||
" }\n", |
||||
"\"\"\") as ui:\n", |
||||
" with gr.Row():\n", |
||||
" gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio! An Event Management Assistant</h1>\") # Added title\n", |
||||
" with gr.Row():\n", |
||||
" # with gr.Column(scale=3): #Acts as a spacer on the left\n", |
||||
" # pass\n", |
||||
" \n", |
||||
" with gr.Column(scale=0):\n", |
||||
" model = gr.Dropdown(\n", |
||||
" choices=[\"GPT\", \"Claude\", \"Gemini\"], \n", |
||||
" label=\"Select model\", \n", |
||||
" value=\"GPT\",\n", |
||||
" interactive=True,\n", |
||||
" container=True # Applying the CSS class\n", |
||||
" )\n", |
||||
" # with gr.Column(scale=-54, min_width=200):\n", |
||||
" # gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio</h1>\") # Added title\n", |
||||
" # pass #Acts as a spacer on the right\n", |
||||
" with gr.Row():\n", |
||||
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n", |
||||
" image_output = gr.Image(height=500)\n", |
||||
" with gr.Row():\n", |
||||
" entry = gr.Textbox(label=\"Ask me \\\"when is pta meeting\\\", \\\"how about book fair\\\" and more... \")\n", |
||||
" with gr.Row():\n", |
||||
" clear = gr.Button(\"Clear\", min_width=150)\n", |
||||
" #message=None\n", |
||||
"\n", |
||||
" def do_entry(message, history):\n", |
||||
" history += [{\"role\":\"user\", \"content\":message}]\n", |
||||
" return \"\", history\n", |
||||
" \n", |
||||
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", |
||||
" process_chosen_model, inputs=[chatbot, model], outputs=[chatbot, image_output]\n", |
||||
" )\n", |
||||
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", |
||||
"\n", |
||||
"ui.launch(inbrowser=True)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,227 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6aa646e3-7a57-461a-b69a-073179effa18", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Additional End of week Exercise - week 2\n", |
||||
"\n", |
||||
"This includes \n", |
||||
"- Gradio UI\n", |
||||
"- use of the system prompt to add expertise\n", |
||||
"- audio input so you can talk to it\n", |
||||
"- respond with audio" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "72f3dca4-b052-4e9f-90c8-f42e667c165c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"import gradio as gr\n", |
||||
"import json" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "23570b9f-8c7a-4cc7-b809-3505334b60a7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"openai = OpenAI()\n", |
||||
"MODEL = 'gpt-4o-mini'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d379178a-8672-4e6f-a380-ad8d85f5c64e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"\"\"You are a personal study tutor, designed to provide clear, yet brief and succinct answers to \n", |
||||
"students that ask you questions. The topics are related to data science, computer science \n", |
||||
"and technology in general, so you are allowed to use a moderate level of jargon. Explain in \n", |
||||
"simple terminology, so a student can easily understand. \n", |
||||
"\n", |
||||
"You may also be asked about prices for special courses. In this case, respond that you have no such\n", |
||||
"data available. \n", |
||||
"\n", |
||||
"\"\"\"\n", |
||||
"# Use a tabular format where possible \n", |
||||
"# for ease of information flow " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4745d439-c66e-4e5c-b5d4-9f0ba97aefdc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
"\n", |
||||
" reply = response.choices[0].message.content\n", |
||||
" history += [{\"role\":\"assistant\", \"content\":reply}]\n", |
||||
"\n", |
||||
" # Comment out or delete the next line if you'd rather skip Audio for now..\n", |
||||
" talker(reply)\n", |
||||
" \n", |
||||
" return history" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a8b31799-df86-4151-98ea-66ef50fe767e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install openai-whisper" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9f5b8e51-2833-44be-a4f4-63c4683f2b6e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import whisper\n", |
||||
"\n", |
||||
"def transcribe_audio(audio):\n", |
||||
" if audio is None:\n", |
||||
" return \"No audio received.\"\n", |
||||
" \n", |
||||
" model = whisper.load_model(\"base\") # You can use \"tiny\", \"small\", etc.\n", |
||||
" result = model.transcribe(audio)\n", |
||||
" \n", |
||||
" return result[\"text\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e55f8e43-2da1-4f2a-bcd4-3fffa830db48", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import base64\n", |
||||
"from io import BytesIO\n", |
||||
"from PIL import Image\n", |
||||
"from IPython.display import Audio, display\n", |
||||
"\n", |
||||
"def talker(message):\n", |
||||
" response = openai.audio.speech.create(\n", |
||||
" model=\"tts-1\",\n", |
||||
" voice=\"onyx\",\n", |
||||
" input=message)\n", |
||||
"\n", |
||||
" audio_stream = BytesIO(response.content)\n", |
||||
" output_filename = \"output_audio.mp3\"\n", |
||||
" with open(output_filename, \"wb\") as f:\n", |
||||
" f.write(audio_stream.read())\n", |
||||
"\n", |
||||
" # Play the generated audio\n", |
||||
" display(Audio(output_filename, autoplay=True))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cb3107a7-bfdc-4255-825f-bfabcf458c0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# More involved Gradio code as we're not using the preset Chat interface!\n", |
||||
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", |
||||
"\n", |
||||
"with gr.Blocks() as ui:\n", |
||||
" with gr.Row():\n", |
||||
" chatbot = gr.Chatbot(height=400,type=\"messages\")\n", |
||||
" with gr.Row():\n", |
||||
" entry = gr.Textbox(label=\"Chat with our StudyAI Assistant:\")\n", |
||||
" # with gr.Row():\n", |
||||
" # entry = gr.Textbox(label=\"Speak or Type:\", placeholder=\"Speak your question...\", interactive=True, microphone=True)\n", |
||||
" with gr.Row():\n", |
||||
" audio_input = gr.Audio(type=\"filepath\", label=\"Speak your question\")\n", |
||||
" with gr.Row():\n", |
||||
" clear = gr.Button(\"Clear\")\n", |
||||
"\n", |
||||
" def do_entry(message, history):\n", |
||||
" history += [{\"role\":\"user\", \"content\":message}]\n", |
||||
" return \"\", history\n", |
||||
"\n", |
||||
" def handle_audio(audio, history):\n", |
||||
" text = transcribe_audio(audio)\n", |
||||
" history += [{\"role\": \"user\", \"content\": text}]\n", |
||||
" return \"\", history\n", |
||||
"\n", |
||||
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", |
||||
" chat, inputs=[chatbot], outputs=[chatbot]\n", |
||||
" )\n", |
||||
"\n", |
||||
" audio_input.change(handle_audio, inputs=[audio_input, chatbot], outputs=[entry, chatbot]).then(\n", |
||||
" chat, inputs=[chatbot], outputs=[chatbot]\n", |
||||
" )\n", |
||||
" \n", |
||||
" clear.click(lambda: [], inputs=None, outputs=chatbot, queue=False)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "73e0a776-d43e-4b04-a37f-a27d3714cf47", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ui.launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bcd45503-d314-4b28-a41c-4dbb87059188", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,129 @@
|
||||
import gradio as gr |
||||
import requests |
||||
import json |
||||
from json_handlers import SettingsHandler, LanguagesHandler |
||||
from ollama_utils import get_ollama_response |
||||
|
||||
|
||||
class GradioUI: |
||||
def __init__(self, models: list, settings: SettingsHandler, languages: LanguagesHandler): |
||||
self.models = models |
||||
self.settings = settings |
||||
self.languages = languages |
||||
|
||||
self.langs = self.languages.get_supported_languages() |
||||
|
||||
def _translate_callback(self, text, model, translte_from, translte_to): |
||||
model_options = self.settings.get_advanced_settings() |
||||
|
||||
full_response = "" |
||||
chunck_response = get_ollama_response(model, text, translte_from, translte_to, model_options) |
||||
for chunck in chunck_response: |
||||
full_response += chunck |
||||
yield full_response |
||||
|
||||
def _temp_setting_callback(self, temp_dropdown_val): |
||||
self.settings.update_advanced_settings_param("temperature", temp_dropdown_val) |
||||
|
||||
def _top_k_setting_callback(self, top_k_dropdown_val): |
||||
self.settings.update_advanced_settings_param("top_k", top_k_dropdown_val) |
||||
|
||||
def _top_p_setting_callback(self, top_p_dropdown_val): |
||||
self.settings.update_advanced_settings_param("top_p", top_p_dropdown_val) |
||||
|
||||
def _reset_to_default_callback(self): |
||||
temperature = 0.0 |
||||
top_k = 40.0 |
||||
top_p = 0.9 |
||||
default_settings = { |
||||
"temperature": temperature, |
||||
"top_k": top_k, |
||||
"top_p": top_p |
||||
} |
||||
self.settings.update_advanced_settings(default_settings) |
||||
return temperature, top_k, top_p |
||||
|
||||
def build_and_launch(self): |
||||
with gr.Blocks() as gui: |
||||
gr.Markdown("# LLM Translator") |
||||
with gr.Tab("Translate"): |
||||
with gr.Row(): |
||||
model_dropdown = gr.Dropdown( |
||||
label="Model", |
||||
info="Choose LLM Model", |
||||
choices=self.models |
||||
) |
||||
with gr.Group(): |
||||
with gr.Row(): |
||||
translte_from = gr.Dropdown( |
||||
value=self.langs[0], |
||||
show_label=False, |
||||
choices=self.langs, |
||||
interactive=True |
||||
) |
||||
translte_to = gr.Dropdown( |
||||
value=self.langs[1], |
||||
show_label=False, |
||||
choices=self.langs, |
||||
interactive=True |
||||
) |
||||
with gr.Row(): |
||||
translate_input = gr.Textbox(label="Your Input", lines=15, max_lines=15) |
||||
translate_output = gr.Textbox(label="Translated", lines=15, max_lines=15) |
||||
|
||||
btn = gr.Button("Translate", variant="primary") |
||||
btn.click( |
||||
fn=self._translate_callback, |
||||
inputs=[translate_input, model_dropdown, translte_from, translte_to], |
||||
outputs=translate_output |
||||
) |
||||
|
||||
with gr.Tab("Advanced Settings"): |
||||
temp_dropdown = gr.Number( |
||||
value=self.settings.get_advanced_setting_param("temperature"), |
||||
label="Temperature", |
||||
info="This parameter control how creative the model is\n0 means no creativity\n1 means very creative", |
||||
minimum=0, |
||||
maximum=1, |
||||
step=0.1, |
||||
interactive=True |
||||
) |
||||
|
||||
gr.Markdown() # Used only for spacing |
||||
|
||||
top_k_dropdown = gr.Number( |
||||
value=self.settings.get_advanced_setting_param("top_k"), |
||||
label="Top K", |
||||
info="A higher value (e.g. 100) will give more diverse answers\nwhile a lower value (e.g. 10) will be more conservative.", |
||||
minimum=1, |
||||
maximum=200, |
||||
step=1, |
||||
interactive=True |
||||
) |
||||
|
||||
gr.Markdown() # Used only for spacing |
||||
|
||||
top_p_dropdown = gr.Number( |
||||
value=self.settings.get_advanced_setting_param("top_p"), |
||||
label="Top P", |
||||
info="A higher value (e.g., 0.95) will lead to more diverse answers\nwhile a lower value (e.g., 0.5) will be more conservative", |
||||
minimum=0.1, |
||||
maximum=1.0, |
||||
step=0.1, |
||||
interactive=True |
||||
) |
||||
|
||||
gr.Markdown() # Used only for spacing |
||||
|
||||
reset_btn = gr.Button("Reset to Default") |
||||
reset_btn.click( |
||||
fn=self._reset_to_default_callback, |
||||
outputs=[temp_dropdown, top_k_dropdown, top_p_dropdown] |
||||
) |
||||
|
||||
temp_dropdown.change(self._temp_setting_callback, temp_dropdown) |
||||
top_k_dropdown.change(self._top_k_setting_callback, top_k_dropdown) |
||||
top_p_dropdown.change(self._top_p_setting_callback, top_p_dropdown) |
||||
|
||||
gui.launch() |
||||
|
@ -0,0 +1,60 @@
|
||||
import json |
||||
|
||||
|
||||
class SettingsHandler:
    """Read and persist the "Advanced Settings" section of a JSON settings file.

    The file is expected to look like:
        {"Advanced Settings": {"temperature": ..., "top_k": ..., "top_p": ...}}
    An in-memory copy is kept in ``self.advanced_settings``; every write
    updates both the file and the cache.
    """

    def __init__(self, json_filename):
        self.json_filename = json_filename
        self.advanced_settings = self.load_current_settings()

    def load_current_settings(self) -> dict:
        """Return the "Advanced Settings" mapping stored in the JSON file."""
        with open(self.json_filename, "r") as fh:
            data = json.load(fh)
        return data["Advanced Settings"]

    def update_advanced_settings(self, updated_advanced_settings: dict):
        """Replace the whole settings mapping, on disk and in memory."""
        payload = {"Advanced Settings": updated_advanced_settings}

        print(payload)

        with open(self.json_filename, "w") as fh:
            json.dump(payload, fh)

        self.advanced_settings = updated_advanced_settings

    def update_advanced_settings_param(self, key: str, new_val):
        """Set one setting and persist; unknown keys are silently ignored."""
        if self.get_advanced_setting_param(key) is not None:
            updated = self.advanced_settings
            updated[key] = new_val
            self.update_advanced_settings(updated)

    def get_advanced_settings(self):
        """Return the in-memory settings mapping."""
        return self.advanced_settings

    def get_advanced_setting_param(self, key: str):
        """Return one setting's value, or None if the key is absent."""
        return self.advanced_settings.get(key)
||||
|
||||
|
||||
class LanguagesHandler:
    """Load and validate the list of supported languages from a JSON file."""

    def __init__(self, json_filename):
        self.json_filename = json_filename
        self.langs = self.load_languages()

    def load_languages(self) -> list:
        """Read and validate the language list.

        Returns:
            The list of language names from the JSON file.

        Raises:
            RuntimeError: if the JSON root is not a list, or fewer than
                two languages are defined (a translator needs a pair).
        """
        with open(self.json_filename, "r") as file:
            langs = json.load(file)

        # isinstance() instead of a type() comparison: the idiomatic check.
        if not isinstance(langs, list):
            raise RuntimeError("Languages must be provided as lists")
        if len(langs) < 2:
            raise RuntimeError("At least 2 languages must be supported")

        return langs

    def get_supported_languages(self):
        """Return the validated language list."""
        return self.langs
||||
|
@ -0,0 +1,6 @@
|
||||
[ |
||||
"German", |
||||
"English", |
||||
"Spanish", |
||||
"French" |
||||
] |
@ -0,0 +1,15 @@
|
||||
from json_handlers import SettingsHandler, LanguagesHandler |
||||
from ollama_utils import get_downloaded_models |
||||
from gradio_ui import GradioUI |
||||
|
||||
# Paths to the two JSON configuration files used by the app.
settings_json = "settings.json"
languages_json = "languages.json"

if __name__ == "__main__":
    # Load persisted model options and the supported-language list.
    settings = SettingsHandler(settings_json)
    languages = LanguagesHandler(languages_json)

    # Only locally downloaded Ollama models can be offered in the UI.
    models = get_downloaded_models()

    GradioUI(models, settings, languages).build_and_launch()
@ -0,0 +1,28 @@
|
||||
import requests |
||||
import json |
||||
import ollama |
||||
|
||||
|
||||
def get_downloaded_models():
    """Return the names of all models available in the local Ollama server.

    Queries Ollama's /api/tags endpoint. A timeout is set so the caller
    does not hang indefinitely when the server is unreachable, and HTTP
    errors are raised explicitly instead of surfacing later as a
    confusing JSON parse failure.

    Returns:
        list[str]: model names as reported by Ollama.
    """
    response = requests.get("http://localhost:11434/api/tags", timeout=10)
    response.raise_for_status()
    return [model["name"] for model in response.json()["models"]]
||||
|
||||
def get_ollama_response(model, prompt, translte_from, translte_to, options):
    """Stream a translation from a local Ollama model.

    Reads the system prompt from system_prompt.txt, builds a
    system+user message pair, and yields the response text chunk by
    chunk so callers can render it incrementally.
    """
    # Re-read the system prompt on each call so edits take effect live.
    with open('system_prompt.txt', 'r') as file:
        system_prompt = file.read()

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"Translate from {translte_from} to {translte_to}: {prompt}"},
    ]

    for chunk in ollama.chat(model, messages, options=options, stream=True):
        yield chunk["message"]["content"]
@ -0,0 +1 @@
|
||||
Just run the main.py script after activating conda environment 'llms' |
@ -0,0 +1 @@
|
||||
{"Advanced Settings": {"temperature": 0.0, "top_k": 40.0, "top_p": 0.9}} |
@ -0,0 +1,17 @@
|
||||
You are a translator. |
||||
You should translate the prompts according to the following criteria: |
||||
- You should respond with clear, straight-to-the-point responses.
||||
- Your response should have a good structure and good linguistic features. |
||||
- You should translate the sentence as it is. Do not add extra sentences or phrases on your own. |
||||
- Do not answer questions: even if the prompt is a question, you should translate the question and not answer it.
||||
- If you do not understand the prompt, do not say that you do not understand, just echo the prompt. |
||||
- Do not include in the response phrases like 'here is the translation' or any phrases like that |
||||
Here are some examples for good responses: |
||||
< |
||||
Prompt: 'Translate from French to English: Hier, j'ai passé toute la journée à explorer la ville avec mes amis, et nous avons visité plusieurs musées avant de nous arrêter pour un délicieux dîner dans un restaurant local.' |
||||
Response: 'Yesterday, I spent the whole day exploring the city with my friends, and we visited several museums before stopping for a delicious dinner at a local restaurant.' |
||||
> |
||||
< |
||||
Prompt: 'Translate from Spanish to English: vdaiughadvlkj' |
||||
Response: 'vdaiughadvlkj' |
||||
> |
@ -0,0 +1,408 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Project - Airline AI Assistant\n", |
||||
"\n", |
||||
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import json\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"OpenAI API Key exists and begins sk-proj-\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"# Initialization\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"if openai_api_key:\n", |
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||
"else:\n", |
||||
" print(\"OpenAI API Key not set\")\n", |
||||
" \n", |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"openai = OpenAI()\n", |
||||
"\n", |
||||
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n", |
||||
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n", |
||||
"# MODEL = \"llama3.2\"\n", |
||||
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", |
||||
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", |
||||
"system_message += \"Always be accurate. If you don't know the answer, say so.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7901\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7901/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 4, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", |
||||
"\n", |
||||
"def chat(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n", |
||||
" return response.choices[0].message.content\n", |
||||
"\n", |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Tools\n", |
||||
"\n", |
||||
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n", |
||||
"\n", |
||||
"With tools, you can write a function, and have the LLM call that function as part of its response.\n", |
||||
"\n", |
||||
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n", |
||||
"\n", |
||||
"Well, kinda." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 85, |
||||
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
# A small lookup tool the assistant can call.

ticket_prices = {"london": "$799", "paris": "$899", "tokyo": "$1400", "berlin": "$499"}

def get_ticket_price(destination_city):
    """Return the return-ticket price for a city, or "Unknown" if we don't fly there."""
    print(f"Tool get_ticket_price called for {destination_city}")
    return ticket_prices.get(destination_city.lower(), "Unknown")

def get_destinations():
    """Return a comma-separated string of every city we serve."""
    return ", ".join(ticket_prices.keys())
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 86, |
||||
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Tool get_ticket_price called for Berlin\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [ |
||||
"'london, paris, tokyo, berlin'" |
||||
] |
||||
}, |
||||
"execution_count": 86, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
} |
||||
], |
||||
"source": [ |
||||
"get_ticket_price(\"Berlin\")\n", |
||||
"get_destinations()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# There's a particular dictionary structure that's required to describe our function:\n", |
||||
"\n", |
||||
"price_function = {\n", |
||||
" \"name\": \"get_ticket_price\",\n", |
||||
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", |
||||
" \"parameters\": {\n", |
||||
" \"type\": \"object\",\n", |
||||
" \"properties\": {\n", |
||||
" \"destination_city\": {\n", |
||||
" \"type\": \"string\",\n", |
||||
" \"description\": \"The city that the customer wants to travel to\",\n", |
||||
" },\n", |
||||
" },\n", |
||||
" \"required\": [\"destination_city\"],\n", |
||||
" \"additionalProperties\": False\n", |
||||
" }\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 29, |
||||
"id": "5842b7f1-e357-494c-9bd4-3aa9f9fd4332", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
# Tool schema describing get_destinations for the OpenAI tools API.
# Fixed: "additionalProperties" must live INSIDE "parameters", and
# "parameters" must declare "type": "object" — the original placed
# additionalProperties at the top level with an empty parameters dict,
# which the tools API does not accept as a valid JSON-Schema object.
destination_function = {
    "name": "get_destinations",
    "description": "Get the destinations we serve. Call this whenever you need to know the destinations FlightAI flies to, for example when a customer asks 'Where do you fly to'",
    "parameters": {
        "type": "object",
        "properties": {},
        "required": [],
        "additionalProperties": False
    }
}
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 30, |
||||
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# And this is included in a list of tools:\n", |
||||
"\n", |
||||
"tools = [{\"type\": \"function\", \"function\": price_function},\n", |
||||
" {\"type\": \"function\", \"function\": destination_function}]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Getting OpenAI to use our Tool\n", |
||||
"\n", |
||||
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n", |
||||
"\n", |
||||
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n", |
||||
"\n", |
||||
"Here's how the new chat function looks:" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5db52df0-cb48-4017-bae3-0014f5ca3a56", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
def chat(message, history):
    """Gradio chat handler that lets the model call the price/destination tools.

    Sends the conversation with the tool schemas attached; when the model
    requests a tool call, runs the matching local handler, feeds the tool
    result back, and returns the model's final text reply.
    """
    messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)

    if response.choices[0].finish_reason == "tool_calls":
        message = response.choices[0].message
        tool_name = message.tool_calls[0].function.name

        # Dispatch to the matching local tool handler.
        # Fixed: the original called undefined `handle_tool_call(message)`;
        # the handler defined in this notebook is `handle_tool_call_price`.
        if tool_name == "get_ticket_price":
            response, city = handle_tool_call_price(message)
        elif tool_name == "get_destinations":
            response = handle_tool_call_destination(message)

        # Append the assistant tool-call message and the tool result, then
        # ask the model again so it can produce the final user-facing answer.
        messages.extend([message, response])
        response = openai.chat.completions.create(model=MODEL, messages=messages)

    return response.choices[0].message.content
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 91, |
||||
"id": "b0992986-ea09-4912-a076-8e5603ee631f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# We have to write that function handle_tool_call for price:\n", |
||||
"\n", |
||||
def handle_tool_call_price(message):
    """Build the tool-role reply for a get_ticket_price tool call.

    Returns the tool message dict (for the follow-up completion request)
    together with the city that was looked up.
    """
    tool_call = message.tool_calls[0]
    city = json.loads(tool_call.function.arguments).get('destination_city')
    reply = {
        "role": "tool",
        "content": json.dumps({"destination_city": city, "price": get_ticket_price(city)}),
        "tool_call_id": tool_call.id,
    }
    return reply, city
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 92, |
||||
"id": "4bbffdb0-5ab7-414e-8d2b-3d9367e64526", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# We have to write that function handle_tool_call for destinations:\n", |
||||
"\n", |
||||
def handle_tool_call_destination(message):
    """Build the tool-role reply for a get_destinations tool call."""
    destinations = get_destinations()
    print(destinations)
    return {
        "role": "tool",
        "content": destinations,
        "tool_call_id": message.tool_calls[0].id,
    }
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 93, |
||||
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"* Running on local URL: http://127.0.0.1:7928\n", |
||||
"\n", |
||||
"To create a public link, set `share=True` in `launch()`.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/html": [ |
||||
"<div><iframe src=\"http://127.0.0.1:7928/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"execution_count": 93, |
||||
"metadata": {}, |
||||
"output_type": "execute_result" |
||||
}, |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Tool get_ticket_price called for Paris\n", |
||||
"Tool get_ticket_price called for Timbuktu\n", |
||||
"london, paris, tokyo, berlin\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "243c156d-86c3-4d0a-8119-d0a532daa5cc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,167 @@
|
||||
{ |
||||
"nbformat": 4, |
||||
"nbformat_minor": 0, |
||||
"metadata": { |
||||
"colab": { |
||||
"provenance": [] |
||||
}, |
||||
"kernelspec": { |
||||
"name": "python3", |
||||
"display_name": "Python 3" |
||||
}, |
||||
"language_info": { |
||||
"name": "python" |
||||
} |
||||
}, |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"source": [ |
||||
"Import libraries as needed & keep your gemini api key ready" |
||||
], |
||||
"metadata": { |
||||
"id": "2UAcHYzT6ikw" |
||||
} |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"#!pip install gradio" |
||||
], |
||||
"metadata": { |
||||
"id": "XW0IY4xK6JZ1" |
||||
}, |
||||
"execution_count": 14, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"metadata": { |
||||
"id": "dwoPNMMP4ZSh" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"from google import genai\n", |
||||
"from google.genai import types\n", |
||||
"from google.colab import userdata\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
def get_trip_itinerary(budget: int) -> str:
    """Return a trip itinerary matching the given budget (in dollars).

    Falls back to a friendly "no itinerary" message for unknown amounts.
    """
    # Fixed: the original annotated this dict as `Dict[int, str]` without
    # importing `typing.Dict` (undefined name); use the built-in `dict`.
    itinerary_dict: dict = {
        500: "Paris: 3-day budget trip covering Eiffel Tower, Louvre, and Seine River Cruise.",
        1000: "Tokyo: 5-day adventure covering Shibuya, Akihabara, Mount Fuji day trip.",
        2000: "New York: 7-day luxury stay covering Times Square, Broadway show, and helicopter tour.",
        3000: "Dubai: 7-day ultra-luxury trip with Burj Khalifa VIP tour, desert safari, and yacht cruise.",
    }

    return itinerary_dict.get(budget, "No itinerary found for this budget. Try another amount!")
||||
], |
||||
"metadata": { |
||||
"id": "cnYD07T24ueV" |
||||
}, |
||||
"execution_count": 3, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"from google.genai import types\n", |
||||
"\n", |
||||
"config = types.GenerateContentConfig(tools=[get_trip_itinerary])\n", |
||||
"\n", |
||||
"from google import genai\n", |
||||
"\n", |
||||
"client = genai.Client(api_key=userdata.get('gemini_api'))\n", |
||||
"\n", |
||||
"response = client.models.generate_content(\n", |
||||
" model='gemini-2.0-flash',\n", |
||||
" config=config,\n", |
||||
" contents='Based on the user budget suggest trip itinerary'\n", |
||||
")\n" |
||||
], |
||||
"metadata": { |
||||
"id": "3WRUXvD45VFC" |
||||
}, |
||||
"execution_count": 7, |
||||
"outputs": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [ |
||||
"import gradio as gr\n", |
||||
"\n", |
||||
"# Chat function using Gemini\n", |
||||
"chat = client.chats.create(model='gemini-2.0-flash', config=config)\n", |
||||
"\n", |
||||
"def chat_with_ai(user_input: str):\n", |
||||
" response = chat.send_message(user_input)\n", |
||||
" return response.text\n", |
||||
"\n", |
||||
"# Gradio Chat Interface\n", |
||||
"demo = gr.Interface(fn=chat_with_ai, inputs=\"text\", outputs=\"text\", title=\"AI Trip Planner\")\n", |
||||
"\n", |
||||
"demo.launch()\n" |
||||
], |
||||
"metadata": { |
||||
"colab": { |
||||
"base_uri": "https://localhost:8080/", |
||||
"height": 645 |
||||
}, |
||||
"id": "5fE700z96DHs", |
||||
"outputId": "3e35423c-8b2b-4868-8113-00d9d3a7a2ba" |
||||
}, |
||||
"execution_count": 13, |
||||
"outputs": [ |
||||
{ |
||||
"output_type": "stream", |
||||
"name": "stdout", |
||||
"text": [ |
||||
"Running Gradio in a Colab notebook requires sharing enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n", |
||||
"\n", |
||||
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n", |
||||
"* Running on public URL: https://079a23f363400da700.gradio.live\n", |
||||
"\n", |
||||
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"output_type": "display_data", |
||||
"data": { |
||||
"text/plain": [ |
||||
"<IPython.core.display.HTML object>" |
||||
], |
||||
"text/html": [ |
||||
"<div><iframe src=\"https://079a23f363400da700.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>" |
||||
] |
||||
}, |
||||
"metadata": {} |
||||
}, |
||||
{ |
||||
"output_type": "execute_result", |
||||
"data": { |
||||
"text/plain": [] |
||||
}, |
||||
"metadata": {}, |
||||
"execution_count": 13 |
||||
} |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"source": [], |
||||
"metadata": { |
||||
"id": "XC9zzq8X5u8m" |
||||
}, |
||||
"execution_count": null, |
||||
"outputs": [] |
||||
} |
||||
] |
||||
} |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,78 @@
|
||||
import gradio as gr |
||||
import torch |
||||
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer, AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline |
||||
from huggingface_hub import login |
||||
import os |
||||
|
||||
# Use the secret stored in the Hugging Face space |
||||
token = os.getenv("HF_TOKEN") |
||||
login(token=token) |
||||
|
||||
# Whisper Model Optimization |
||||
model = "openai/whisper-tiny" |
||||
DEVICE = "cuda" if torch.cuda.is_available() else "cpu" |
||||
|
||||
processor = AutoProcessor.from_pretrained(model) |
||||
|
||||
|
||||
transcriber = pipeline( |
||||
"automatic-speech-recognition", |
||||
model=model, |
||||
tokenizer=processor.tokenizer, |
||||
feature_extractor=processor.feature_extractor, |
||||
device=0 if torch.cuda.is_available() else "cpu", |
||||
) |
||||
|
||||
|
||||
|
||||
# Function to Transcribe & Generate Minutes |
||||
# Function to transcribe audio and generate meeting minutes.
def process_audio(audio_file):
    """Transcribe an audio file and turn the transcript into markdown minutes.

    Returns an error string when no audio is supplied; otherwise loads a
    4-bit-quantized LLaMA model and returns the generated minutes (decoded
    from the full generated sequence, so it includes the prompt echo).
    """
    if audio_file is None:
        return "Error: No audio provided!"

    # Transcribe audio with the module-level Whisper pipeline.
    # Fixed: the original did `del transcriber` / `del processor` right after
    # this call; a bare `del` makes those names function-local, so the
    # `transcriber(...)` call itself raised UnboundLocalError. The globals
    # must also stay alive so subsequent requests can transcribe again.
    transcript = transcriber(audio_file)["text"]

    # LLaMA model optimization: 4-bit NF4 quantization to fit on a small GPU.
    LLAMA = "meta-llama/Llama-3.2-3B-Instruct"
    llama_quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4"
    )

    tokenizer = AutoTokenizer.from_pretrained(LLAMA)
    tokenizer.pad_token = tokenizer.eos_token
    # Fixed: llama_quant_config was built but never passed to from_pretrained,
    # so the model loaded unquantized. NOTE(review): 4-bit loading requires a
    # CUDA-capable bitsandbytes install — confirm on the target Space hardware.
    llama_model = AutoModelForCausalLM.from_pretrained(
        LLAMA,
        quantization_config=llama_quant_config,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto"
    )

    # Generate meeting minutes.
    system_message = "You are an assistant that produces minutes of meetings from transcripts, with summary, key discussion points, takeaways and action items with owners, in markdown."
    user_prompt = f"Below is an extract transcript of a Denver council meeting. Please write minutes in markdown, including a summary with attendees, location and date; discussion points; takeaways; and action items with owners.\n{transcript}"

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt}
    ]

    inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(DEVICE)
    streamer = TextStreamer(tokenizer)
    outputs = llama_model.generate(inputs, max_new_tokens=2000, streamer=streamer)

    return tokenizer.decode(outputs[0], skip_special_tokens=True)
||||
|
||||
# Gradio Interface |
||||
interface = gr.Interface( |
||||
fn=process_audio, |
||||
inputs=gr.Audio(sources=["upload", "microphone"], type="filepath"), |
||||
outputs="text", |
||||
title="Meeting Minutes Generator", |
||||
description="Upload or record an audio file to get structured meeting minutes in Markdown.", |
||||
) |
||||
|
||||
# Launch App |
||||
interface.launch() |
@ -0,0 +1,186 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "6fb7858c-8ea7-4dea-95ea-f5d7d5210b9a", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"The following is **Meeting minutes Generator** by using **QWEN2** and **Openai Opensource model whisper for transcription**, check the following colab link to see the outputs\n", |
||||
"\n", |
||||
"https://colab.research.google.com/drive/1_pqFmQXjOYG9Se4Zov4blIGeoYX6ViTJ?usp=sharing\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2103adb0-51f3-4240-bc5d-e27b6103cd8a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import torch\n", |
||||
"from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "47dba08d-5829-417c-9c6c-bdb35ca846a6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"AUDIO_MODEL = \"openai/whisper-medium\"\n", |
||||
"speech_model = AutoModelForSpeechSeq2Seq.from_pretrained(AUDIO_MODEL, torch_dtype=torch.float16, low_cpu_mem_usage=True, use_safetensors=True)\n", |
||||
"speech_model.to('cuda')\n", |
||||
"processor = AutoProcessor.from_pretrained(AUDIO_MODEL)\n", |
||||
"\n", |
||||
"pipe = pipeline(\n", |
||||
" \"automatic-speech-recognition\",\n", |
||||
" model=speech_model,\n", |
||||
" tokenizer=processor.tokenizer,\n", |
||||
" feature_extractor=processor.feature_extractor,\n", |
||||
" torch_dtype=torch.float16,\n", |
||||
" device='cuda',\n", |
||||
" return_timestamps=True #important if audio is more than 30sec\n", |
||||
")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c35d6c76-01a9-495f-ad4e-84c98e320750", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"result = pipe(\"your-audio.mp3\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8fba2d46-b806-4bb3-b02d-e628343db986", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"transcription = result[\"text\"]\n", |
||||
"print(transcription)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1778c4db-d003-4fb9-a0d0-6cfa71e6208d", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## MODEL" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9eb579a7-b5de-4537-8ad9-e3117b24c2ff", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4c632023-9b37-4c0d-b43a-190aacbbd80d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"QWEN2 = \"Qwen/Qwen2-7B-Instruct\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "175814b9-81b2-4f75-bf40-9ef7cac492cd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"quant_config = BitsAndBytesConfig(\n", |
||||
" load_in_4bit=True,\n", |
||||
" bnb_4bit_use_double_quant=True,\n", |
||||
" bnb_4bit_compute_dtype=torch.bfloat16,\n", |
||||
" bnb_4bit_quant_type=\"nf4\"\n", |
||||
")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8aaa160e-7c2b-4080-b24a-995df4469edd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
# Tokenize the chat and generate the minutes with Qwen2 (4-bit quantized).
# NOTE(review): `messages` is not defined anywhere in this notebook — build it
# from the system/user prompts (as in the transcription section) before running.
tokenizer = AutoTokenizer.from_pretrained(QWEN2)
# (left disabled as in the original, but with the attribute name corrected
# from `oes_token` to `eos_token`)
#tokenizer.pad_token = tokenizer.eos_token
# Fixed: the keyword is `add_generation_prompt`, not `add_generation_ptrompt`
# — the typo raised a TypeError from apply_chat_template.
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
streamer = TextStreamer(tokenizer)
model = AutoModelForCausalLM.from_pretrained(QWEN2, device_map="auto", quantization_config=quant_config)
outputs = model.generate(inputs, max_new_tokens=2000, streamer=streamer)
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "517443aa-d230-4248-88aa-b06efd8ee3cd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"response = tokenizer.decode(outputs[0])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "47562f76-fd35-4eb0-a399-8e8f1fa054c3", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## **For Markdown display**" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1f77fea1-0920-46e5-9230-d0e8b9f69353", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"from IPython.display import Markdown, display, update_display" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "35ac81e2-f960-4705-aaca-2385d8aa12d6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display(Markdown(response))" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.13.2" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,402 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "843542f7-220a-4408-9f8a-848696092434", |
||||
"metadata": { |
||||
"id": "843542f7-220a-4408-9f8a-848696092434" |
||||
}, |
||||
"source": [ |
||||
"# Build a Model to generate Synthetic Data" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a8816fc8-9517-46ff-af27-9fd0060840aa", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"Code was written in Google Colab. " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "08a8d539-950b-4b58-abf4-f17bd832c0af", |
||||
"metadata": { |
||||
"id": "08a8d539-950b-4b58-abf4-f17bd832c0af" |
||||
}, |
||||
"source": [ |
||||
"## Imports" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "Ienu-NHTuUlT", |
||||
"metadata": { |
||||
"id": "Ienu-NHTuUlT" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install -q gradio" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c5e737cd-27b0-4a2e-9a0c-dbb30ce5cdbf", |
||||
"metadata": { |
||||
"id": "c5e737cd-27b0-4a2e-9a0c-dbb30ce5cdbf" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from google.colab import userdata\n", |
||||
"\n", |
||||
"from huggingface_hub import login\n", |
||||
"from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", |
||||
"import torch\n", |
||||
"\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "khD9X5-V_txO", |
||||
"metadata": { |
||||
"colab": { |
||||
"base_uri": "https://localhost:8080/" |
||||
}, |
||||
"id": "khD9X5-V_txO", |
||||
"outputId": "e2b8d8d0-0433-4b5f-c777-a675213a3f4c" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install -U bitsandbytes" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e47ead5f-b4e9-4e9f-acf9-be1ffb7fa6d7", |
||||
"metadata": { |
||||
"id": "e47ead5f-b4e9-4e9f-acf9-be1ffb7fa6d7" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"hf_token = userdata.get('HF_TOKEN')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ba104a9c-f298-4e90-9ceb-9d907e392d0d", |
||||
"metadata": { |
||||
"id": "ba104a9c-f298-4e90-9ceb-9d907e392d0d" |
||||
}, |
||||
"source": [ |
||||
"## Open Source Models from HF" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "11b1eb65-8ef5-4e6d-9176-cf1f70d07fb6", |
||||
"metadata": { |
||||
"id": "11b1eb65-8ef5-4e6d-9176-cf1f70d07fb6" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"deepseek_model = 'deepseek-ai/deepseek-llm-7b-chat'\n", |
||||
"llama_model = 'meta-llama/Meta-Llama-3.1-8B-Instruct'\n", |
||||
"qwen2 = 'Qwen/Qwen2-7B-Instruct'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "90fb1d2e-5d25-4d73-b629-8273ab71503c", |
||||
"metadata": { |
||||
"id": "90fb1d2e-5d25-4d73-b629-8273ab71503c" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"login(hf_token, add_to_git_credential=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "52948c01-8dc6-404b-a2c1-c87f9f6dbd64", |
||||
"metadata": { |
||||
"id": "52948c01-8dc6-404b-a2c1-c87f9f6dbd64" |
||||
}, |
||||
"source": [ |
||||
"## Creating Prompts" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "79374337-34fe-4002-b173-ac9b132a54d8", |
||||
"metadata": { |
||||
"id": "79374337-34fe-4002-b173-ac9b132a54d8" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an expert in generating synthetic datasets. Your goal is to generate realistic datasets \\\n", |
||||
"based on a given business and its requirements from the user. You will also be given the desired dataset format.\"\n", |
||||
"system_prompt += \"Do not repeat the instructions.\"\n", |
||||
"\n", |
||||
"user_prompt = (\"Please provide me a dataset for the following business.\"\n", |
||||
"\"For example:\\n\"\n", |
||||
"\"The Business: A retail store selling luxury watches.\\n\"\n", |
||||
"\"The Data Format: CSV.\\n\"\n", |
||||
"\"Output:\\n\"\n", |
||||
"\"Item,Price,Quantity,Brand,Sale Date\\n\"\n", |
||||
"\"Superocean II, 20.000$, 3, Breitling, 2025-04-08 \\n\"\n", |
||||
"\"If I don't provide you the necessary columns, please create the columns based on your knowledge about the given business\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "dcd90b5e-a7d2-4cdc-81ff-17974c5ff1fe", |
||||
"metadata": { |
||||
"id": "dcd90b5e-a7d2-4cdc-81ff-17974c5ff1fe" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def dataset_format(data_format, num_records):\n", |
||||
" format_message = ''\n", |
||||
" if data_format == 'CSV':\n", |
||||
" format_message = 'Please provide the dataset in a CSV format.'\n", |
||||
" elif data_format == 'JSON':\n", |
||||
" format_message = 'Please provide the dataset in a JSON format'\n", |
||||
" elif data_format == 'Tabular':\n", |
||||
" format_message = 'Please provide the dataset in a Tabular format'\n", |
||||
"\n", |
||||
" return format_message + f'Please generate {num_records} records'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "39243edb-3eba-46fd-a610-e474ed421b01", |
||||
"metadata": { |
||||
"id": "39243edb-3eba-46fd-a610-e474ed421b01" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def complete_user_prompt(user_input, data_format, num_records):\n", |
||||
" messages = [\n", |
||||
" {'role': 'system', 'content': system_prompt},\n", |
||||
" {'role': 'user', 'content': user_input + user_prompt + dataset_format(data_format, num_records)}\n", |
||||
" ]\n", |
||||
"\n", |
||||
" return messages" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1ac81127-b9cc-424b-8b38-8a8b09bcc226", |
||||
"metadata": { |
||||
"id": "1ac81127-b9cc-424b-8b38-8a8b09bcc226" |
||||
}, |
||||
"source": [ |
||||
"## Accessing the Models" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cc4aaab5-bde1-463b-b873-e8bd1a231dc1", |
||||
"metadata": { |
||||
"colab": { |
||||
"base_uri": "https://localhost:8080/" |
||||
}, |
||||
"id": "cc4aaab5-bde1-463b-b873-e8bd1a231dc1", |
||||
"outputId": "16c9420d-2c4a-4e57-f281-7c531b5145db" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(\"CUDA available:\", torch.cuda.is_available())\n", |
||||
"if torch.cuda.is_available():\n", |
||||
" print(\"GPU-Device:\", torch.cuda.get_device_name(torch.cuda.current_device()))\n", |
||||
"else:\n", |
||||
" print(\"No GPU found.\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6b8e648d-747f-4684-a20b-b8da550efc23", |
||||
"metadata": { |
||||
"id": "6b8e648d-747f-4684-a20b-b8da550efc23" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"quant_config = BitsAndBytesConfig(\n", |
||||
" load_in_4bit = True,\n", |
||||
" bnb_4bit_use_double_quant = False,\n", |
||||
" bnb_4bit_compute_dtype= torch.bfloat16,\n", |
||||
" bnb_4bit_quant_type= 'nf4'\n", |
||||
")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b3ae602f-0abf-420d-8c7b-1938cba92528", |
||||
"metadata": { |
||||
"id": "b3ae602f-0abf-420d-8c7b-1938cba92528" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def generate_model(model_id, messages):\n", |
||||
" try:\n", |
||||
" tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code = True)\n", |
||||
" inputs = tokenizer.apply_chat_template(messages, return_tensors = 'pt').to('cuda')\n", |
||||
" streamer = TextStreamer(tokenizer)\n", |
||||
" model = AutoModelForCausalLM.from_pretrained(model_id, device_map = 'auto', quantization_config = quant_config)\n", |
||||
" outputs = model.generate(inputs, max_new_tokens = 2000, streamer = streamer)\n", |
||||
" generated_text = tokenizer.decode(outputs[0], skip_special_tokens = True)\n", |
||||
" del tokenizer, streamer, model, inputs, outputs\n", |
||||
" return generated_text\n", |
||||
"\n", |
||||
" except Exception as e:\n", |
||||
" return f'Error during generation: {str(e)}'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "7c575c9e-4674-4eee-a9b9-c8d14ceed474", |
||||
"metadata": { |
||||
"id": "7c575c9e-4674-4eee-a9b9-c8d14ceed474" |
||||
}, |
||||
"source": [ |
||||
"## Generate Dataset" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d9c5963e-9f4e-4990-b744-b9ead03e623a", |
||||
"metadata": { |
||||
"id": "d9c5963e-9f4e-4990-b744-b9ead03e623a" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def generate_dataset(user_input, target_format, model_choice, num_records):\n", |
||||
" if model_choice == 'DeepSeek':\n", |
||||
" model_id = deepseek_model\n", |
||||
" elif model_choice == 'Llama-3.1-8B':\n", |
||||
" model_id = llama_model\n", |
||||
" elif model_choice == 'Qwen2':\n", |
||||
" model_id = qwen2\n", |
||||
"\n", |
||||
" messages = complete_user_prompt(user_input, target_format, num_records)\n", |
||||
" return generate_model(model_id, messages)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "ff574cfe-567f-4c6d-b944-fb756bf7ebca", |
||||
"metadata": { |
||||
"id": "ff574cfe-567f-4c6d-b944-fb756bf7ebca" |
||||
}, |
||||
"source": [ |
||||
"## Creating Gradio UI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "61d2b056-0d00-4b73-b083-024a8f374fef", |
||||
"metadata": { |
||||
"id": "61d2b056-0d00-4b73-b083-024a8f374fef" |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"with gr.Blocks(title = 'Synthetic Data Generator') as ui:\n", |
||||
" gr.Markdown('# Synthetic Data Generator')\n", |
||||
"\n", |
||||
" with gr.Row():\n", |
||||
" with gr.Column(min_width=600):\n", |
||||
" user_inputs = gr.Textbox(label = 'Enter your Business details and data requirements',\n", |
||||
" placeholder = 'Type here...', lines = 15)\n", |
||||
"\n", |
||||
" model_choice = gr.Dropdown(\n", |
||||
" ['DeepSeek', 'Llama-3.1-8B', 'Qwen2'],\n", |
||||
" label = 'Choose your Model',\n", |
||||
" value = 'DeepSeek'\n", |
||||
" )\n", |
||||
"\n", |
||||
" target_format = gr.Dropdown(\n", |
||||
" ['CSV', 'JSON', 'Tabular'],\n", |
||||
" label = 'Choose your Format',\n", |
||||
" value = 'CSV'\n", |
||||
" )\n", |
||||
" num_records = gr.Dropdown(\n", |
||||
" [50, 100, 150, 200],\n", |
||||
" label = 'Number of Records',\n", |
||||
" value = 50\n", |
||||
" )\n", |
||||
"\n", |
||||
" generate_button = gr.Button('Generate')\n", |
||||
"\n", |
||||
" with gr.Column():\n", |
||||
" output = gr.Textbox(label = 'Generated Synthetic Data',\n", |
||||
" lines = 30)\n", |
||||
"\n", |
||||
" generate_button.click(fn = generate_dataset, inputs = [user_inputs, target_format, model_choice, num_records],\n", |
||||
" outputs = output\n", |
||||
" )" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "958d9cbf-50ff-4c50-a305-18df6d5f5eda", |
||||
"metadata": { |
||||
"colab": { |
||||
"base_uri": "https://localhost:8080/", |
||||
"height": 626 |
||||
}, |
||||
"id": "958d9cbf-50ff-4c50-a305-18df6d5f5eda", |
||||
"outputId": "a6736641-85c3-4b6a-a28d-02ac5caf4562", |
||||
"scrolled": true |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ui.launch(inbrowser = True)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"accelerator": "GPU", |
||||
"colab": { |
||||
"gpuType": "T4", |
||||
"provenance": [] |
||||
}, |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,433 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "05432987-80bc-4aa5-8c05-277861e19307", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Adds docstrings/comments to code and generates code summary" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "e706f175-1e83-4d2c-8613-056b2e532624", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Model Usage \n", |
||||
"\n", |
||||
"- **Open Source Models:**\n", |
||||
"\n", |
||||
" - Deployed via Endpoint: Hosted on a server and accessed remotely (Qwen 1.5-7)\n", |
||||
" - Run Locally on Machine: Executed directly on a local device (Ollama running Llama 3.2-1B)\n", |
||||
"\n", |
||||
"- **Closed Source Models:** \n", |
||||
" - Accessed through API key authentication: (OpenAI, Anthropic). \n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9ed667df-6660-4ba3-80c5-4c1c8f7e63f3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import io\n", |
||||
"import sys \n", |
||||
"import json\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import google.generativeai\n", |
||||
"import anthropic\n", |
||||
"import ollama\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"import gradio as gr\n", |
||||
"from huggingface_hub import login, InferenceClient\n", |
||||
"from transformers import AutoTokenizer, pipeline" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c9dd4bf1-48cf-44dc-9d04-0ec6e8189a3c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# environment\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')\n", |
||||
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY')\n", |
||||
"CODE_QWEN_URL = os.environ['CODE_QWEN_URL'] \n", |
||||
"BIGBIRD_PEGASUS_URL = os.environ['BIGBIRD_PEGASUS_URL']\n", |
||||
"HF_TOKEN = os.environ['HF_TOKEN']" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "71f671d6-50a7-43cf-9e04-52a159d67dab", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!ollama pull llama3.2:1b" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8e6f8f35-477d-4014-8fe9-874b5aee0061", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"claude = anthropic.Anthropic()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ae34b79c-425a-4f04-821a-8f1d9868b146", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"OPENAI_MODEL = \"gpt-4o-mini\"\n", |
||||
"CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n", |
||||
"LLAMA_MODEL = \"llama3.2:1b\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "80e6d920-3c94-48c4-afd8-518f415ab777", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n", |
||||
"bigbird_pegasus = \"google/bigbird-pegasus-large-arxiv\"\n", |
||||
"login(HF_TOKEN, add_to_git_credential=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "314cd8e3-2c10-4149-9818-4e6b0c05b871", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Uses Llama to Check Which Language the Code is Written In\n", |
||||
"system_message_comments = \"You are an assistant designed to add docstrings and helpful comments to code for documentation purposes.\"\n", |
||||
"system_message_comments += \"Respond back with properly formatted code, including docstrings and comments. Keep comments concise. \"\n", |
||||
"system_message_comments += \"Do not respond with greetings, or any such extra output\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "66fa09e4-1b79-4f53-9bb7-904d515b2f26", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_message_summary = \"You are an assistant designed to summarise code for documentation purposes. You are not to display code again.\"\n", |
||||
"system_message_summary += \"Respond back with a properly crafted summary, mentioning key details regarding the code, such as workflow, code language.\"\n", |
||||
"system_message_summary += \"Do not respond with greetings, or any such extra output. Do not respond in Markdown. Be thorough, keep explanation level at undergraduate level.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ea405820-f9d1-4cf1-b465-9ae5cd9016f6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def user_prompt_for(code):\n", |
||||
" user_prompt = \"Rewrite this code to include helpful comments and docstrings. \"\n", |
||||
" user_prompt += \"Respond only with code.\\n\"\n", |
||||
" user_prompt += code\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "26c9be56-1d4f-43e5-9bc4-eb5b76da8071", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def user_prompt_for_summary(code):\n", |
||||
" user_prompt = \"Return the summary of the code.\\n\"\n", |
||||
" user_prompt += code\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c0ac22cb-dc96-4ae1-b00d-2747572f6945", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def messages_for(code):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message_comments},\n", |
||||
" {\"role\":\"user\", \"content\" : user_prompt_for(code)}\n", |
||||
" ]\n", |
||||
" return messages" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "eae1a8b4-68a8-4cd5-849e-0ecabd166a0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def messages_for_summary(code):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": system_message_summary},\n", |
||||
" {\"role\":\"user\", \"content\" : user_prompt_for_summary(code)}\n", |
||||
" ]\n", |
||||
" return messages" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5eb726dd-e09e-4011-8eb6-4d20f2830ff5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"func = \"\"\"\n", |
||||
"import time\n", |
||||
"\n", |
||||
"def calculate(iterations, param1, param2):\n", |
||||
" result = 1.0\n", |
||||
" for i in range(1, iterations+1):\n", |
||||
" j = i * param1 - param2\n", |
||||
" result -= (1/j)\n", |
||||
" j = i * param1 + param2\n", |
||||
" result += (1/j)\n", |
||||
" return result\n", |
||||
"\n", |
||||
"start_time = time.time()\n", |
||||
"result = calculate(100_000_000, 4, 1) * 4\n", |
||||
"end_time = time.time()\n", |
||||
"\n", |
||||
"print(f\"Result: {result:.12f}\")\n", |
||||
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f61943b2-c939-4910-a670-58abaf464bb6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_llama(code):\n", |
||||
" # commented code\n", |
||||
" messages = messages_for(code)\n", |
||||
" response1 = ollama.chat(model=LLAMA_MODEL, messages=messages)\n", |
||||
"\n", |
||||
" # summary\n", |
||||
" messages = messages_for_summary(code)\n", |
||||
" response2 = ollama.chat(model=LLAMA_MODEL, messages=messages)\n", |
||||
" \n", |
||||
" return response1['message']['content'],response2['message']['content']" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "696fb97e-807e-40ed-b0e1-beb82d1108a6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_claude(code):\n", |
||||
" # commented code\n", |
||||
" message1 = claude.messages.create(\n", |
||||
" model=CLAUDE_MODEL,\n", |
||||
" system=system_message_comments,\n", |
||||
" messages=([{\"role\": \"user\", \"content\":user_prompt_for(code)}]),\n", |
||||
" max_tokens=500\n", |
||||
" )\n", |
||||
"\n", |
||||
" # summary\n", |
||||
" message2 = claude.messages.create(\n", |
||||
" model=CLAUDE_MODEL,\n", |
||||
" system=system_message_summary,\n", |
||||
" messages=([{\"role\": \"user\", \"content\":user_prompt_for_summary(code)}]),\n", |
||||
" max_tokens=500\n", |
||||
" )\n", |
||||
" \n", |
||||
" return message1.content[0].text,message2.content[0].text" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4bf1db64-86fa-42a1-98dd-3df74607f8db", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_gpt(code):\n", |
||||
" # commented code\n", |
||||
" completion1 = openai.chat.completions.create(\n", |
||||
" model=OPENAI_MODEL,\n", |
||||
" messages=messages_for(code),\n", |
||||
" )\n", |
||||
"\n", |
||||
" #summary\n", |
||||
" completion2 = openai.chat.completions.create(\n", |
||||
" model=OPENAI_MODEL,\n", |
||||
" messages=messages_for_summary(code),\n", |
||||
" )\n", |
||||
" \n", |
||||
" return completion1.choices[0].message.content,completion2.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6863dc42-cbcd-4a95-8b0a-cfbcbfed0764", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_codeqwen(code):\n", |
||||
" # commented code\n", |
||||
" tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", |
||||
" messages = messages_for(code)\n", |
||||
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", |
||||
" client = InferenceClient(CODE_QWEN_URL, token=HF_TOKEN)\n", |
||||
" response1 = client.text_generation(text, details=True, max_new_tokens=1000)\n", |
||||
"\n", |
||||
" # summary\n", |
||||
" tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", |
||||
" messages = messages_for_summary(code)\n", |
||||
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", |
||||
" client = InferenceClient(CODE_QWEN_URL, token=HF_TOKEN)\n", |
||||
" response2 = client.text_generation(text, details=True, max_new_tokens=1000)\n", |
||||
" \n", |
||||
" return response1.generated_text ,response2.generated_text " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "06d05c02-45e4-47da-b70b-cf433dfaca4c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_docs(code,model):\n", |
||||
" if model == \"Llama\":\n", |
||||
" comments,summary = call_llama(code)\n", |
||||
" elif model == \"Claude\":\n", |
||||
" comments,summary = call_claude(code)\n", |
||||
" elif model == \"GPT\":\n", |
||||
" comments,summary = call_gpt(code)\n", |
||||
" elif model == \"CodeQwen\":\n", |
||||
" comments,summary = call_codeqwen(code)\n", |
||||
" else:\n", |
||||
" raise ValueError(\"Unknown Model\")\n", |
||||
" return comments,summary" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1b4ea289-5da9-4b0e-b4d4-f8f01e466839", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"css = \"\"\"\n", |
||||
".comments {background-color: #00599C;}\n", |
||||
".summary {background-color: #008B8B;}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "89ad7c7b-b881-45d3-aadc-d7206af578fb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"with gr.Blocks(css=css) as ui:\n", |
||||
" gr.Markdown(\"### Code Documentation and Formatting\")\n", |
||||
" with gr.Row():\n", |
||||
" code = gr.Textbox(label=\"Input Code: \", value=func, lines=10)\n", |
||||
" with gr.Row():\n", |
||||
" model = gr.Dropdown([\"GPT\",\"Claude\",\"Llama\",\"CodeQwen\"],label=\"Select model\",value=\"GPT\")\n", |
||||
" with gr.Row():\n", |
||||
"        docs = gr.Button(\"Add Comments and Summarise Code\")\n", |
||||
" with gr.Row():\n", |
||||
" commented_code = gr.Textbox(label= \"Formatted Code\", lines=10,elem_classes=[\"comments\"])\n", |
||||
" code_summary = gr.Textbox(label = \"Code Summary\", lines=10,elem_classes=[\"summary\"])\n", |
||||
"        docs.click(create_docs,inputs=[code,model],outputs=[commented_code,code_summary])" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1a9e3b1c-bfe6-4b71-aac8-fa36a491c157", |
||||
"metadata": { |
||||
"scrolled": true |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ui.launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ac895aa9-e044-4598-b715-d96d1c158656", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5a96877c-22b7-4ad5-b235-1cf8f8b200a1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(call_llama(func))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f11de1a2-52c0-41c7-ad88-01ef5f8bc628", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,346 @@
|
||||
# Standard library
import contextlib
import io
import os
import re
import subprocess
import sys

# Third-party
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from anthropic import Anthropic
||||
|
||||
# Load environment variables and initialize API clients.
# Keys are read from the environment (populated by .env via load_dotenv);
# nothing is hardcoded here.
load_dotenv(override=True)
openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
anthropic = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
# Hardware description injected into the system prompt (see messages_for)
# so the model can target this machine when optimizing.
MACHINE_SPEC = "MacbookPro, Apple M1 Chip"

# Define global variables for HF integration
# For HF chat-based CodeQwen model
code_qwen = "Qwen/CodeQwen1.5-7B-Chat"
# Inference endpoint URL passed to InferenceClient in stream_code_qwen.
# NOTE(review): left blank — must be set to a deployed endpoint URL before
# the HF CodeQwen path is usable.
CODE_QWEN_URL = ""
||||
|
||||
|
||||
def clean_code(code, target_language):
    """
    Strip markdown code fences and stray language-tag lines from model output.

    Any line containing ``` is dropped, as is any line consisting solely of a
    language name (c, cpp, c++, rust). Then language-specific fix-ups run:
    for C, an overflow-prone shift literal is patched; for Rust, the result
    is passed through process_rust_code.

    Returns the cleaned source as a single string.
    """
    language_tags = {"c", "cpp", "c++", "rust"}
    kept = [
        line
        for line in code.splitlines()
        if "```" not in line and line.strip().lower() not in language_tags
    ]
    result = "\n".join(kept)
    if target_language == "C":
        # 1U << 32 overflows a 32-bit unsigned int; force a 64-bit literal.
        result = result.replace("1U << 32", "(1ULL << 32)")
    if target_language == "Rust":
        result = process_rust_code(result)
    return result
||||
|
||||
# Conversion prompt functions (target language-aware) |
||||
def user_prompt_for(python_code, target_language):
    """
    Build the user prompt asking for a fast, output-identical rewrite of
    python_code in target_language. The instructions are prepended to the
    raw Python source.
    """
    instructions = (
        f"Rewrite this Python code in {target_language} with the fastest possible implementation that produces identical output. "
        f"Respond only with {target_language} code; do not explain your work. "
        "Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\n\n"
    )
    return instructions + python_code
||||
|
||||
def messages_for(python_code, target_language):
    """
    Assemble the chat message list (system + user) for a conversion request.

    The system message embeds the target language and MACHINE_SPEC so the
    model optimizes for this hardware; the user message comes from
    user_prompt_for.
    """
    system_message = (
        f"You are an assistant that reimplements Python code in high performance {target_language} for an {MACHINE_SPEC}. "
        f"Respond only with {target_language} code; use comments sparingly. "
        f"The {target_language} response needs to produce an identical output in the fastest possible time."
    )
    user_message = user_prompt_for(python_code, target_language)
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_message},
    ]
||||
|
||||
def write_output(code, target_language):
    """
    Write the converted code to a language-appropriate file and return the
    filename.

    Strips a ```<tag> fence and any leading bare language-name line, then
    runs clean_code for the final scrub before writing.
    """
    filenames = {"C++": "optimized.cpp", "C": "optimized.c", "Rust": "optimized.rs"}
    filename = filenames.get(target_language, "optimized.txt")
    tag = "" if target_language is None else target_language.lower()
    cleaned = code.replace(f"```{tag}\n", "").replace("```", "")
    lines = cleaned.splitlines()
    # Drop a leading line that is just the language name (e.g. "cpp").
    if lines and lines[0].strip().lower() in ["cpp", "c++", "c", "rust"]:
        lines = lines[1:]
    cleaned = clean_code("\n".join(lines), target_language)
    with open(filename, "w") as f:
        f.write(cleaned)
    return filename
||||
|
||||
# GPT integration for conversion |
||||
def stream_gpt(python_code, target_language, model_version):
    """
    Stream a conversion from the selected OpenAI model, yielding the
    progressively assembled, de-fenced text after each chunk.
    """
    completion_stream = openai.chat.completions.create(
        model=model_version,  # use the GPT model version picked in the UI
        messages=messages_for(python_code, target_language),
        stream=True,
    )
    accumulated = ""
    for chunk in completion_stream:
        choices = getattr(chunk, "choices", None)
        if not choices:
            continue  # skip keep-alive / empty chunks
        accumulated += choices[0].delta.content or ""
        yield accumulated.replace(f"```{target_language}\n", "").replace("```", "")
||||
|
||||
# Claude integration for conversion |
||||
def stream_claude(python_code, target_language, model_version):
    """
    Stream a conversion from the selected Claude model, yielding the
    progressively assembled, de-fenced text after each fragment.

    Fix: the previous version called the legacy Completions endpoint with a
    bare prompt (that endpoint requires Human/Assistant-formatted prompts and
    a mandatory max_tokens_to_sample) and read stream chunks with dict-style
    .get(), but the SDK yields typed objects — so this path could not work.
    Use the Messages API streaming helper instead, reusing the same
    system/user prompts as the other providers.
    """
    chat = messages_for(python_code, target_language)
    reply = ""
    with anthropic.messages.stream(
        model=model_version,
        max_tokens=3000,            # generous cap for a full source file
        system=chat[0]["content"],  # system prompt built by messages_for
        messages=chat[1:],          # the single user message
    ) as stream:
        for fragment in stream.text_stream:
            reply += fragment
            yield reply.replace(f"```{target_language}\n", "").replace("```", "")
||||
|
||||
# Hugging Face integration functions |
||||
def stream_code_qwen(python_code, target_language, model_version):
    """
    Stream a conversion from the HF chat-based CodeQwen endpoint, yielding
    the progressively assembled, de-fenced text after each token.
    """
    from transformers import AutoTokenizer
    from huggingface_hub import InferenceClient

    tokenizer = AutoTokenizer.from_pretrained(code_qwen)
    # Render the chat messages into Qwen's expected prompt format.
    chat_text = tokenizer.apply_chat_template(
        messages_for(python_code, target_language),
        tokenize=False,
        add_generation_prompt=True,
    )
    client = InferenceClient(CODE_QWEN_URL, token=os.getenv("HF_TOKEN"))
    generated = ""
    for event in client.text_generation(
        chat_text, stream=True, details=True, max_new_tokens=3000
    ):
        generated += event.token.text
        yield generated.replace(f"```{target_language}\n", "").replace("```", "")
||||
|
||||
def stream_huggingface(python_code, target_language, model_version):
    """Stream a conversion from an arbitrary Hugging Face text-generation model.

    Single-prompt (non-chat) path: sends the user prompt directly and yields
    the accumulated reply (markdown code fences stripped) as tokens arrive.
    """
    prompt = user_prompt_for(python_code, target_language)
    from huggingface_hub import InferenceClient
    # Bug fix: InferenceClient has no `model_name` keyword (that raised a
    # TypeError); the model id / endpoint URL is passed via `model`.
    client = InferenceClient(model=model_version, token=os.getenv("HF_TOKEN"))
    stream = client.text_generation(prompt, stream=True, details=True, max_new_tokens=3000)
    reply = ""
    for chunk in stream:
        reply += chunk.token.text
        yield reply.replace(f"```{target_language}\n", "").replace("```", "")
||||
|
||||
|
||||
def optimize(python_code, combined_model, target_language):
    """Route a conversion request to the matching provider and stream results.

    Args:
        python_code: source code to convert.
        combined_model: a string like "GPT: gpt-4o",
            "CLAUDE: claude-3-5-sonnet-20240620" or "HF: model_name".
        target_language: "C++", "C" or "Rust".

    Yields progressively longer partial conversions from the chosen backend.

    Raises:
        ValueError: if combined_model is malformed or names an unknown provider.
    """
    # maxsplit=1 so model identifiers that themselves contain ':' survive intact.
    parts = combined_model.split(":", 1)
    if len(parts) != 2:
        # Same exception type callers saw from the old unpacking failure.
        raise ValueError(f"Expected 'PROVIDER: model', got {combined_model!r}")
    provider, model_version = [part.strip() for part in parts]
    if provider == "GPT":
        yield from stream_gpt(python_code, target_language, model_version)
    elif provider == "CLAUDE":
        yield from stream_claude(python_code, target_language, model_version)
    elif provider == "HF":
        # CodeQwen needs the chat-template path; other HF models take a raw prompt.
        if "CodeQwen" in model_version:
            yield from stream_code_qwen(python_code, target_language, model_version)
        else:
            yield from stream_huggingface(python_code, target_language, model_version)
    else:
        raise ValueError("Unknown model provider")
||||
|
||||
def execute_python(code):
    """Execute Python source and return everything it printed to stdout.

    The code runs in a dedicated global namespace so executions don't leak
    state into (or read state from) this module.

    Returns:
        Captured stdout on success; on an exception inside the executed code,
        the traceback text prefixed with "Error:" (matching the error format
        of the other execute_* helpers) instead of propagating, so the UI
        shows the failure rather than crashing the callback.
    """
    import traceback
    from contextlib import redirect_stdout

    env = {}  # dedicated global namespace
    buffer = io.StringIO()
    try:
        # redirect_stdout restores sys.stdout even on error, without the
        # fragile manual sys.stdout swap the old version used.
        with redirect_stdout(buffer):
            exec(code, env)
    except Exception:
        return f"Error:\n{traceback.format_exc()}"
    return buffer.getvalue()
||||
|
||||
def execute_cpp(code):
    """Compile the generated C++ with clang++ and run it, returning its stdout.

    On a compile or runtime failure, returns the captured stderr prefixed with
    "Error:". NOTE(review): the -march/-mtune/-mcpu flags assume an Apple M1
    host — confirm before running on other hardware.
    """
    write_output(code, target_language="C++")
    try:
        compile_command = [
            "clang++", "-Ofast", "-std=c++17", "-march=armv8.5-a",
            "-mtune=apple-m1", "-mcpu=apple-m1", "-o", "optimized", "optimized.cpp",
        ]
        subprocess.run(compile_command, check=True, text=True, capture_output=True)
        execution = subprocess.run(
            ["./optimized"], check=True, text=True, capture_output=True
        )
        return execution.stdout
    except subprocess.CalledProcessError as e:
        return f"Error:\n{e.stderr}"
||||
|
||||
def execute_c(code):
    """Compile the generated C with clang and run it, returning its stdout.

    The code is first passed through clean_code() and written to optimized.c.
    Returns "Error:" plus the captured stderr if compilation or execution fails.
    """
    with open("optimized.c", "w") as f:
        f.write(clean_code(code, "C"))
    try:
        subprocess.run(
            ["clang", "-O2", "-std=c11", "-o", "optimized_c", "optimized.c"],
            check=True, text=True, capture_output=True,
        )
        completed = subprocess.run(
            ["./optimized_c"], check=True, text=True, capture_output=True
        )
        return completed.stdout
    except subprocess.CalledProcessError as e:
        return f"Error:\n{e.stderr}"
||||
|
||||
def process_rust_code(code):
    """Patch common glitches in LLM-generated Rust before compiling it.

    Fixes applied, in order:
    - Rust float formatting takes a precision without a trailing 'f'.
    - A println! of the execution time sometimes arrives missing the closing
      "()" on as_secs_f64 — repair it. (Regex reconstructed: the previous
      revision's escaped parens were corrupted by a text-encoding artifact.)
    - Parenthesize the RNG range expression so the subtraction happens before
      the u32 cast.
    - A bare `1 << 32` overflows i32; force a u64 literal.
    - Drop a spurious doubled closing paren after an `as i64` cast.
    """
    code = code.replace("{:.6f}", "{:.6}")
    code = re.sub(
        r'(println!\("Execution Time: \{:\.6\} seconds", duration\.as_secs_f64)(\s*)$',
        r'\1())',
        code,
        flags=re.MULTILINE,
    )
    code = code.replace("max_val - min_val as u32 + 1", "((max_val - min_val + 1) as u32)")
    code = code.replace("1 << 32", "1u64 << 32")
    code = re.sub(r'(\)\s*as i64)\)', r'\1', code)
    return code
||||
|
||||
def execute_rust(code):
    """Compile generated Rust with rustc and run it, returning its stdout.

    Strips markdown code fences (and a stray leading "rust" language tag),
    applies the process_rust_code() fix-ups, writes optimized.rs, then compiles
    and runs the binary. Returns "Error:" plus stderr on any failure.
    """
    code = code.replace("```rust\n", "").replace("```", "")
    first_line, _, rest = code.partition('\n')
    if first_line.strip().lower() == "rust":
        # Fence stripping can leave a bare "rust" tag as the first line.
        code = rest
    code = process_rust_code(code)
    with open("optimized.rs", "w") as f:
        f.write(code)
    try:
        subprocess.run(
            ["rustc", "optimized.rs", "-O", "-o", "optimized_rust"],
            check=True, text=True, capture_output=True,
        )
        completed = subprocess.run(
            ["./optimized_rust"], check=True, text=True, capture_output=True
        )
        return completed.stdout
    except subprocess.CalledProcessError as e:
        return f"Error:\n{e.stderr}"
||||
|
||||
def execute_target_code(code, target_language):
    """Dispatch execution of converted code to the language-specific runner."""
    if target_language == "C++":
        return execute_cpp(code)
    if target_language == "C":
        return execute_c(code)
    if target_language == "Rust":
        return execute_rust(code)
    # Anything else is not wired up in the UI.
    return "Unsupported language"
||||
|
||||
# Gradio UI setup
# CSS hooks for the two result panes below (attached via elem_classes):
# ".python" colors the Python-output box, ".code" the converted-code box.
css = """
.python {background-color: #306998;}
.code {background-color: #050;}
"""
||||
|
||||
def launch_ui():
    """Build and launch the Gradio interface for Python-to-C/C++/Rust conversion.

    Layout: source/result text boxes, model + target-language dropdowns, then
    buttons wired to optimize() (streaming) and the execute_* runners.
    Blocks until the Gradio server is closed.
    """
    with gr.Blocks(css=css) as ui:
        gr.Markdown("## Convert Python Code to C/C++/Rust")
        with gr.Row():
            python_box = gr.Textbox(label="Python code:", value=PYTHON_HARD, lines=10)
            converted_box = gr.Textbox(label="Converted Code:", lines=10)
        with gr.Row():
            # Dropdown values follow the "PROVIDER: model" format parsed by optimize().
            model_dropdown = gr.Dropdown(
                ["GPT: gpt-4o", "GPT: gpt-4o-mini", "CLAUDE: claude-3-5-sonnet-20240620", "CLAUDE: claude-3-haiku-20240307", "HF: CodeQwen1.5-7B-Chat", "HF: bigcode/starcoder"],
                label="Select Model",
                value="GPT: gpt-4o"
            )
            target_lang_dropdown = gr.Dropdown(
                ["C++", "C", "Rust"],
                label="Select target language",
                value="C++"
            )
        with gr.Row():
            convert_btn = gr.Button("Convert code")
        with gr.Row():
            python_run_btn = gr.Button("Run Python")
            run_converted_btn = gr.Button("Run Converted Code")
        with gr.Row():
            # elem_classes tie into the module-level css string for per-pane colors.
            python_out = gr.TextArea(label="Python result:", elem_classes=["python"])
            converted_out = gr.TextArea(label="Converted Code result:", elem_classes=["code"])
        # optimize() is a generator, so the converted box streams partial output.
        convert_btn.click(
            optimize,
            inputs=[python_box, model_dropdown, target_lang_dropdown],
            outputs=[converted_box],
        )
        python_run_btn.click(execute_python, inputs=[python_box], outputs=[python_out])
        run_converted_btn.click(
            execute_target_code,
            inputs=[converted_box, target_lang_dropdown],
            outputs=[converted_out],
        )
    ui.launch()
||||
|
||||
# Example Python code blocks
# Default conversion input for the UI: a deterministic LCG feeds an O(n^2)
# max-subarray computation repeated 20 times, with wall-clock timing printed
# so converted versions can be benchmarked against it.
PYTHON_HARD = """
# Support large number sizes
def lcg(seed, a=1664525, c=1013904223, m=2**32):
    value = seed
    while True:
        value = (a * value + c) % m
        yield value
def max_subarray_sum(n, seed, min_val, max_val):
    lcg_gen = lcg(seed)
    random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]
    max_sum = float('-inf')
    for i in range(n):
        current_sum = 0
        for j in range(i, n):
            current_sum += random_numbers[j]
            if current_sum > max_sum:
                max_sum = current_sum
    return max_sum
def total_max_subarray_sum(n, initial_seed, min_val, max_val):
    total_sum = 0
    lcg_gen = lcg(initial_seed)
    for _ in range(20):
        seed = next(lcg_gen)
        total_sum += max_subarray_sum(n, seed, min_val, max_val)
    return total_sum
n = 10000
initial_seed = 42
min_val = -10
max_val = 10
import time
start_time = time.time()
result = total_max_subarray_sum(n, initial_seed, min_val, max_val)
end_time = time.time()
print("Total Maximum Subarray Sum (20 runs):", result)
print("Execution Time: {:.6f} seconds".format(end_time - start_time))
"""
||||
|
||||
if __name__ == "__main__":
    import argparse

    # --mode direct: run the sample program and stream one conversion to stdout.
    # --mode ui (default): launch the Gradio interface.
    parser = argparse.ArgumentParser(
        description="Single script with multiple executable sections and target language support"
    )
    parser.add_argument(
        "--mode",
        choices=["direct", "ui"],
        default="ui",
        help="Run direct conversion or launch Gradio UI",
    )
    cli_args = parser.parse_args()

    if cli_args.mode == "direct":
        print("\nExecuting Python code (PYTHON_HARD)...")
        exec(PYTHON_HARD)
        for partial_output in optimize(PYTHON_HARD, "GPT: gpt-4o", "C++"):
            print(partial_output, end="")
    else:
        # choices= restricts --mode to {"direct", "ui"}, so this is the "ui" case.
        launch_ui()
@ -0,0 +1,394 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "e9025a4a-b8ef-4901-b98e-753b756b028a", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Building a RAG chat without the langchain framework\n", |
||||
"## To understand more in detail what's going on\n", |
||||
"\n", |
||||
"The technical know-how comes from Ed Donner, obviously, as well as from Sakalya Mitra & Pradip Nichite on [this gem of a blog post](https://blog.futuresmart.ai/building-rag-applications-without-langchain-or-llamaindex) I found on futuresmart.ai" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1b7acfb5-8bf9-48b5-a219-46f1e3bfafc3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import gradio as gr\n", |
||||
"import re\n", |
||||
"from openai import OpenAI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "19af6b8b-be29-4086-a69f-5e2cdb867ede", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports for Chroma and plotly\n", |
||||
"\n", |
||||
"import chromadb\n", |
||||
"from chromadb.utils import embedding_functions\n", |
||||
"import numpy as np\n", |
||||
"from sklearn.manifold import TSNE\n", |
||||
"import plotly.graph_objects as go" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bc6d9ab4-816a-498c-a04c-c3838770d848", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"db_name = \"chroma_db\"\n", |
||||
"client = chromadb.PersistentClient(path=\"chroma_db\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a3715b81-eed0-4412-8c01-0623ed113657", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv()\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "3017e1dd-d0d5-4ef4-8c72-84517a927793", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Making stuff at home: documents" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e83480a5-927b-4756-a978-520a56ceed85", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# items in documents are actually objects: Documents(metadata={...}, page_content=\"...\"), so we need a \"Document\" class\n", |
||||
"# btw all the quadruple-backslash madness here is due to Windows (there might be a more efficient way, still)\n", |
||||
"\n", |
||||
"class Document:\n", |
||||
" def __init__(self, metadata, page_content):\n", |
||||
" self.metadata = metadata\n", |
||||
" self.page_content = page_content\n", |
||||
"\n", |
||||
" def __repr__(self):\n", |
||||
" return f\"Document(metadata={self.metadata}, page_content={repr(self.page_content)})\"\n", |
||||
"\n", |
||||
"\n", |
||||
"documents = []\n", |
||||
"\n", |
||||
"def get_documents(path='.'):\n", |
||||
" for entry in os.listdir(path):\n", |
||||
" if len(re.findall(\"^\\.\", entry)) == 0:\n", |
||||
" full_path = os.path.join(path, entry)\n", |
||||
" if os.path.isdir(full_path):\n", |
||||
" get_documents(full_path)\n", |
||||
" else:\n", |
||||
" parent = re.sub(\"^\\.[\\\\\\\\].*[\\\\\\\\]\", \"\", os.path.dirname(full_path))\n", |
||||
" self = os.path.basename(full_path)\n", |
||||
" content = \"\"\n", |
||||
"\n", |
||||
" with open(full_path, mode=\"r\", encoding=\"utf-8\") as f:\n", |
||||
" content = f.read()\n", |
||||
" \n", |
||||
" doc = Document(metadata={\"source\": full_path, \"doc_type\": parent, \"self\": self}, page_content=content)\n", |
||||
" documents.append(doc)\n", |
||||
"\n", |
||||
"# where the knowledge collection lives\n", |
||||
"directory_path = r'.\\knowledge_collection'\n", |
||||
"get_documents(directory_path)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fd846bc0-54d0-4802-a18b-196c396a241c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Making stuff at home: chunks" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "202b33e2-c3fe-424c-9c8e-a90e517add42", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"eos_pattern = re.compile(r\"((?<=[.!?;])[\\s]+)|([\\n\\r]+)\")\n", |
||||
"chunk_size = 1000\n", |
||||
"chunks = []" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a19a61ec-d204-4b87-9f05-88832d03fad6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"for doc in documents:\n", |
||||
"\n", |
||||
" sentence_ends = [end.start() for end in list(re.finditer(eos_pattern, doc.page_content)) if end.start() > chunk_size - 50]\n", |
||||
" start = 0\n", |
||||
" \n", |
||||
" if len(sentence_ends) == 0 and len(doc.page_content) > 5:\n", |
||||
" chunk = Document(metadata=doc.metadata, page_content=doc.page_content)\n", |
||||
" chunk.metadata['id'] = f\"{doc.metadata['source']}_chunk_\"\n", |
||||
" chunks.append(chunk)\n", |
||||
"\n", |
||||
" else: \n", |
||||
" for point in sentence_ends:\n", |
||||
" if point - start >= chunk_size - 50:\n", |
||||
" text = doc.page_content[start:point]\n", |
||||
" chunk = Document(metadata=doc.metadata, page_content=text)\n", |
||||
" chunk.metadata['id'] = f\"{doc.metadata['source']}_chunk_\"\n", |
||||
" chunks.append(chunk)\n", |
||||
" start = point\n", |
||||
" \n", |
||||
" # Add the remaining part of the text as the last chunk if it's big enough\n", |
||||
" if len(doc.page_content) - start > 5:\n", |
||||
" text = doc.page_content[start:]\n", |
||||
" chunk = Document(metadata=doc.metadata, page_content=text)\n", |
||||
" chunk.metadata['id'] = f\"{doc.metadata['source']}_chunk_\"\n", |
||||
" chunks.append(chunk)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "966ae50c-e0e5-403a-9465-8f26967f8922", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Making stuff without a framework: embeddings" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b97391c0-e55f-4e08-b0cb-5e62fb119ae6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Configure sentence transformer embeddings\n", |
||||
"embeddings = embedding_functions.SentenceTransformerEmbeddingFunction(\n", |
||||
" model_name=\"all-MiniLM-L6-v2\"\n", |
||||
")\n", |
||||
"\n", |
||||
"collection_name = \"documents_collection\"\n", |
||||
"\n", |
||||
"try:\n", |
||||
" client.delete_collection(collection_name)\n", |
||||
"except ValueError:\n", |
||||
" print(f\"{collection_name} doesn't exist yet\")\n", |
||||
"\n", |
||||
"# Create collection\n", |
||||
"collection = client.get_or_create_collection(\n", |
||||
" name=collection_name,\n", |
||||
" embedding_function=embeddings\n", |
||||
")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5222dfec-8cf4-4e87-aeb8-33d0f3b3b5cb", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# adding our chunks to the \"collection\"\n", |
||||
"\n", |
||||
"for chunk in chunks:\n", |
||||
" index = chunks.index(chunk)\n", |
||||
" collection.add(\n", |
||||
" documents=chunk.page_content,\n", |
||||
" metadatas=chunk.metadata,\n", |
||||
" ids=chunk.metadata['id'] + f\"{index}\"\n", |
||||
" )" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5effcada-ee5f-4207-9fa6-1fc5604b068b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def semantic_search(collection, query: str, n_results: int = 4):\n", |
||||
" results = collection.query(\n", |
||||
" query_texts=[query],\n", |
||||
" n_results=n_results\n", |
||||
" )\n", |
||||
" return results" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "99f0a366-3dcb-4824-9f33-70e07af984d8", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Visualizing the Vector Store\n", |
||||
"\n", |
||||
"The results actually look just as good with `all-MiniLM-L6-v2`" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e12751ab-f102-4dc6-9c0f-313e5832b75f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Prework\n", |
||||
"\n", |
||||
"result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n", |
||||
"vectors = np.array(result['embeddings'])\n", |
||||
"documents = result['documents']\n", |
||||
"doc_types = [metadata['doc_type'] for metadata in result['metadatas']]\n", |
||||
"colors = [['blue', 'red', 'orange'][['languages', 'mountains', 'regions'].index(t)] for t in doc_types]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "422e3247-2de0-44ba-82bc-30b4f739da7e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Reduce the dimensionality of the vectors to 2D using t-SNE\n", |
||||
"# (t-distributed stochastic neighbor embedding)\n", |
||||
"\n", |
||||
"tsne = TSNE(n_components=2, random_state=42)\n", |
||||
"reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
"# Create the 2D scatter plot\n", |
||||
"fig = go.Figure(data=[go.Scatter(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5, color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(doc_types, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
")])\n", |
||||
"\n", |
||||
"fig.update_layout(\n", |
||||
" title='2D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x',yaxis_title='y'),\n", |
||||
" width=800,\n", |
||||
" height=600,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
")\n", |
||||
"\n", |
||||
"fig.show()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "2cff9065-de3d-4e91-8aff-c7ad750a4334", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Comment: Relying on Gradio's history handling seems to be memory enough\n", |
||||
"##### If all you need is your favorite LLM with expertise in your knowledge collection"
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "aebb676f-883e-4b2b-8420-13f2a8399e77", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are a helpful assistant for everything French. Give brief, accurate answers. \\\n", |
||||
"Do not provide any information that you haven't been asked for, even if you have lots of context. \\\n", |
||||
"If you haven't been provided with relevant context, say you don't know. Do not make anything up, only \\\n", |
||||
"provide answers that are based in the context you have been given. Do not comment on the provided context. \\\n", |
||||
"If the user doesn't ask for any information, engage in brief niceties and offer your expertise regarding France.\"\n", |
||||
"\n", |
||||
"history = [{\"role\": \"system\", \"content\": system_prompt}]\n", |
||||
"\n", |
||||
"def get_user_prompt(prompt):\n", |
||||
" # semantic search!!\n", |
||||
" context = semantic_search(collection, prompt)['documents'][0]\n", |
||||
"\n", |
||||
" if len(context) > 0:\n", |
||||
" prompt += f\"\\n\\n[AUTOMATIC SYSTEM CONTEXT ADDITION] Here is some context that might be useful for answering the question:\"\n", |
||||
"\n", |
||||
" for doc in context:\n", |
||||
" prompt += f\"\\n\\n{doc}\"\n", |
||||
" \n", |
||||
" user_prompt = {\"role\": \"user\", \"content\": prompt}\n", |
||||
"\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "23b70162-2c4f-443e-97c8-3e675304d307", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_gpt(message, history):\n", |
||||
" messages = [{\"role\": \"system\", \"content\": system_prompt}] + history\n", |
||||
" messages.append(get_user_prompt(message))\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=messages,\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" result = \"\"\n", |
||||
" for chunk in stream:\n", |
||||
" result += chunk.choices[0].delta.content or \"\"\n", |
||||
" yield result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4ecf4a30-452d-4d41-aa60-fa62c8e2559b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Gradio\n", |
||||
"\n", |
||||
"gr.ChatInterface(fn=stream_gpt, type=\"messages\").launch(inbrowser=True)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,42 @@
|
||||
# Overview of Alsacien Language |
||||
|
||||
## Definition |
||||
Alsacien, also known as Alsatian or Alsatian German, is a variety of the Alemannic branch of the Germanic languages spoken predominantly in Alsace, France. |
||||
|
||||
## Geographic Distribution |
||||
- Primarily spoken in Alsace, a region in northeastern France. |
||||
- Communities of Alsacien speakers can also be found in neighboring regions of Germany and Switzerland. |
||||
|
||||
## Linguistic Classification |
||||
- **Language Family**: Indo-European |
||||
- **Subfamily**: Germanic |
||||
- **Group**: West Germanic |
||||
- **Branch**: High German |
||||
|
||||
## Speakers |
||||
- Estimates of native speakers range from 500,000 to 1 million, though use has declined due to factors like urbanization and language shift towards French. |
||||
|
||||
## Dialectal Variations |
||||
- Alsacien includes multiple dialects, which may vary significantly from one locality to another. |
||||
- Two main dialects: |
||||
- **Haut-Rhin** (Upper Rhine) |
||||
- **Bas-Rhin** (Lower Rhine) |
||||
|
||||
## Characteristics |
||||
- Strongly influenced by both French and standard German, leading to unique vocabulary and pronunciation. |
||||
- Grammar and syntax retain features of Middle High German. |
||||
|
||||
## Cultural Significance |
||||
- Acts as a marker of regional identity for the people of Alsace. |
||||
- Extensively used in local media, literature, and music, particularly folk traditions. |
||||
|
||||
## Status |
||||
- Considered a vulnerable language by UNESCO. |
||||
- Efforts are ongoing for revitalization, including teaching in schools and cultural associations promoting its use. |
||||
|
||||
## Related Languages |
||||
- Closely related to Swiss German and other Alemannic dialects. |
||||
- Influenced by and influences neighboring languages, particularly French. |
||||
|
||||
## Conclusion |
||||
Alsacien is a vital part of the cultural heritage of the Alsace region, with ongoing efforts aimed at preserving and promoting its use among younger generations. |
@ -0,0 +1,31 @@
|
||||
# Overview of the Bourguignon Language |
||||
|
||||
## General Information |
||||
- **Name**: Bourguignon |
||||
- **Region**: Primarily spoken in the Burgundy region of France |
||||
- **Language Family**: Romance languages |
||||
- **Classification**: It is part of the Langue d'oïl group, which also includes languages like French, Norman, and Picard. |
||||
|
||||
## Historical Context |
||||
- **Origin**: Derived from Vulgar Latin, Bourguignon developed in the medieval period and reflects the linguistic evolution of the region. |
||||
- **Influence**: Historically influenced by Old French, as well as regional dialects and neighboring languages. |
||||
|
||||
## Features |
||||
- **Dialects**: Bourguignon comprises several dialects, often differing significantly from one another. |
||||
- **Phonetics**: The phonetic system exhibits distinct sounds not found in Standard French. |
||||
- **Vocabulary**: Contains unique vocabulary and expressions that may not be understood by standard French speakers. |
||||
|
||||
## Current Status |
||||
- **Speaker Population**: The number of speakers has declined over the years, with estimates suggesting only a few thousand fluent speakers today. |
||||
- **Recognition**: Bourguignon is not an official language in France, but there are efforts to preserve and promote its use among local communities. |
||||
|
||||
## Cultural Significance |
||||
- **Folklore and Literature**: Bourguignon has a rich tradition of oral literature, including folk tales and songs that reflect the cultural heritage of Burgundy. |
||||
- **Festivals and Events**: Local festivals often include performances in Bourguignon, celebrating the language's place in regional identity. |
||||
|
||||
## Modern Efforts |
||||
- **Revitalization**: Initiatives to teach Bourguignon in schools and promote its use in cultural activities aim to preserve the language for future generations. |
||||
- **Media Presence**: Some local media, including radio stations and publications, feature Bourguignon, fostering a sense of community among speakers. |
||||
|
||||
## Conclusion |
||||
Bourguignon remains an important part of the cultural identity of the Burgundy region, reflecting the historical and linguistic diversity of France. Efforts to revive and sustain the language highlight its significance within the local heritage. |
@ -0,0 +1,33 @@
|
||||
# Overview of the Breton Language |
||||
|
||||
## General Information |
||||
- **Name**: Breton (Brezhoneg) |
||||
- **Language Family**: Celtic, part of the Brythonic branch |
||||
- **Region**: Brittany (Breizh), France |
||||
|
||||
## Historical Background |
||||
- **Origins**: Breton is derived from the Brythonic Celtic languages that were spoken in Great Britain. It arrived in Brittany with settlers from Britain during the early medieval period. |
||||
- **First Documented Evidence**: The earliest written examples of Breton date back to the 8th century. |
||||
|
||||
## Linguistic Features |
||||
- **Dialects**: There are three main dialects of Breton: |
||||
- **Gouèze** (Western) |
||||
- **Kerne** (Central) |
||||
- **Leoneg** (Eastern) |
||||
- **Alphabet**: The modern Breton alphabet uses the Latin script with some diacritics. |
||||
|
||||
## Current Status |
||||
- **Speakers**: Approximately 200,000 to 300,000 speakers as of recent estimates. |
||||
- **Recognition**: Breton is recognized as a regional language in France, but it does not hold official status. |
||||
- **Revitalization Efforts**: There are ongoing initiatives to promote the language, including bilingual education and media in Breton. |
||||
|
||||
## Cultural Significance |
||||
- **Literature and Music**: Breton has a rich oral tradition, including folklore, songs, and poetry. Contemporary literature and music often embrace the language. |
||||
- **Festivals**: Events like Fest-Noz (night festivals) celebrate Breton culture and often feature music and dance in the Breton language. |
||||
|
||||
## Challenges |
||||
- **Decline**: The number of native speakers has declined significantly due to historical policies and the dominance of French. |
||||
- **Education**: Breton is not widely taught in schools, although there are some bilingual programs and immersion schools. |
||||
|
||||
## Conclusion |
||||
Breton is a vibrant Celtic language with a rich history and cultural heritage, facing challenges in the modern age but supported by revitalization efforts and community engagement. |
@ -0,0 +1,34 @@
|
||||
# Overview of the Gascon Language |
||||
|
||||
## General Information |
||||
- **Language Family**: Occitan branch of the Romance languages. |
||||
- **Region**: Primarily spoken in the Gascony region of southwestern France, which includes parts of the departments of Gers, Landes, and Pyrénées-Atlantiques. |
||||
|
||||
## Historical Context |
||||
- **Origins**: Gascon evolved from Vulgar Latin and has influences from the Visigoths and various other historical invaders. |
||||
- **Status**: Once a widely spoken language, Gascon has seen a decline in the number of speakers, particularly in urban areas, due to the rise of French as the dominant language. |
||||
|
||||
## Dialects |
||||
- **Varieties**: Gascon includes several dialects, most notably: |
||||
- **Bigourdan**: Spoken in the region of Bigorre. |
||||
- **Armanac**: Found in Armagnac. |
||||
- **Languedocien influence**: not a Gascon dialect proper, but it shapes the speech of some Gascon speakers, particularly those in mixed-language areas.
||||
|
||||
## Linguistic Features |
||||
- **Phonetics**: Gascon has unique phonetic characteristics, such as the preservation of the Latin 'u' sound and certain nasal vowels. |
||||
- **Vocabulary**: Contains a wealth of regional vocabulary, along with borrowings from French, Occitan, and Basque. |
||||
|
||||
## Cultural Significance |
||||
- **Literature**: Historically, Gascon has been used in regional literature and songs, contributing richly to the cultural heritage of the area. |
||||
- **Folklore and Traditions**: Gascon is an important vehicle for local folklore, traditions, and customs in Gascony. |
||||
|
||||
## Current Status |
||||
- **Revitalization Efforts**: There are ongoing efforts to promote and teach Gascon in schools, cultural organizations, and through local media. |
||||
- **Number of Speakers**: As of recent estimates, the number of fluent speakers is declining, with efforts being made to preserve the language among younger generations. |
||||
|
||||
## Related Languages |
||||
- **Occitan**: Gascon is one of the major dialects of the Occitan language, which also includes Provençal and Languedocien. |
||||
- **Comparison to French**: While Gascon shares some similarities with French, it retains distinct grammatical structures and vocabulary. |
||||
|
||||
## Conclusion |
||||
Gascon is not only a language but a crucial component of the cultural identity of the Gascon people, reflecting their history, traditions, and regional pride. Efforts for revitalization continue to be important in preserving this unique linguistic heritage. |
@ -0,0 +1,30 @@
|
||||
# Overview of Languedocien Language |
||||
|
||||
## General Information |
||||
- **Language Family**: Occitan |
||||
- **Region**: Primarily spoken in the Languedoc region of southern France. |
||||
- **ISO Code**: Not officially assigned, but sometimes referred to as "oc" for Occitan. |
||||
|
||||
## Linguistic Features |
||||
- **Dialects**: Languedocien is one of the major dialects of the Occitan language, which also includes Provençal, Gascon, and Auvergnat. |
||||
- **Phonetics**: Characterized by the presence of certain vowel sounds and the use of diphthongs that may differ from other dialects. |
||||
- **Grammar**: Similar to other Occitan dialects, it features a subject-verb-object structure, but with unique local variations. |
||||
|
||||
## Vocabulary |
||||
- **Lexical Influence**: Languedocien vocabulary is heavily influenced by Latin, with a significant number of words also derived from Provençal and other regional languages. |
||||
- **Regionalisms**: Contains unique words and expressions that are specific to local culture and traditions. |
||||
|
||||
## Cultural Context |
||||
- **Recognition**: While part of the Occitan language family, Languedocien does not have official status in France and is considered a regional language. |
||||
- **Literature**: Historically used in medieval literature; notable authors include Frédéric Mistral and others who contributed to the revival of Occitan literature. |
||||
|
||||
## Current Status |
||||
- **Speakers**: There are an estimated few hundred thousand speakers, with numbers decreasing due to the dominance of French. |
||||
- **Revitalization Efforts**: Various cultural organizations and schools aim to preserve and promote the use of Languedocien through courses, workshops, and public events. |
||||
|
||||
## Geographic Distribution |
||||
- **Primary Areas**: Predominantly spoken in the departments of Hérault, Aude, Gard, and parts of Lozère and Pyrénées-Orientales. |
||||
- **Urban vs. Rural**: More commonly spoken in rural areas, with younger generations tending to use it less in urban settings. |
||||
|
||||
## Conclusion |
||||
Languedocien remains an essential part of the cultural heritage of southern France, reflecting the region's history, traditions, and linguistic diversity. Efforts to sustain and promote the language continue amidst challenges posed by modernization and globalization. |
@ -0,0 +1,26 @@
|
||||
# Overview of the Lorrain Language |
||||
|
||||
## General Information |
||||
- **Language Family**: Lorrain is part of the Langue d'Oïl languages, which are a subgroup of the Romance languages. |
||||
- **Region**: Primarily spoken in the Lorraine region of northeastern France. |
||||
- **Dialects**: There are various dialects of Lorrain, including certain variations influenced by local languages and cultures. |
||||
|
||||
## Historical Context |
||||
- **Origins**: The language has roots dating back to the medieval period and was influenced by the historical presence of the Duchy of Lorraine. |
||||
- **Language Shift**: Over the 19th and 20th centuries, Lorrain saw a decline in usage due to the dominance of French, leading many speakers to shift to French. |
||||
|
||||
## Linguistic Features |
||||
- **Phonology**: Lorrain phonetics include distinct sounds that differentiate it from standard French and other Langue d'Oïl languages. |
||||
- **Vocabulary**: The lexicon of Lorrain retains several archaic words and expressions that have disappeared from modern French. |
||||
- **Grammar**: Similar to French but with unique grammatical structures and conjugations, reflecting its distinct identity. |
||||
|
||||
## Cultural Significance |
||||
- **Traditions**: Lorrain is often associated with local folklore, songs, and literature, which contribute to the cultural identity of Lorraine. |
||||
- **Preservation Efforts**: Various initiatives have been undertaken to promote and preserve the Lorrain language, including cultural festivals and educational programs. |
||||
|
||||
## Current Status |
||||
- **Speaker Population**: The number of active speakers has significantly decreased, with many older speakers and limited transmission to younger generations. |
||||
- **Revitalization**: Recent efforts are being made to revive interest in Lorrain among younger populations through workshops, classes, and media. |
||||
|
||||
## Conclusion |
||||
Lorrain is a unique language that embodies the rich cultural heritage of the Lorraine region. While it faces challenges, ongoing efforts aim to preserve and revitalize this historical language for future generations. |
@ -0,0 +1,34 @@
|
||||
# Overview of the Normand Language |
||||
|
||||
## What is Normand? |
||||
Normand is a regional language of France, part of the Oïl language group. It originates from the Normandy region and descends from Old Norman, the medieval langue d'oïl variety of Normandy that also strongly influenced English after 1066. |
||||
|
||||
## Geographic Distribution |
||||
- Predominantly spoken in Normandy, particularly in the departments of Seine-Maritime and Calvados. |
||||
- Some dialects extend into the Channel Islands (like Jersey and Guernsey), where it is closely related to Jèrriais and Guernésiais. |
||||
|
||||
## Dialects |
||||
Normand has several dialects, which can vary significantly in terms of vocabulary, pronunciation, and grammar. Key dialects include: |
||||
- **Bocage**: Spoken in the rural areas of western Normandy. |
||||
- **Mélée**: Found in the northeastern part. |
||||
- **Sèvres**: A dialect with influences from the urban centers. |
||||
|
||||
## Linguistic Features |
||||
- Normand retains many archaic French features that have evolved in Standard French. |
||||
- The pronunciation of vowels and some consonant sounds can be quite distinct from Standard French. |
||||
- There are notable differences in use of articles and noun endings compared to Standard French. |
||||
|
||||
## Historical Context |
||||
- Norman was historically influential due to the Viking settlement of Normandy in the 9th century and subsequent Norman Conquest of England in 1066. |
||||
- It was widely used by the nobility and in administrative contexts until French became more dominant post-16th century. |
||||
|
||||
## Current Status |
||||
- Normand is considered a minority language and has seen a decline in speakers over the years. |
||||
- Efforts for revitalization are ongoing, with various cultural associations promoting the language through education and media. |
||||
|
||||
## Cultural Aspects |
||||
- Normand has a rich oral tradition, with folk tales, songs, and proverbs integral to the culture of Normandy. |
||||
- Festivals and events celebrating Normand language and culture are held in various communities. |
||||
|
||||
## Conclusion |
||||
While facing challenges due to globalization and the dominance of Standard French, Normand remains an important part of the cultural heritage of Normandy. Efforts to preserve and promote the language continue, aiming to maintain its presence for future generations. |
@ -0,0 +1,27 @@
|
||||
# Overview of the Picard Language |
||||
|
||||
## General Information |
||||
- **Language Family**: Romance, specifically a part of the West Oïl languages, which also includes French. |
||||
- **Region**: Primarily spoken in the historic region of Picardy in northern France, as well as in parts of Belgium and historically in the areas of the nearby Nord-Pas-de-Calais. |
||||
|
||||
## Linguistic Characteristics |
||||
- **Dialects**: There are several dialects of Picard, including Amiénois, Beauvaisis, and Hesdinois. |
||||
- **Vocabulary**: Shares many lexical items with French but also retains unique words and expressions. Some vocabulary is influenced by local historical interactions with Dutch and German. |
||||
|
||||
## Historical Context |
||||
- **Origins**: Evolved from Latin, like other Romance languages. Roots trace back to the Vulgar Latin spoken in the region during the Roman Empire. |
||||
- **Literary Tradition**: Has a rich but lesser-known literary tradition, with poetry and prose dating back to the Middle Ages. |
||||
|
||||
## Current Status |
||||
- **Speakers**: The number of speakers has declined significantly over the 20th century due to the dominance of standard French in education, media, and public life. |
||||
- **Revitalization Efforts**: Recent efforts in France and Belgium include community classes, cultural organizations, and media in Picard to promote the language and encourage its transmission to younger generations. |
||||
|
||||
## Cultural Significance |
||||
- **Identity**: Picard is an important part of regional identity and cultural heritage for many people in northern France. |
||||
- **Festivals and Events**: Regional festivals celebrate Picard culture, featuring traditional songs, dances, and cuisine. |
||||
|
||||
## Legal Status |
||||
- **Recognition**: Picard has no official status in France, but it is recognized as a regional language. Efforts have been made to include it in educational curricula and local government documents in some areas. |
||||
|
||||
## Conclusion |
||||
Picard is a unique language that reflects the cultural and historical tapestry of northern France. Despite challenges, there are active efforts to preserve and promote its usage among future generations. |
@ -0,0 +1,27 @@
|
||||
# Overview of Provençal Language |
||||
|
||||
## Definition |
||||
Provençal is a Romance language that belongs to the Occitan language family, which is spoken primarily in the Provence region of southern France. |
||||
|
||||
## Historical Background |
||||
- **Origins**: Provençal has its roots in Vulgar Latin and has been influenced by various languages and cultures throughout history, including Celtic, Germanic, and Arabic. |
||||
- **Literary Tradition**: It has a rich literary tradition dating back to the 11th century, with notable poets such as Frédéric Mistral contributing to its revival in the 19th century. |
||||
|
||||
## Geographic Distribution |
||||
- **Regions**: Primarily spoken in Provence, it also has speakers in parts of Italy and Spain, particularly in the Val d'Aran valley in Catalonia, known as Aranese. |
||||
- **Dialectal Variations**: Provençal encompasses several dialects, such as Rhodanien (Rhône), Maritime, and Niçard, reflecting the linguistic diversity within the Occitan language. |
||||
|
||||
## Current Status |
||||
- **Recognition**: Provençal is recognized as a cultural language in France but has a minority status and faces challenges due to the dominance of French. |
||||
- **Revitalization Efforts**: There are ongoing efforts to promote and teach Provençal, including in schools and cultural institutions. |
||||
|
||||
## Linguistic Features |
||||
- **Grammar and Syntax**: Provençal has distinct grammatical structures that differentiate it from standard French, including the use of gendered nouns and specific verb conjugations. |
||||
- **Vocabulary**: It retains many words and expressions derived from Latin, along with unique local terms and influences from neighboring languages. |
||||
|
||||
## Cultural Significance |
||||
- **Folklore and Traditions**: Provençal is an important part of the cultural identity in Provence, associated with local traditions, music, festivals, and cuisine. |
||||
- **Media and Literature**: There are books, newspapers, and online resources available in Provençal, contributing to its presence in modern media. |
||||
|
||||
## Conclusion |
||||
Provençal is a vibrant language with a deep historical and cultural significance in southern France. While it faces challenges, ongoing efforts for its preservation continue to foster interest and engagement in this unique linguistic heritage. |
@ -0,0 +1,37 @@
|
||||
# Overview of the French Alps |
||||
|
||||
## General Information |
||||
- **Location:** Southeastern France, extending into Switzerland and Italy. |
||||
- **Length:** Approximately 1,200 kilometers (750 miles). |
||||
- **Highest Peak:** Mont Blanc, standing at 4,808 meters (15,774 feet). |
||||
- **Mountain Chain:** Part of the larger Alpine range that spans across several European countries. |
||||
|
||||
## Geography |
||||
- **Geological Composition:** Primarily composed of limestone and granite. |
||||
- **Major Valleys:** Includes the Rhône and Isère valleys. |
||||
- **Natural Parks:** Home to several national parks, including Écrins National Park and Vanoise National Park. |
||||
|
||||
## Climate |
||||
- **Variety:** Alpine climate with large variations; cold winters and mild summers. |
||||
- **Snowfall:** Heavy snowfall in winter makes it a prime destination for winter sports. |
||||
|
||||
## Flora and Fauna |
||||
- **Biodiversity:** Rich diversity of species; includes both alpine and Mediterranean flora. |
||||
- **Wildlife:** Encounters with species such as chamois, ibex, and golden eagles. |
||||
|
||||
## Activities |
||||
- **Winter Sports:** Skiing and snowboarding are popular, with famous resorts like Chamonix, Courchevel, and Val d’Isère. |
||||
- **Summer Activities:** Hiking, mountaineering, and mountain biking attract visitors during the warmer months. |
||||
- **Paragliding:** Known as a hotspot for paragliding due to favorable winds and stunning views. |
||||
|
||||
## Cultural Significance |
||||
- **Local Communities:** Home to various Alpine villages and cultures, each with unique traditions and languages. |
||||
- **Gastronomy:** Famous for local cheeses (like Beaufort and Reblochon), charcuterie, and dishes such as fondue and raclette. |
||||
|
||||
## Historical Aspects |
||||
- **Cultural Heritage:** Influenced by Roman and medieval settlements, with significant archaeological sites. |
||||
- **Tourism:** Became a major tourist destination in the 19th century. |
||||
|
||||
## Importance |
||||
- **Economic Significance:** Tourism is a vital part of the local economy, alongside agriculture and forestry. |
||||
- **Sustainability Focus:** Growing emphasis on sustainable tourism practices to protect the fragile alpine ecosystem. |
@ -0,0 +1,36 @@
|
||||
# Overview of the Ardennes Mountain Range |
||||
|
||||
## Location |
||||
- The Ardennes is a region located in the northeastern part of France, extending into Belgium and Luxembourg. |
||||
|
||||
## Geography |
||||
- The Ardennes is characterized by dense forests, deep valleys, and rolling hills. |
||||
- The highest peak of the Ardennes as a whole is the Signal de Botrange, which reaches an elevation of about 2,277 feet (694 meters) and is situated in Belgium; the French part of the range reaches more modest elevations. |
||||
|
||||
## Geology |
||||
- The area is known for its rugged terrain and is primarily composed of sedimentary rocks such as limestone and sandstone. |
||||
- The landscape has been shaped by glacial and river erosion over millennia. |
||||
|
||||
## Climate |
||||
- The Ardennes has a temperate maritime climate, with cool summers and mild winters. |
||||
- Precipitation is relatively high, leading to lush vegetation. |
||||
|
||||
## Flora and Fauna |
||||
- The region is home to diverse wildlife, including deer, wild boar, and various bird species. |
||||
- Dense forests are dominated by beech and fir trees, and many areas are protected as nature reserves. |
||||
|
||||
## Human Activity |
||||
- The Ardennes has a rich history, having been inhabited since prehistoric times. |
||||
- It has significance in World War I and II, particularly during the Battle of the Bulge. |
||||
- The region is known for outdoor activities such as hiking, cycling, and kayaking. |
||||
|
||||
## Cultural Aspects |
||||
- The Ardennes is dotted with picturesque villages and towns, showcasing traditional architecture. |
||||
- The area is known for its beer production, particularly in Belgium, with many breweries operating in the region. |
||||
|
||||
## Tourism |
||||
- Key attractions include the Semois River, the fortress of Bouillon, and the expansive forests of the Ardennes. |
||||
- The region offers several trails and parks, attracting nature lovers and adventure enthusiasts. |
||||
|
||||
## Conclusion |
||||
The Ardennes is a unique blend of natural beauty, historical significance, and cultural richness, making it an important region in France and beyond. |
@ -0,0 +1,37 @@
|
||||
# Overview of the Jura Mountain Range in France |
||||
|
||||
## Location |
||||
- The Jura Mountains are located along the border between France and Switzerland. |
||||
- They stretch approximately 365 kilometers (227 miles) from the Rhône River in the south to the Rhine River in the north. |
||||
|
||||
## Geography |
||||
- The Jura is characterized by its rugged terrain, with numerous peaks, plateaus, and deep valleys. |
||||
- The highest peak in the French Jura is Crêt de la Neige, which rises to an elevation of 1,720 meters (5,643 feet). |
||||
|
||||
## Geology |
||||
- The range is primarily composed of limestone, which has been shaped by erosion, creating unique karst formations, caves, and cliffs. |
||||
- The Jura Mountains were formed during the Jurassic period, which is reflected in their name. |
||||
|
||||
## Climate |
||||
- The climate in the Jura varies from humid in the west to drier conditions in the east. |
||||
- The area experiences significant snowfall in winter, making it popular for winter sports. |
||||
|
||||
## Flora and Fauna |
||||
- The Jura is home to diverse ecosystems, including forests, alpine meadows, and wetlands. |
||||
- Wildlife includes species such as deer, chamois, marmots, and a variety of bird species. |
||||
|
||||
## Activities |
||||
- The Jura Mountains offer various outdoor activities, including hiking, skiing, and mountain biking. |
||||
- The region is known for its beautiful landscapes and natural parks, attracting tourists and nature enthusiasts. |
||||
|
||||
## Cultural Significance |
||||
- The Jura region is also known for its traditional cheese production, particularly Comté cheese. |
||||
- Numerous charming villages and towns, such as Arbois and Clairvaux-les-Lacs, showcase the cultural heritage of the area. |
||||
|
||||
## History |
||||
- The Jura Mountains have historical significance, having served as a natural barrier and route for trade and exploration. |
||||
- The region has witnessed various historical events, including battles during the French Revolutionary Wars and the Napoleonic Wars. |
||||
|
||||
## Accessibility |
||||
- The Jura is accessible from major cities like Geneva, Lyon, and Besançon, making it a popular destination for both locals and tourists. |
||||
- Several scenic routes and parks are maintained to facilitate exploration and enjoyment of the natural beauty. |
@ -0,0 +1,35 @@
|
||||
# Overview of the Massif Armorican |
||||
|
||||
## Location |
||||
- **Region**: Brittany, France |
||||
- **Coordinates**: Approximately 47° N latitude and 2° W longitude |
||||
|
||||
## Geography |
||||
- **Type**: Mountain range and geological massif |
||||
- **Area**: Covers parts of the departments of Ille-et-Vilaine, Morbihan, and Finistère |
||||
- **Elevation**: The highest point of the massif, **Mont des Avaloirs**, reaches 416 meters (1,365 feet) |
||||
|
||||
## Geology |
||||
- **Formation**: Primarily composed of ancient metamorphic rocks and granite formations, dating back to the Precambrian and Paleozoic eras |
||||
- **Tectonic Activity**: Influenced by the Variscan orogeny, which caused significant geological changes |
||||
|
||||
## Flora and Fauna |
||||
- **Biodiversity**: Home to diverse ecosystems, including heathlands, forests, and wetlands |
||||
- **Protected Areas**: Parts of the massif are designated as natural parks and reserves, promoting conservation efforts |
||||
|
||||
## Culture and History |
||||
- **Historical Significance**: The area is rich in megalithic structures and archaeological sites, reflecting ancient Celtic culture |
||||
- **Tourism**: Popular for hiking, cycling, and exploring its historical sites, contributing to local economies |
||||
|
||||
## Climate |
||||
- **Climate Type**: Maritime temperate climate, characterized by mild winters and cool summers |
||||
- **Precipitation**: Receives a significant amount of rainfall throughout the year, supporting its lush vegetation |
||||
|
||||
## Attractions |
||||
- **Sites of Interest**: Includes historic towns, châteaux, and picturesque landscapes, attracting visitors for both natural beauty and cultural heritage |
||||
- **Outdoor Activities**: Offers opportunities for outdoor sports such as hiking, horseback riding, and nature observation |
||||
|
||||
## Transportation |
||||
- **Accessibility**: Well-connected by road and rail, making it easily accessible from major urban centers in Brittany |
||||
|
||||
This overview encapsulates the essential aspects of the Massif Armorican, highlighting its geographical, geological, and cultural significance in France. |
@ -0,0 +1,34 @@
|
||||
# Overview of Massif Central |
||||
|
||||
## General Information |
||||
- **Location**: South-central France |
||||
- **Area**: Approximately 85,000 km² |
||||
- **Highest Peak**: Puy de Sancy (1,885 meters) |
||||
- **Geological Composition**: Primarily volcanic and sedimentary rocks |
||||
|
||||
## Geography |
||||
- **Regions Covered**: Spans across several French departments including Cantal, Puy-de-Dôme, Haute-Loire, and Lozère. |
||||
- **Landscape**: Characterized by plateaus, volcanic cones, deep valleys, and rivers. |
||||
|
||||
## Climate |
||||
- **Type**: Predominantly oceanic climate with a continental influence. |
||||
- **Precipitation**: Higher rainfall in the western regions, often resulting in lush landscapes. |
||||
|
||||
## Flora and Fauna |
||||
- **Biodiversity**: Home to various ecosystems, including grasslands, forests, and wetlands. |
||||
- **Protected Areas**: Includes several national parks and nature reserves, such as the Parc Naturel Régional des Volcans d'Auvergne. |
||||
|
||||
## Cultural Significance |
||||
- **History**: Affected by various historical events and populations, including the Gauls and the Roman Empire. |
||||
- **Heritage**: Rich cultural heritage with medieval towns, castles, and traditional practices. |
||||
|
||||
## Economic Importance |
||||
- **Agriculture**: Known for agriculture, particularly cheese production (e.g., Saint-Nectaire, Cantal). |
||||
- **Tourism**: Popular destination for outdoor activities such as hiking, skiing, and exploring natural parks. |
||||
|
||||
## Notable Features |
||||
- **Volcanic Activity**: The region contains many extinct volcanoes, with some still showing geothermal activity. |
||||
- **Natural Attractions**: Features stunning sites like the Gorges de la Loire and the Chaîne des Puys, a UNESCO World Heritage site. |
||||
|
||||
## Accessibility |
||||
- **Transport**: Well-connected by road and rail, with several towns providing access points for visitors. |
@ -0,0 +1,44 @@
|
||||
# Overview of the Morvan Mountain Range |
||||
|
||||
## Location |
||||
- **Country**: France |
||||
- **Region**: Burgundy (Bourgogne) |
||||
- **Department**: Nièvre, Saône-et-Loire, Côte-d'Or |
||||
|
||||
## Geography |
||||
- **Coordinates**: Approximately 47°10′N 3°55′E |
||||
- **Highest Peak**: Haut-Folin (Mont Beuvray, at 821 meters, is the range's best-known historic summit) |
||||
- **Elevation**: 901 meters (2,956 feet) |
||||
- **Area**: Approximately 3,500 square kilometers |
||||
- **Major Rivers**: Cure, Yonne, and Loing flow through the region. |
||||
|
||||
## Geology |
||||
- Composed primarily of granitic and metamorphic rocks. |
||||
- The landscape features rolling hills, valleys, and plateaus. |
||||
- Known for its rich biodiversity and varied ecosystems. |
||||
|
||||
## Climate |
||||
- **Type**: Temperate continental climate. |
||||
- **Weather**: Mild summers and cold winters with occasional snowfall. |
||||
|
||||
## History |
||||
- The Morvan area has a rich history dating back to prehistoric times. |
||||
- Notable archaeological sites include the remnants of the Gallic tribe of the Aedui in Mont Beuvray. |
||||
- The region was significant during the Roman conquest of Gaul. |
||||
|
||||
## Culture and Economy |
||||
- The Morvan is known for its traditional rural lifestyle and local crafts. |
||||
- Main industries include agriculture, forestry, and tourism. |
||||
- Famous for Morvan cheese and wines from the surrounding Burgundy region. |
||||
|
||||
## Tourism |
||||
- Offers a variety of outdoor activities such as hiking, cycling, and fishing. |
||||
- Home to the Morvan Regional Natural Park, established in 1970, which promotes conservation and sustainable tourism. |
||||
- Attractions include ancient ruins, beautiful landscapes, and charming villages. |
||||
|
||||
## Wildlife |
||||
- Habitat for various species, including deer, wild boars, and numerous bird species. |
||||
- Rich flora with many endemic plant species. |
||||
|
||||
## Conservation |
||||
- The region emphasizes environmental protection and sustainability in its natural park initiatives. |
@ -0,0 +1,40 @@
|
||||
# Overview of the Pyrenees Mountain Range |
||||
|
||||
## Geographic Location |
||||
- The Pyrenees mountain range forms a natural border between **France** and **Spain**. |
||||
- It extends approximately **430 kilometers (267 miles)** from the Atlantic Ocean (Bay of Biscay) in the west to the Mediterranean Sea in the east. |
||||
|
||||
## Major Peaks |
||||
- **Aneto** is the highest peak, with an elevation of **3,404 meters (11,168 feet)**. |
||||
- Other notable peaks include **Monte Perdido**, **Vignemale**, and **Pic du Midi d'Ossau**. |
||||
|
||||
## Geography and Geology |
||||
- The Pyrenees are divided into three sections: |
||||
- **Western Pyrenees**: Characterized by rugged terrain and steep valleys. |
||||
- **Central Pyrenees**: Known for its glacial landscapes and high peaks. |
||||
- **Eastern Pyrenees**: Features more rounded hills and a transition to the Mediterranean landscape. |
||||
- The range is primarily composed of granite, limestone, and schist rock formations. |
||||
|
||||
## Climate |
||||
- The climate varies from oceanic in the west to Mediterranean in the east. |
||||
- Snowfall is common during the winter months, making it a popular destination for skiing and winter sports. |
||||
|
||||
## Flora and Fauna |
||||
- The region is home to diverse ecosystems, featuring forests, meadows, and alpine tundra. |
||||
- Wildlife includes species such as the **Pyrenean ibex**, **brown bear**, **vultures**, and various endemic plants. |
||||
|
||||
## Cultural Significance |
||||
- The Pyrenees have a rich history, with numerous prehistoric caves, Roman ruins, and medieval castles. |
||||
- The region is culturally significant for both France and Spain, with unique traditions, languages (such as **Occitan** and **Catalan**), and gastronomy. |
||||
|
||||
## Outdoor Activities |
||||
- The Pyrenees are a popular destination for various activities including: |
||||
- **Hiking**: Numerous trails cater to different skill levels. |
||||
- **Skiing and Snowboarding**: Several ski resorts like **Saint-Lary-Soulan** and **Baqueira Beret**. |
||||
- **Climbing and Mountaineering**: Challenging routes attract climbers from around the world. |
||||
|
||||
## National Parks |
||||
- Several national parks, including **Pyrenees National Park** in France and **Ordesa y Monte Perdido National Park** in Spain, protect this stunning natural environment and its biodiversity. |
||||
|
||||
## Accessibility |
||||
- The Pyrenees can be accessed from various cities, including **Toulouse** and **Barcelona**, with numerous roads and hiking paths connecting different areas of the mountains. |
@ -0,0 +1,33 @@
|
||||
# Vosges Mountains Overview |
||||
|
||||
## Geography |
||||
- **Location**: Northeastern France, bordering Germany to the east. |
||||
- **Length**: Approximately 150 kilometers (93 miles) from north to south. |
||||
- **Elevation**: The highest peak is the **Grand Ballon**, which reaches an elevation of **1,424 meters** (4,672 feet). |
||||
|
||||
## Natural Features |
||||
- **Landscape**: Characterized by rolling hills, dense forests, and numerous lakes and streams. |
||||
- **Geology**: Composed mainly of granite and sandstone, along with some limestone. |
||||
- **Flora and Fauna**: Home to diverse ecosystems, including coniferous and deciduous forests, and various wildlife such as deer, wild boar, and a range of bird species. |
||||
|
||||
## Climate |
||||
- **Influence**: The Vosges mountains create a rainshadow effect, leading to varied climates on either side of the range. |
||||
- **Weather**: Generally humid, with abundant rainfall, particularly in the western slopes. |
||||
|
||||
## Culture and History |
||||
- **Human Settlement**: Historically inhabited by Celtic tribes, later significant in both the Roman Empire and medieval periods. |
||||
- **Tourism**: Popular for hiking, skiing, and outdoor activities, with many marked trails and ski resorts. |
||||
- **Cultural Heritage**: Known for traditional villages, local cuisine, and the Alsace wine route. |
||||
|
||||
## Notable Locations |
||||
- **Ballons des Vosges Regional Nature Park**: A protected area showcasing the natural beauty of the mountains. |
||||
- **Colmar and Gérardmer**: Prominent towns known for their cultural significance and as tourist destinations. |
||||
- **Route des Crêtes**: A scenic road that offers breathtaking views of the Vosges and surrounding regions. |
||||
|
||||
## Activities |
||||
- **Hiking**: Numerous trails, including the famous GR5 long-distance path. |
||||
- **Skiing**: Various ski resorts, particularly in the higher altitudes. |
||||
- **Cycling**: The region is cyclist-friendly with several bike routes. |
||||
|
||||
## Accessibility |
||||
- **Transport**: Well-connected by road and rail, making it accessible from major French cities and neighboring countries. |
@ -0,0 +1,47 @@
|
||||
# Overview of Alsace-Lorraine Region in France |
||||
|
||||
Alsace-Lorraine is a historically significant and culturally diverse region located in northeastern France. Known for its unique blend of French and German influences, the region has a fascinating history, charming towns, and beautiful landscapes. |
||||
|
||||
## Geography |
||||
- **Location**: Situated along the Rhine River, Alsace-Lorraine borders Germany to the east and Luxembourg to the north. The region is part of the Grand Est administrative region of France. |
||||
- **Area**: Covers approximately 14,524 square kilometers. |
||||
- **Major Cities**: Strasbourg (capital of Alsace), Metz (capital of Lorraine), Mulhouse, Nancy, Colmar, and Epinal. |
||||
|
||||
## History |
||||
- **German and French Control**: The region has alternated between French and German control multiple times, particularly during the 19th and 20th centuries. It was part of the German Empire from 1871 to 1918, and again during World War II, before returning to France after the war. |
||||
- **Franco-Prussian War (1870-1871)**: Alsace and most of Lorraine were ceded to Germany after France's defeat in the war. This period marked significant German cultural and linguistic influence. |
||||
- **Post-World War II**: After World War II, Alsace-Lorraine was definitively integrated into France, with the region's mixed identity still influencing its culture and language. |
||||
|
||||
## Culture |
||||
- **Bilingualism**: The region has strong Germanic roots, and many people speak both French and a variety of regional dialects, such as Alsatian (a dialect of German). This bilingual heritage is reflected in the local culture, architecture, and cuisine. |
||||
- **Festivals**: Alsace-Lorraine is known for its rich tradition of festivals, especially those celebrating wine and food. The Strasbourg Christmas Market is one of the oldest and most famous in Europe. |
||||
- **Cuisine**: The region is renowned for its hearty and flavorful cuisine, which blends French and German influences. Notable dishes include choucroute (sauerkraut with sausages), tarte flambée (a type of pizza), and kugelhopf (a traditional cake). |
||||
- **Wine**: Alsace is one of the premier wine-producing regions in France, known for its white wines, particularly Riesling, Gewürztraminer, and Pinot Gris. The Alsace Wine Route is a popular tourist attraction. |
||||
|
||||
## Natural Beauty |
||||
- **Vosges Mountains**: Located in Lorraine, the Vosges Mountains offer scenic landscapes, hiking trails, and ski resorts. |
||||
- **The Alsace Wine Route**: Stretching over 170 kilometers, this picturesque route offers breathtaking views of vineyards and charming villages. |
||||
- **Regional Parks**: The region is home to several natural parks, including the Ballons des Vosges Regional Nature Park, which features forests, lakes, and wildlife. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Strasbourg Cathedral**: The Cathedral of Notre-Dame in Strasbourg is a masterpiece of Gothic architecture and a UNESCO World Heritage site. Its astronomical clock and panoramic views from the tower are major attractions. |
||||
- **Château de Haut-Koenigsbourg**: A stunning medieval castle located in the Vosges Mountains, offering panoramic views of the Alsace plain. |
||||
- **Metz’s Cathedral**: The Cathedral of Saint-Étienne in Metz is a notable example of Gothic architecture, with some of the largest stained-glass windows in France. |
||||
- **Colmar**: Known for its well-preserved old town, Colmar is a charming medieval town with colorful half-timbered houses and canals that resemble a fairytale village. |
||||
|
||||
## Economy |
||||
- **Industry**: Alsace-Lorraine has a diverse economy that includes manufacturing, automotive, chemicals, and electronics. The region is home to several large industrial companies, particularly in Strasbourg and Mulhouse. |
||||
- **Agriculture**: The region is known for its agricultural output, particularly in wine production, as well as fruit and vegetable farming. |
||||
- **Tourism**: With its rich history, picturesque landscapes, and cultural festivals, Alsace-Lorraine attracts millions of tourists each year. |
||||
|
||||
## Climate |
||||
- **Continental Climate**: Alsace-Lorraine experiences a continental climate with cold winters and hot, often humid summers. The region’s proximity to the Vosges Mountains means it can also experience significant rainfall, particularly in Lorraine. |
||||
- **Average Temperatures**: Winters can see temperatures drop to around 0°C (32°F), while summer temperatures typically range from 18°C to 25°C (64°F to 77°F). |
||||
|
||||
## Notable People |
||||
- **Albert Schweitzer**: The theologian, physician, and Nobel Peace Prize laureate was born in Kaysersberg, Alsace. |
||||
- **Joan of Arc**: The national heroine of France was born in Domrémy, in Lorraine. |
||||
- **Émile Durkheim**: The founding figure of modern sociology was born in Épinal, in Lorraine. |
||||
|
||||
## Conclusion |
||||
Alsace-Lorraine is a region with a rich, multifaceted history and culture, shaped by its unique position between France and Germany. Its charming towns, breathtaking landscapes, and exceptional food and wine make it a significant part of French heritage and a beloved destination for travelers. |
@ -0,0 +1,47 @@
|
||||
# Overview of Bourgogne (Burgundy) Region in France |
||||
|
||||
Bourgogne, or Burgundy, is a historic and picturesque region located in eastern France. Known for its rich wine heritage, medieval towns, and stunning landscapes, Burgundy is a symbol of French culture and tradition. |
||||
|
||||
## Geography |
||||
- **Location**: Bourgogne is located in central-eastern France, bordered by the regions of Franche-Comté, Rhône-Alpes, Auvergne, and Champagne-Ardenne. |
||||
- **Area**: Covers approximately 31,000 square kilometers. |
||||
- **Major Cities**: Dijon (capital), Auxerre, Beaune, Chalon-sur-Saône, Nevers. |
||||
|
||||
## History |
||||
- **Duchy of Burgundy**: Burgundy was once an independent duchy, and during the Middle Ages, it was one of the most powerful and influential regions in France. It played a key role in European politics. |
||||
- **Unification with France**: In the 15th century, the Duchy of Burgundy became part of France after the death of the last Duke, Charles the Bold, in 1477. The region’s autonomy was gradually absorbed into the French crown. |
||||
- **Historical Significance**: Burgundy has a deep historical legacy, with numerous medieval abbeys, castles, and battlefields that have shaped the region’s identity. |
||||
|
||||
## Culture |
||||
- **Wine Culture**: Burgundy is one of the world’s most famous wine-producing regions, renowned for its Pinot Noir and Chardonnay wines. The region’s vineyards produce some of the finest wines, especially in areas like Côte de Nuits, Côte de Beaune, and Chablis. |
||||
- **Cuisine**: Burgundy cuisine is rich and hearty, with dishes like boeuf bourguignon (beef stew in red wine), coq au vin (chicken cooked in wine), and escargots de Bourgogne (snails cooked in garlic and parsley butter). The region is also known for its mustard, particularly Dijon mustard. |
||||
- **Art and Architecture**: Burgundy is home to several historical and architectural landmarks, including Romanesque churches, medieval towns, and Renaissance palaces. The region has a long-standing tradition of art, with influences from both French and Flemish masters. |
||||
|
||||
## Natural Beauty |
||||
- **Burgundy Canal**: The Burgundy Canal offers scenic views and is a popular spot for boaters and cyclists. It connects the Yonne River to the Saône River and passes through charming villages. |
||||
- **Morvan Regional Natural Park**: Located in the heart of Burgundy, the Morvan Park is known for its forests, lakes, and wildlife, making it a haven for outdoor enthusiasts. |
||||
- **Vineyards**: The rolling hills of the Burgundy vineyards are a UNESCO World Heritage site and are dotted with charming wine villages like Beaune and Meursault. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Dijon**: The capital of Burgundy, known for its well-preserved medieval architecture, the Palace of the Dukes of Burgundy, and the famous Dijon mustard. |
||||
- **Chablis**: Famous for its world-renowned white wines, Chablis is a picturesque village surrounded by vineyards and stunning views. |
||||
- **Abbey of Fontenay**: A UNESCO World Heritage site, this Cistercian abbey dates back to the 12th century and is an example of Romanesque architecture at its best. |
||||
- **Basilica of Vézelay**: Another UNESCO site, this basilica is a key pilgrimage site and an important example of Romanesque architecture in France. |
||||
- **Clos de Vougeot**: A historic wine estate and château in the Côte de Nuits, Clos de Vougeot is at the heart of Burgundy's wine heritage. |
||||
|
||||
## Economy |
||||
- **Wine Industry**: Burgundy’s wine industry is the cornerstone of the region’s economy. The vineyards produce some of the world’s most sought-after wines, and the region is home to prestigious wine estates. |
||||
- **Agriculture**: In addition to wine production, Burgundy is also known for its agricultural output, including grain, dairy products, and livestock, especially cattle. |
||||
- **Tourism**: Burgundy attracts tourists for its wine tourism, beautiful landscapes, medieval towns, and rich history. The region is a popular destination for wine lovers, history buffs, and outdoor adventurers. |
||||
|
||||
## Climate |
||||
- **Continental Climate**: Burgundy has a continental climate with hot summers and cold winters. The region’s climate is ideal for viticulture, with warm days during the growing season and cool nights that help preserve the flavors of the grapes. |
||||
- **Average Temperatures**: Summers typically range from 20°C to 28°C (68°F to 82°F), while winters can dip to around 0°C (32°F). |
||||
|
||||
## Notable People |
||||
- **Gustave Eiffel**: Born in Dijon, Eiffel is famous for designing the Eiffel Tower in Paris. |
||||
- **Bernard Loiseau**: A renowned French chef from Burgundy, Loiseau was known for his exceptional culinary skills and Michelin-starred restaurants. |
||||
- **Romain Rolland**: The Nobel Prize-winning writer, known for his works such as *Jean-Christophe*, was born in Clamecy, Burgundy. |
||||
|
||||
## Conclusion |
||||
Bourgogne is a region that embodies the essence of French culture, combining rich history, world-class wine, exceptional cuisine, and beautiful landscapes. Whether you’re savoring a glass of Burgundy wine, exploring its medieval towns, or hiking through its scenic parks, Burgundy offers a timeless experience for travelers and connoisseurs alike. |
@ -0,0 +1,45 @@
|
||||
# Overview of Bretagne (Brittany) Region in France |
||||
|
||||
Bretagne, or Brittany, is a culturally distinct region located in the northwest of France. Known for its rugged coastline, rich history, and unique cultural heritage, Bretagne offers a fascinating blend of natural beauty and ancient traditions. |
||||
|
||||
## Geography |
||||
- **Location**: Situated on the Brittany Peninsula, bordered by the English Channel to the north, the Atlantic Ocean to the west and south, and the Normandy and Pays de la Loire regions to the east. |
||||
- **Area**: Covers approximately 27,208 square kilometers. |
||||
- **Major Cities**: Rennes (capital), Brest, Nantes (historically Breton, though administratively in Pays de la Loire today), Saint-Malo, Quimper, Lorient. |
||||
|
||||
## History |
||||
- **Celtic Origins**: Originally inhabited by the Celts, who brought their language, traditions, and culture to the region. Bretagne still maintains a strong Celtic identity. |
||||
- **Duchy of Brittany**: From the 9th to the 16th century, Brittany was an independent duchy before joining France in 1532. |
||||
- **Breton Language**: Breton (Brezhoneg) is a Celtic language still spoken by a small population, especially in rural areas and in cultural events. |
||||
|
||||
## Culture |
||||
- **Music**: Bretagne is known for its traditional Celtic music, including bagpipes, fiddles, and the bombard. The region hosts festivals like the Festival Interceltique de Lorient, which celebrates Celtic culture. |
||||
- **Cuisine**: The local cuisine includes specialties like crêpes, galettes (buckwheat pancakes), seafood, and cider (known as "cidre"). The region is famous for its oysters and mussels. |
||||
- **Festivals**: Brittany hosts several cultural festivals, such as the Fest Noz, a traditional Breton dance event, and the Breizh Festival, which celebrates Breton culture. |
||||
|
||||
## Natural Beauty |
||||
- **Coastline**: Bretagne is known for its stunning coastline with dramatic cliffs, sandy beaches, and picturesque coves. The region has more than 2,700 kilometers of coastline. |
||||
- **Mont Saint-Michel**: While technically in Normandy, it is often associated with Brittany due to its proximity. This island commune with a striking abbey is a UNESCO World Heritage site. |
||||
- **Regional Parks**: Brittany is home to several regional natural parks, such as the Armorique Regional Nature Park, known for its varied landscapes, including moors, forests, and hills. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Carnac Stones**: Prehistoric standing stones dating back to the Neolithic period, located in the town of Carnac. They are among the most famous megalithic sites in the world. |
||||
- **Fort La Latte**: A medieval fortress on the north coast of Brittany, offering incredible views of the sea. |
||||
- **Saint-Malo**: A walled port city, famous for its cobblestone streets, stunning beaches, and historical significance as a center of piracy. |
||||
|
||||
## Economy |
||||
- **Agriculture**: The region is known for its dairy farming, particularly in the production of butter and cheese. Bretagne is also famous for its apple orchards, which are used to make cider. |
||||
- **Fishing**: Historically, Brittany has been one of the most important fishing regions in France, especially for shellfish, sardines, and tuna. |
||||
- **Tourism**: The natural beauty, history, and culture make Bretagne a popular destination for tourists, with significant income coming from visitors. |
||||
|
||||
## Climate |
||||
- **Mild Climate**: Brittany experiences a temperate maritime climate, characterized by mild winters and cool summers. The region is known for frequent rainfall and variable weather. |
||||
- **Average Temperatures**: Winters rarely drop below 5°C (41°F), while summers range from 15°C to 20°C (59°F to 68°F). |
||||
|
||||
## Notable People |
||||
- **Bertrand Du Guesclin**: A famous medieval French knight and national hero. |
||||
- **Jacques Cartier**: The explorer credited with claiming Canada for France in the 16th century. |
||||
- **Yann Tiersen**: A modern musician and composer, best known for his soundtrack for the film *Amélie*. |
||||
|
||||
## Conclusion |
||||
Bretagne is a region of deep cultural significance, rich history, and extraordinary natural landscapes. Whether you’re drawn to its Celtic roots, its rugged coastline, or its historical landmarks, Brittany offers something for everyone. |
@ -0,0 +1,47 @@
|
||||
# Overview of Gascogne Region in France |
||||
|
||||
Gascogne is a historical and cultural region in southwestern France, known for its rolling hills, vineyards, charming villages, and rich heritage. It is often associated with the rustic lifestyle, gastronomy, and the famed Musketeers of Dumas’ novels. |
||||
|
||||
## Geography |
||||
- **Location**: Situated in the southwest of France, Gascogne is bordered by the regions of Aquitaine to the west and north, Midi-Pyrénées to the south, and Languedoc to the east. It also touches the Pyrenees mountains to the south. |
||||
- **Area**: The region encompasses parts of the modern-day regions of Occitanie and Nouvelle-Aquitaine. |
||||
- **Major Cities**: Auch (historical capital), Agen, Condom, Lectoure, and Eauze. |
||||
|
||||
## History |
||||
- **Roman Influence**: Gascogne was known as part of the ancient Roman province of Novempopulania. The region’s rich history is reflected in its architecture and ancient ruins. |
||||
- **Visigoths and Franks**: The region saw control by the Visigoths and later the Franks, whose influence shaped local customs and governance. |
||||
- **Duchy of Gascogne**: During the Middle Ages, Gascogne was an independent duchy before becoming part of the Kingdom of France in the 13th century. |
||||
- **The Musketeers**: Gascogne is famously associated with the “Three Musketeers” of Alexandre Dumas’ novels. The fictional characters D'Artagnan, Athos, Porthos, and Aramis are portrayed as hailing from this region. |
||||
|
||||
## Culture |
||||
- **Gascon Language**: The Gascon language, a variety of Occitan, was historically spoken in the region. Though it has declined in use, it still carries cultural significance and is a symbol of regional identity. |
||||
- **Folk Traditions**: Gascogne is known for its folk traditions, including traditional music, dances, and festivals. The region is famous for its rural festivals, celebrating everything from local history to agricultural practices. |
||||
- **Cuisine**: Gascon cuisine is renowned for its hearty and flavorful dishes. Notable dishes include *foie gras*, *confit de canard* (duck confit), and *garbure* (a rich vegetable and meat soup). The region is also famous for its Armagnac, a brandy that is produced using traditional methods. |
||||
|
||||
## Natural Beauty |
||||
- **Rolling Hills and Vineyards**: Gascogne is known for its picturesque landscapes, featuring rolling hills, vast forests, and scenic vineyards. The region is ideal for hiking, cycling, and exploring the rural countryside. |
||||
- **The Pyrenees**: The southern border of Gascogne is defined by the Pyrenees mountains, which offer opportunities for outdoor activities like hiking and skiing. |
||||
- **Rivers and Lakes**: Gascogne is crisscrossed by rivers such as the Garonne and the Adour, making the region fertile for agriculture and creating stunning natural scenery. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Auch Cathedral**: A UNESCO World Heritage site, the Cathedral of Sainte-Marie in Auch is an impressive Gothic structure with a magnificent staircase leading to the church. |
||||
- **D’Artagnan’s Birthplace**: The town of Lupiac, where D'Artagnan, the hero of Alexandre Dumas’ *The Three Musketeers*, was born, attracts fans of the novels and history alike. |
||||
- **Château de Larressingle**: Often referred to as one of the most beautiful fortified villages in France, this medieval castle offers a glimpse into the region's past. |
||||
- **Armagnac Distilleries**: Visitors can tour the distilleries that produce the famous Armagnac brandy, with opportunities to taste and learn about the traditional distilling process. |
||||
|
||||
## Economy |
||||
- **Agriculture**: Gascogne is an important agricultural region, known for its production of ducks, geese (for foie gras), and pigs. The fertile soil supports the cultivation of corn, sunflowers, and grapes. |
||||
- **Wine and Brandy**: The region is famous for its vineyards and the production of Armagnac, a type of brandy. The wines of the region, especially those from the Côtes de Gascogne, are increasingly recognized for their quality. |
||||
- **Tourism**: With its rich history, natural beauty, and culinary traditions, Gascogne attracts tourists who are looking to experience authentic French rural life, enjoy local food and wine, and explore historical landmarks. |
||||
|
||||
## Climate |
||||
- **Temperate Climate**: Gascogne enjoys a temperate climate, with warm summers and mild winters. The southern part of the region, near the Pyrenees, has a more Mediterranean climate, while the northern part experiences a more oceanic influence. |
||||
- **Average Temperatures**: Summer temperatures typically range from 20°C to 30°C (68°F to 86°F), while winters are generally mild with temperatures ranging from 5°C to 10°C (41°F to 50°F). |
||||
|
||||
## Notable People |
||||
- **D'Artagnan**: The fictional hero of *The Three Musketeers*, D'Artagnan is one of the most famous characters associated with Gascogne, although based on a real person. |
||||
- **Charles de Batz-Castelmore d'Artagnan**: The historical figure who inspired Dumas' D'Artagnan, born in Gascogne, was a nobleman and soldier. |
||||
- **Henri IV**: The King of France, born in Pau (near Gascogne), famously said, “Paris is worth a Mass” and was instrumental in uniting France after years of religious conflict. |
||||
|
||||
## Conclusion |
||||
Gascogne is a region that offers a unique blend of history, culture, and natural beauty. From its medieval villages and legendary connections to the Musketeers, to its rich culinary traditions and scenic landscapes, Gascogne provides a true taste of southwestern France. Whether exploring its vineyards, tasting Armagnac, or immersing yourself in its rural charm, Gascogne is a region full of life and tradition. |
@ -0,0 +1,47 @@
|
||||
# Overview of Île-de-France Region in France |
||||
|
||||
Île-de-France is the central region of France, encompassing the nation’s capital, Paris. As the political, economic, and cultural heart of France, this region is not only historically significant but also a global center for art, fashion, and business. |
||||
|
||||
## Geography |
||||
- **Location**: Situated in the north-central part of France, Île-de-France is surrounded by the regions of Normandy, Hauts-de-France, Grand Est, Bourgogne-Franche-Comté, and Centre-Val de Loire. |
||||
- **Area**: Covers approximately 12,012 square kilometers. |
||||
- **Major Cities**: Paris (capital of both the region and France), Versailles, Créteil, Nanterre, and Montreuil. |
||||
|
||||
## History |
||||
- **Royal Legacy**: Île-de-France has historically been the core of the French monarchy. It was the heart of the Capetian Dynasty, beginning in the 10th century. The region is home to many royal palaces and historic sites. |
||||
- **French Revolution**: Paris, located in Île-de-France, was the focal point of the French Revolution in the late 18th century. Important revolutionary events, such as the storming of the Bastille, took place here. |
||||
- **World War II**: During WWII, Paris was occupied by Nazi forces from 1940 to 1944. The city was liberated in August 1944 by Allied forces. |
||||
|
||||
## Culture |
||||
- **Capital of Culture**: Paris is widely recognized as one of the world’s greatest cultural capitals. It is home to numerous world-class museums, theaters, and art galleries, including the Louvre, Musée d'Orsay, and the Centre Pompidou. |
||||
- **Fashion and Art**: Paris is the global capital of fashion, known for haute couture, and hosts prestigious fashion events like Paris Fashion Week. The city has also been the center of the art world for centuries, influencing movements such as Impressionism and Surrealism. |
||||
- **Gastronomy**: Île-de-France is known for its fine dining, with Michelin-starred restaurants, cafés, and bistros. The region is also famous for pâtisseries, including macarons and éclairs, and its traditional French dishes such as coq au vin and escargot. |
||||
|
||||
## Natural Beauty |
||||
- **Seine River**: The Seine River flows through Paris and the Île-de-France region, providing beautiful riverbanks and parks, perfect for leisure activities like boat tours, picnicking, and walking along its iconic bridges. |
||||
- **Bois de Boulogne & Bois de Vincennes**: These expansive public parks on the outskirts of Paris offer lush green spaces for recreation, hiking, and cycling. |
||||
- **Versailles Gardens**: The Gardens of the Palace of Versailles, with their meticulously designed lawns, fountains, and sculptures, are a UNESCO World Heritage site and one of the most famous gardens in the world. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Eiffel Tower**: The most iconic landmark in Paris, the Eiffel Tower attracts millions of visitors every year. It stands as a symbol of France and offers stunning panoramic views of the city. |
||||
- **Notre-Dame Cathedral**: A masterpiece of Gothic architecture, the Notre-Dame Cathedral is one of the most famous religious sites in the world, located on the Île de la Cité in the Seine. |
||||
- **Palace of Versailles**: A short trip from Paris, the Palace of Versailles is one of the grandest royal palaces in Europe, famous for its opulent architecture and the Hall of Mirrors. |
||||
- **Sainte-Chapelle**: Known for its stunning stained-glass windows, this Gothic chapel in Paris is one of the most beautiful examples of medieval architecture. |
||||
- **The Louvre**: The world’s largest art museum, the Louvre in Paris, is home to thousands of works of art, including Leonardo da Vinci's *Mona Lisa* and the *Venus de Milo*. |
||||
|
||||
## Economy |
||||
- **Economic Powerhouse**: Île-de-France is the economic center of France, contributing a significant portion to the country’s GDP. It is home to many multinational companies and is the main business hub in France. |
||||
- **Finance and Technology**: The region has a thriving financial sector centered in La Défense, Paris’s business district. It also hosts tech startups and innovations, particularly in areas like AI, fintech, and digital media. |
||||
- **Tourism**: Paris is one of the world’s top tourist destinations, attracting millions of visitors each year. The region’s tourism is a key driver of the economy, with tourists coming for the history, culture, and attractions. |
||||
|
||||
## Climate |
||||
- **Oceanic Climate**: Île-de-France experiences a temperate oceanic climate with mild winters and warm summers. Paris typically has rainy weather in the autumn and spring, with summer temperatures ranging from 18°C to 25°C (64°F to 77°F). |
||||
- **Average Temperatures**: Winter temperatures can hover around 3°C to 7°C (37°F to 45°F), while summer highs can range from 25°C to 30°C (77°F to 86°F). |
||||
|
||||
## Notable People |
||||
- **Napoleon Bonaparte**: Born on the island of Corsica, Napoleon became the Emperor of France and played a pivotal role in shaping the history of France and Europe. His influence is still felt throughout Île-de-France. |
||||
- **Marcel Proust**: The famous French writer, best known for his work *In Search of Lost Time*, lived and wrote in Paris during the late 19th and early 20th centuries. |
||||
- **Édith Piaf**: One of France’s most beloved singers, Piaf was born and raised in Paris and became an international icon of French music. |
||||
|
||||
## Conclusion |
||||
Île-de-France is the heart of France, blending rich history, cultural innovation, and economic power. With Paris at its center, the region is a global leader in fashion, art, and business. From historic landmarks like the Eiffel Tower and Versailles to its world-class museums and gastronomic delights, Île-de-France is a region that offers something for every visitor, making it a must-see destination for travelers. |
@ -0,0 +1,46 @@
|
||||
# Overview of Languedoc Region in France |
||||
|
||||
Languedoc is a historic and culturally rich region located in the southern part of France, known for its Mediterranean coastline, picturesque villages, and deep-rooted traditions. It is often celebrated for its wines, beaches, and beautiful landscapes. |
||||
|
||||
## Geography |
||||
- **Location**: Languedoc is situated in the southernmost part of France, bordered by the Mediterranean Sea to the east, the regions of Provence-Alpes-Côte d'Azur, Rhône-Alpes, and Auvergne-Rhône-Alpes to the north, and Midi-Pyrénées to the west. |
||||
- **Area**: Covers approximately 27,000 square kilometers. |
||||
- **Major Cities**: Montpellier (capital), Nîmes, Perpignan, Carcassonne, Béziers, and Sète. |
||||
|
||||
## History |
||||
- **Roman Influence**: Languedoc has a strong Roman heritage, with many ancient ruins, including the well-preserved Roman aqueduct, Pont du Gard, and the ancient city of Nîmes. |
||||
- **Cathar History**: In the Middle Ages, Languedoc was the center of the Cathar religious movement. The region was the focus of the Albigensian Crusade (1209-1229), a military campaign aimed at eradicating Catharism. |
||||
- **Rural Culture**: Historically, the region was a center of agriculture and viticulture, and it remains deeply connected to farming traditions, particularly wine production. |
||||
|
||||
## Culture |
||||
- **Language**: The Occitan language, historically spoken in the region, was once widely used, and it still carries cultural significance today. Languedoc’s name itself derives from the Occitan phrase *"langue d'oc,"* meaning “language of yes.” |
||||
- **Cuisine**: Languedoc cuisine is characterized by its Mediterranean influence, with seafood, olive oil, and fresh produce playing a central role. Famous dishes include *cassoulet* (a rich stew made with beans and meats), *brandade de morue* (a cod and garlic dish), and *tapenade* (olive spread). |
||||
- **Festivals**: The region is known for its vibrant festivals, such as the Feria de Nîmes, which celebrates bullfighting and the culture of southern France, and the Carcassonne Festival, which features music, theater, and other arts. |
||||
|
||||
## Natural Beauty |
||||
- **Mediterranean Coast**: The region boasts a stunning coastline along the Mediterranean Sea, with beautiful beaches like those in Cap d'Agde and the scenic Étang de Thau. |
||||
- **Languedoc-Roussillon Wine Route**: The Languedoc region is one of the largest wine-producing areas in France, and its wine route takes visitors through vineyards, picturesque villages, and wine estates. |
||||
- **Cévennes National Park**: This UNESCO-listed park is part of the Massif Central and offers stunning mountain landscapes, gorges, and wildlife, ideal for hiking and nature lovers. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Carcassonne**: A UNESCO World Heritage site, the medieval fortress of Carcassonne is one of France’s most iconic landmarks. The double-walled citadel offers a glimpse into the past with its preserved medieval architecture. |
||||
- **Pont du Gard**: A well-preserved Roman aqueduct, the Pont du Gard is a UNESCO World Heritage site and an engineering marvel of antiquity, offering scenic views of the surrounding landscape. |
||||
- **Nîmes**: Known as the "French Rome," Nîmes is home to remarkable Roman monuments, including the Arena of Nîmes (a Roman amphitheater), the Temple of Diana, and the Maison Carrée. |
||||
- **Sète**: A picturesque coastal town known for its canals, seafood, and vibrant cultural scene, Sète is often referred to as the "Venice of Languedoc." |
||||
- **Abbey of Saint-Guilhem-le-Désert**: This UNESCO World Heritage site is a well-preserved medieval abbey located in the stunning Hérault Valley. |
||||
|
||||
## Economy |
||||
- **Wine Production**: Languedoc is one of the largest wine-producing regions in France, known for producing a wide variety of wines, including reds, whites, and rosés. The region is famous for its *AOC* (Appellation d'Origine Contrôlée) wines, such as those from the Minervois, Faugères, and Corbières appellations. |
||||
- **Agriculture**: In addition to wine, Languedoc is known for producing fruits (particularly melons, peaches, and cherries), olives, and lavender. It is also a significant producer of sheep and goat cheese. |
||||
- **Tourism**: With its Mediterranean coastline, historic cities, and scenic landscapes, Languedoc is a popular tourist destination. The region’s vineyards and charming towns attract visitors for wine tourism, cultural exploration, and outdoor activities. |
||||
|
||||
## Climate |
||||
- **Mediterranean Climate**: Languedoc enjoys a Mediterranean climate, characterized by hot, dry summers and mild, wet winters. The region’s climate is perfect for vineyards and outdoor activities. |
||||
- **Average Temperatures**: Summer temperatures typically range from 25°C to 35°C (77°F to 95°F), while winters are mild, with temperatures ranging from 8°C to 15°C (46°F to 59°F). |
||||
|
||||
## Notable People |
||||
- **Georges Brassens**: The famous French singer-songwriter and poet was born in Sète, and his legacy is celebrated in the town with a museum and annual festivals. |
||||
- **Pierre-Paul Riquet**: The engineer who designed the Canal du Midi, which connects the Garonne River to the Mediterranean, greatly impacting the region’s agriculture and trade during the 17th century. |
||||
|
||||
## Conclusion |
||||
Languedoc is a region rich in history, culture, and natural beauty. From its Roman heritage and medieval fortresses to its beautiful beaches and vineyards, Languedoc offers a unique blend of ancient traditions and modern charm. Whether you’re enjoying a glass of wine, exploring historic towns, or relaxing by the sea, Languedoc provides an unforgettable experience for travelers. |
@ -0,0 +1,48 @@
|
||||
# Overview of Normandie Region in France |
||||
|
||||
Normandie (Normandy) is a historic and picturesque region located in the northern part of France. Known for its dramatic coastline, rich history, and cultural heritage, Normandy plays a central role in both French and world history. |
||||
|
||||
## Geography |
||||
- **Location**: Situated in the northernmost part of France, Normandy is bordered by the English Channel to the north, the regions of Île-de-France, Centre-Val de Loire, and Pays de la Loire to the south, and Brittany to the west. |
||||
- **Area**: Covers approximately 29,907 square kilometers. |
||||
- **Major Cities**: Rouen (capital), Caen, Le Havre, Cherbourg, and Dieppe. |
||||
|
||||
## History |
||||
- **Viking Heritage**: Normandy gets its name from the Norsemen (Vikings), who settled in the region in the 9th and 10th centuries. The region became known as "Normandy" after the Vikings (Normans) were granted land by the King of France. |
||||
- **William the Conqueror**: One of the most famous historical figures associated with Normandy is William the Conqueror, who, as Duke of Normandy, successfully invaded England in 1066 and became the King of England. |
||||
- **D-Day and WWII**: Normandy is internationally known for the D-Day landings on June 6, 1944, during World War II. The Allied invasion of Normandy was a pivotal event in the liberation of Western Europe from Nazi occupation. The beaches, such as Omaha Beach and Utah Beach, are significant historical sites. |
||||
|
||||
## Culture |
||||
- **Language**: The regional language of Normandy is Norman, a variety of the Old French language with influences from Old Norse. However, French is the primary language spoken today. |
||||
- **Cuisine**: Normandy cuisine is influenced by its coastal location, featuring seafood like oysters, mussels, and scallops. The region is also famous for its apples, which are used to make cider (cidre) and the famous apple brandy, Calvados. Dishes such as *coquilles Saint-Jacques* (scallops) and *camembert cheese* are iconic. |
||||
- **Folk Traditions**: The region is known for its folk traditions, including festivals, music, and dances that celebrate its Viking and maritime heritage. |
||||
|
||||
## Natural Beauty |
||||
- **Dramatic Coastline**: Normandy is known for its stunning coastline, including cliffs, sandy beaches, and small coves. The cliffs at Etretat are among the most photographed natural sites in France. |
||||
- **Normandy Beaches**: Famous for their historical significance, Normandy’s beaches are also a popular destination for travelers. The beaches of Omaha, Utah, and Juno were sites of the D-Day landings. |
||||
- **Countryside and Farming**: Normandy is also known for its green countryside, dotted with rolling hills, fields, and traditional farmhouses. The region's fertile land is perfect for the production of dairy products, apples, and crops. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Mont Saint-Michel**: A UNESCO World Heritage site, Mont Saint-Michel is one of France’s most iconic landmarks. This island commune features a medieval abbey perched atop a rocky hill, surrounded by tidal waters, creating a stunning visual. |
||||
- **D-Day Landing Beaches**: The beaches where the D-Day landings took place, such as Utah Beach, Omaha Beach, and Sword Beach, are significant historical sites and are home to several museums, memorials, and cemeteries dedicated to the soldiers who fought there. |
||||
- **Rouen Cathedral**: A masterpiece of Gothic architecture, the Rouen Cathedral is famous for its stunning facade and for being the subject of a series of paintings by Claude Monet. |
||||
- **Château de Caen**: Built by William the Conqueror in the 11th century, this castle in Caen is one of the largest medieval fortresses in Europe. |
||||
- **Jardin des Plantes de Rouen**: A botanical garden in Rouen that showcases a variety of plant species, it is a great place to explore nature and relax. |
||||
|
||||
## Economy |
||||
- **Agriculture**: Normandy is a major agricultural region, known for dairy farming, particularly the production of butter and cheese. The region is famous for its dairy products, with cheeses like Camembert, Livarot, and Pont-l’Évêque being integral to the local economy. |
||||
- **Cider Production**: Normandy is one of the primary cider-producing regions in France, with a long tradition of apple orchards. The region’s cider is often made from a variety of apples, resulting in dry, sweet, or sparkling ciders. |
||||
- **Fishing and Maritime**: The region’s location along the English Channel makes it a significant player in France’s fishing industry. Ports like Le Havre and Cherbourg are vital to the French maritime economy. |
||||
- **Tourism**: With its rich historical sites, picturesque countryside, and seaside attractions, Normandy is a popular tourist destination, drawing visitors to its beaches, memorials, and unique landmarks. |
||||
|
||||
## Climate |
||||
- **Oceanic Climate**: Normandy enjoys an oceanic climate, with mild winters and cool summers. The weather is influenced by the proximity to the English Channel, often resulting in cloudy, rainy days. |
||||
- **Average Temperatures**: Summers generally range from 18°C to 22°C (64°F to 72°F), while winters are mild, with temperatures ranging from 3°C to 7°C (37°F to 45°F). |
||||
|
||||
## Notable People |
||||
- **William the Conqueror**: Born in Falaise, Normandy, William the Conqueror is one of the most famous figures in history, known for his conquest of England in 1066. |
||||
- **Joan of Arc**: A national heroine of France, Joan of Arc was born in Domrémy, in Lorraine, and played a significant role in the Hundred Years' War; her link to Normandy is that she was tried and executed in Rouen in 1431. |
||||
- **Gustave Flaubert**: The renowned French writer, best known for his novel *Madame Bovary*, was born in Rouen, Normandy. |
||||
|
||||
## Conclusion |
||||
Normandy is a region rich in history, culture, and natural beauty. From the stunning Mont Saint-Michel and the beaches of the D-Day landings to the pastoral landscapes and delicious cuisine, Normandy offers a mix of historical depth and natural charm. Whether exploring its historic towns, enjoying fresh seafood and cider, or paying tribute to its WWII heritage, Normandy provides a unique and unforgettable experience. |
@ -0,0 +1,48 @@
|
||||
# Overview of Poitou Region in France |
||||
|
||||
Poitou is a historic region located in the western part of France, known for its rich cultural heritage, beautiful landscapes, and historical significance. Today, it forms part of the Nouvelle-Aquitaine region, but it retains its unique identity through its history, architecture, and traditions. |
||||
|
||||
## Geography |
||||
- **Location**: Poitou is situated in the western part of France, bordered by the Atlantic Ocean to the west, the regions of Pays de la Loire to the north, Aquitaine to the south, and Centre-Val de Loire to the east. |
||||
- **Area**: Covers approximately 10,000 square kilometers. |
||||
- **Major Cities**: Poitiers (capital), La Rochelle, Niort, and Châtellerault. |
||||
|
||||
## History |
||||
- **Medieval Influence**: Poitou was an important region during the medieval period, especially known for its connection to the powerful counts of Poitou and the Dukes of Aquitaine. The region was also the birthplace of Eleanor of Aquitaine, one of the most influential women of the medieval period. |
||||
- **Anglo-French Conflict**: Poitou played a significant role during the Hundred Years' War, with both the English and the French vying for control of the region. It was once part of the Angevin Empire, which included large parts of modern-day France and England. |
||||
- **Renaissance and Religious Wars**: During the Renaissance, Poitou became a center for intellectual and cultural development. It also saw significant involvement in the Wars of Religion between Catholics and Protestants in the 16th century. |
||||
|
||||
## Culture |
||||
- **Language**: The traditional language of Poitou is Poitevin, a regional langue d'oïl closely related to French, which was widely spoken in the region in medieval times. However, standard French is predominantly spoken today. |
||||
- **Cuisine**: Poitou cuisine is characterized by its use of fresh local ingredients, with specialties such as *mogettes* (white beans), *salmis* (a stew of game), and the region’s famous cheeses, including *Chabichou du Poitou*, a soft, creamy goat cheese. The region is also known for its seafood, particularly oysters from the Marennes-Oléron area. |
||||
- **Folk Traditions**: Poitou has a rich tradition of folk music and dance, with regional festivals celebrating the local culture. The region’s craft heritage, including pottery, woodwork, and textiles, continues to be celebrated. |
||||
|
||||
## Natural Beauty |
||||
- **Atlantic Coast**: Poitou has a beautiful coastline along the Atlantic Ocean, with scenic beaches and coastal landscapes. The island of Île de Ré, accessible by bridge from La Rochelle, is a popular destination for its charming villages, vineyards, and sandy beaches. |
||||
- **Marais Poitevin**: Also known as the “Green Venice,” the Marais Poitevin is a vast marshland and wetland area that is crisscrossed with canals. It is a paradise for nature lovers, offering opportunities for boating, birdwatching, and hiking. |
||||
- **Countryside**: The region also features gentle rolling hills, vineyards, and forests. The Poitou-Charentes region is known for its peaceful, rural landscapes, making it ideal for outdoor activities like cycling, hiking, and nature walks. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Poitiers**: The historic city of Poitiers is famous for its medieval architecture, including the Church of Saint-Hilaire-le-Grand, a UNESCO World Heritage site, and the Palais des Ducs d'Aquitaine, a former royal palace. |
||||
- **La Rochelle**: Known for its well-preserved Old Port, La Rochelle is a charming coastal town with a rich maritime history. The city's landmarks include the iconic La Rochelle Towers and the Maritime Museum. |
||||
- **Futuroscope**: Located near Poitiers, Futuroscope is one of France’s most popular theme parks, offering futuristic attractions, multimedia shows, and cutting-edge technology exhibitions. |
||||
- **Île de Ré**: This picturesque island is known for its beautiful beaches, historic lighthouses, and charming villages. It is a popular vacation spot for tourists seeking relaxation and outdoor activities. |
||||
- **Château de Niort**: This medieval fortress in Niort dates back to the 12th century and offers visitors a glimpse into the region’s medieval history. |
||||
|
||||
## Economy |
||||
- **Agriculture**: Poitou is traditionally an agricultural region, known for its livestock farming, particularly the production of Charolais cattle, as well as the cultivation of cereals, potatoes, and sunflowers. The region also produces a variety of fruits, including apples and grapes. |
||||
- **Wine Production**: The region is part of the larger wine-growing area of Charentes, which is famous for producing Cognac, a renowned brandy. The vineyards of the Charente and Charente-Maritime departments are integral to the local economy. |
||||
- **Tourism**: Poitou’s rich history, natural beauty, and charming cities attract many tourists. La Rochelle, Poitiers, and Île de Ré are major tourist destinations, while the Marais Poitevin and the coastal areas draw those interested in nature and outdoor activities. |
||||
- **Cognac Production**: Poitou is at the heart of the Cognac-producing region, with many distilleries located around the Charente River, where the famous spirit is made from grapes and aged for years in oak barrels. |
||||
|
||||
## Climate |
||||
- **Oceanic Climate**: Poitou enjoys an oceanic climate with mild winters and warm summers, influenced by the Atlantic Ocean. Coastal areas experience more moderate temperatures, while inland regions can have slightly warmer summers. |
||||
- **Average Temperatures**: Summer temperatures typically range from 18°C to 25°C (64°F to 77°F), while winters are generally mild, with temperatures ranging from 5°C to 10°C (41°F to 50°F). |
||||
|
||||
## Notable People |
||||
- **Eleanor of Aquitaine**: Born in Poitou, Eleanor was one of the most powerful and influential women in medieval Europe. She was Queen of France and later Queen of England and played a key role in the politics of both kingdoms. |
||||
- **François Rabelais**: The famous Renaissance writer, best known for his satirical work *Gargantua and Pantagruel*, spent formative years in Poitou — as a friar at Fontenay-le-Comte and a student at Poitiers — and his works remain an important part of French literature. |
||||
- **René Descartes**: One of the most influential philosophers of the 17th century, Descartes spent much of his early life in Poitou, and his legacy continues to shape modern philosophy. |
||||
|
||||
## Conclusion |
||||
Poitou is a region rich in history, culture, and natural beauty. From its medieval towns and historic landmarks to its picturesque countryside and coastal beauty, Poitou offers a unique blend of traditions and modern attractions. Whether exploring the city of Poitiers, enjoying the fresh produce and local wine, or relaxing on the beaches of Île de Ré, Poitou provides an unforgettable experience for visitors. |
@ -0,0 +1,50 @@
|
||||
# Overview of Provence Region in France |
||||
|
||||
Provence is a stunning region in the southeastern part of France, renowned for its breathtaking landscapes, rich history, vibrant culture, and Mediterranean climate. It is one of the most beloved regions in France, known for its lavender fields, vineyards, ancient Roman ruins, and charming villages. |
||||
|
||||
## Geography |
||||
- **Location**: Provence is located in the southeastern part of France, bordered by the Mediterranean Sea to the south, the Rhône River to the west, the Alps to the north, and the region of Côte d'Azur to the east. |
||||
- **Area**: Covers approximately 31,400 square kilometers. |
||||
- **Major Cities**: Marseille (capital), Aix-en-Provence, Avignon, Arles, and Toulon. |
||||
|
||||
## History |
||||
- **Roman Heritage**: Provence has a rich Roman history, with the city of Arles serving as a significant Roman settlement; its amphitheatre and ancient theatre are among the best-preserved Roman monuments in France. The Arena of Nîmes and the Pont du Gard lie just across the Rhône to the west. |
||||
- **Medieval Influence**: Provence was part of the Kingdom of Arles in the Middle Ages, and the neighbouring Comtat Venaissin became a papal territory. The region was also home to the Papacy for a time, with the popes residing in Avignon from 1309 to 1377. |
||||
- **Renaissance and Revolution**: Provence was a key region during the Renaissance, flourishing in the arts and culture. During the French Revolution, Provence played a significant role, with several uprisings and political changes. |
||||
|
||||
## Culture |
||||
- **Language**: The traditional language of Provence is Provençal, a variety of the Occitan language. While French is predominantly spoken today, Provençal still has cultural significance and is used in regional poetry, music, and literature. |
||||
- **Cuisine**: Provence is famous for its Mediterranean cuisine, emphasizing fresh vegetables, olive oil, herbs, seafood, and wine. Popular dishes include *bouillabaisse* (a fish stew), *ratatouille* (vegetable medley), *tapenade* (olive paste), and *pissaladière* (onion tart). |
||||
- **Wine**: The region is renowned for its wine production, particularly rosé wines from the Côtes de Provence, as well as reds and whites. The vineyards of Provence benefit from the Mediterranean climate, producing wines with distinctive flavors. |
||||
- **Folk Traditions**: Provence is known for its rich folk traditions, including festivals, music, dance, and crafts. The region celebrates a variety of traditional events, such as the Festival of the Calissons in Aix-en-Provence, and the Fête de la Lavande (Lavender Festival) in Sault. |
||||
|
||||
## Natural Beauty |
||||
- **Mediterranean Coast**: Provence boasts a beautiful coastline along the Mediterranean, with stunning beaches, rocky coves, and picturesque seaside towns such as Cassis, Sainte-Maxime, and Bandol. |
||||
- **Lavender Fields**: The lavender fields of Provence are one of the region's most iconic features. The fields bloom in vibrant purple hues during the summer months and are a major tourist attraction. |
||||
- **Alps and Vineyards**: To the north of Provence, the landscape rises into the Alps, offering spectacular mountain scenery, hiking, and skiing opportunities. The rolling hills and vineyards of the region produce some of the finest wines in France. |
||||
- **Gorges du Verdon**: Known as the "Grand Canyon of Europe," the Gorges du Verdon is a breathtaking river canyon with turquoise waters, cliffs, and stunning landscapes. It is a popular destination for outdoor activities like hiking, kayaking, and rock climbing. |
||||
|
||||
## Landmarks and Attractions |
||||
- **Palace of the Popes (Palais des Papes)**: Located in Avignon, this UNESCO World Heritage site is one of the largest and most important medieval Gothic buildings in Europe. It was the residence of popes during the 14th century. |
||||
- **Pont du Gard**: An ancient Roman aqueduct bridge located near Nîmes, the Pont du Gard is a UNESCO World Heritage site and an engineering marvel. |
||||
- **Roman Arena of Nîmes**: One of the best-preserved Roman amphitheaters, the Arena of Nîmes in Nîmes is still used for events today, including bullfights and concerts. |
||||
- **Château des Baux-de-Provence**: A ruined medieval castle perched atop the hills of Les Baux-de-Provence, offering panoramic views of the surrounding landscape. |
||||
- **Cassis and Calanques National Park**: The seaside town of Cassis is famous for its beautiful harbor and access to the Calanques National Park, a stunning area of limestone cliffs, turquoise waters, and hidden coves. |
||||
|
||||
## Economy |
||||
- **Agriculture**: Provence is known for its agricultural production, including the cultivation of olives, lavender, tomatoes, and herbs such as thyme and rosemary. Olive oil production is a key industry, and the region’s lavender fields are famous worldwide. |
||||
- **Wine Production**: Provence is one of the most important wine regions in France, especially known for its rosé wines. Vineyards are spread throughout the region, including areas like Côtes de Provence, Bandol, and Cassis. |
||||
- **Tourism**: Tourism is a major part of Provence's economy, with millions of visitors flocking to the region for its beaches, lavender fields, Roman ruins, and charming towns. The region’s Mediterranean climate and picturesque landscapes make it a year-round destination. |
||||
- **Crafts and Industry**: Provence is known for its artisanal crafts, such as pottery, textiles, and perfume making, particularly in the town of Grasse, which is renowned as the perfume capital of the world. |
||||
|
||||
## Climate |
||||
- **Mediterranean Climate**: Provence enjoys a Mediterranean climate, characterized by hot, dry summers and mild, wet winters. This climate is ideal for growing grapes, olives, and lavender, and contributes to the region’s appeal as a tourist destination. |
||||
- **Average Temperatures**: Summers are typically hot, with temperatures ranging from 25°C to 35°C (77°F to 95°F), while winters are mild, with temperatures ranging from 5°C to 15°C (41°F to 59°F). |
||||
|
||||
## Notable People |
||||
- **Paul Cézanne**: A famous Post-Impressionist painter, Cézanne was born in Aix-en-Provence and is closely associated with the landscapes of the region. His works, particularly those depicting the Mont Sainte-Victoire mountain, are iconic in the art world. |
||||
- **Marcel Pagnol**: A renowned writer, playwright, and filmmaker, Pagnol was born in Aubagne and is known for his works about Provençal life, including *Marius*, *Fanny*, and *César*, as well as his memoirs. |
||||
- **Vincent van Gogh**: The Dutch painter spent a year in the town of Saint-Rémy-de-Provence, where he produced some of his most famous works, including *Starry Night* and *Irises*. |
||||
|
||||
## Conclusion |
||||
Provence is a region that captivates with its stunning landscapes, rich history, and vibrant culture. From the lavender fields and Mediterranean beaches to the Roman ruins and charming villages, Provence offers something for everyone. Whether you're visiting for the cuisine, the wine, the history, or simply to relax in its beautiful surroundings, Provence is a timeless and unforgettable destination. |
@ -0,0 +1,235 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "fad6ee3f-45b8-4ac3-aa39-4a44dac91994", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Creating Text Embeddings From a Text File\n", |
||||
"- Loading data using TextLoader\n", |
||||
"- Splitting into chunks using CharacterTextSplitter\n", |
||||
"- Converting chunks into vector embeddings and creating a vectorstore\n", |
||||
"- Retrieving, reducing dimensions to 2D and displaying text embeddings" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "33b79f0d-7bd5-4e82-9295-2cc5cfa9495b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "391d12b3-ea25-4c66-93ba-71ef7c590be3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"from langchain.document_loaders import DirectoryLoader, TextLoader\n", |
||||
"from langchain.text_splitter import CharacterTextSplitter\n", |
||||
"from langchain.schema import Document\n", |
||||
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", |
||||
"from langchain.embeddings import HuggingFaceEmbeddings\n", |
||||
"from langchain_chroma import Chroma\n", |
||||
"import numpy as np\n", |
||||
"from sklearn.manifold import TSNE\n", |
||||
"import plotly.graph_objects as go" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "365d4346-bcf7-48b3-be13-b492f1877fab", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"db_name = \"my_vector_db\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "93887c1e-fb5e-4f9a-95f6-91a284e49695", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "86289eb8-25d8-405f-b1bb-3d9d9fed8671", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"loader = TextLoader(\"data.txt\", encoding=\"utf-8\")\n", |
||||
"data = loader.load()\n", |
||||
"\n", |
||||
"documents = []\n", |
||||
"for text in data:\n", |
||||
" documents.append(text)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "32320fff-2321-40ea-9b7d-294dc2dfba3a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"text_splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=5)\n", |
||||
"chunks = text_splitter.split_documents(documents)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fce762a5-4c78-4102-ab55-f95ee0c97286", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"len(chunks)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ddb5bc12-af30-476d-bbbb-f91a3ae8af2f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"embeddings = OpenAIEmbeddings()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "75ba81ec-9178-4ce4-83e2-82f937c85902", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"if os.path.exists(db_name):\n", |
||||
" Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c3ca2632-a8b3-4e7e-8370-d91579d31c23", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", |
||||
"print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0de67066-73f5-446f-9033-a00d45b0cdc1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get one vector and find how many dimensions it has\n", |
||||
"\n", |
||||
"collection = vectorstore._collection\n", |
||||
"sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0] # represents a single vector\n", |
||||
"dimensions = len(sample_embedding)\n", |
||||
"print(f\"The vectors have {dimensions:,} dimensions\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e50d972c-d740-4f0a-8bc2-e55ebe462a41", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"sample_embedding" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "aa96105d-b882-48d9-b088-6aab5db7b1e9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"result = collection.get(include=['embeddings','documents'])\n", |
||||
"vectors = np.array(result['embeddings']) \n", |
||||
"documents = result['documents']" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "213b4cf2-db0a-4610-8d8f-97607996ed17", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Reduce dimensionality to 2D using t-SNE\n", |
||||
"tsne = TSNE(n_components=2,perplexity=5, random_state=42)\n", |
||||
"reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
"# Create the 2D scatter plot\n", |
||||
"fig = go.Figure(data=[go.Scatter(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5, opacity=0.8),\n", |
||||
" text=[f\"Text: {d[:200]}...\" for d in documents],\n", |
||||
" hoverinfo='text'\n", |
||||
")])\n", |
||||
"\n", |
||||
"fig.update_layout(\n", |
||||
" title='2D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x',yaxis_title='y'),\n", |
||||
" width=800,\n", |
||||
" height=600,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
")\n", |
||||
"\n", |
||||
"fig.show()\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7d13aa60-da3e-4c61-af69-1ba9087e0181", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,283 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Import documents exported from Evernote to a vectorstore\n", |
||||
"### Use OpenAI file search with responses API\n", |
||||
"#### Prerequisite steps\n", |
||||
"* exported notes from your Evernote notebook as html \n", |
||||
"* converted the notes further to md-files and removed broken image links (using Python/AI)\n", |
||||
"* the files are named with note titles\n", |
||||
"\n", |
||||
"Files are in one folder.\n", |
||||
"\n", |
||||
"\n", |
||||
"##### Query ChromaDB vectorstore\n", |
||||
"I tried to accomplish this task with RAG like the example by https://github.com/ed-donner/llm_engineering/commits?author=dinorrusso.\n", |
||||
"\n", |
||||
"I thought this to be a trivial task, but it was not 😃 That example uses Ollama running locally.\n", |
||||
"Even though the retriever had the information required, it was dropped from the answer.\n", |
||||
"\n", |
||||
"I then tried Chroma + OpenAI. After several attempts, I succeeded in creating a vectorstore and querying it. That's it for this time.\n", |
||||
"\n", |
||||
"##### Openai vectorstore, see bottom of the notebook\n", |
||||
"One attempt was to use OpenAI's fileSearch-tool which seemed pretty straightforward.\n", |
||||
"The downside: file uploads did not always succeed. The code is kept here as a reference." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Imports\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import gradio as gr\n", |
||||
"import openai\n", |
||||
"import chromadb\n", |
||||
"from chromadb.config import Settings\n", |
||||
"import os" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Load files to vectorstore" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", |
||||
"openai.api_key = os.environ['OPENAI_API_KEY']\n", |
||||
"\n", |
||||
"def chunk_text(text, max_tokens=2000):\n", |
||||
" words = text.split()\n", |
||||
" chunks = []\n", |
||||
" current_chunk = []\n", |
||||
" current_length = 0\n", |
||||
"\n", |
||||
" for word in words:\n", |
||||
" current_length += len(word) + 1 # +1 for the space\n", |
||||
" if current_length > max_tokens:\n", |
||||
" chunks.append(\" \".join(current_chunk))\n", |
||||
" current_chunk = [word]\n", |
||||
" current_length = len(word) + 1\n", |
||||
" else:\n", |
||||
" current_chunk.append(word)\n", |
||||
"\n", |
||||
" if current_chunk:\n", |
||||
" chunks.append(\" \".join(current_chunk))\n", |
||||
"\n", |
||||
" return chunks\n", |
||||
"\n", |
||||
"\n", |
||||
"# # Set up OpenAI API key\n", |
||||
"# openai.api_key = \"your_openai_api_key\" # Replace with your API key\n", |
||||
"chroma_client = chromadb.Client()\n", |
||||
"\n", |
||||
"# Create or get the existing collection\n", |
||||
"collection_name = \"EverNotes\"\n", |
||||
"\n", |
||||
"try:\n", |
||||
" existing_collection = chroma_client.get_collection(name=collection_name)\n", |
||||
" if existing_collection.count() > 0:\n", |
||||
" chroma_client.delete_collection(name=collection_name)\n", |
||||
"except:\n", |
||||
" print(f\"Collection {collection_name} does not exist. Creating a new one.\")\n", |
||||
"\n", |
||||
"# Create a collection in ChromaDB\n", |
||||
"collection = chroma_client.get_or_create_collection(name=collection_name)\n", |
||||
"\n", |
||||
"# Define your data\n", |
||||
"# it should be like this\n", |
||||
"# documents = [\"OpenAI is revolutionizing AI.\", \"ChromaDB makes embedding storage easy.\"]\n", |
||||
"# metadata = [{\"id\": 1}, {\"id\": 2}]\n", |
||||
"\n", |
||||
"folder_path = os.getenv('EVERNOTE_EXPORT')\n", |
||||
"documents = []\n", |
||||
"\n", |
||||
"for root, dirs, files in os.walk(folder_path):\n", |
||||
" for file in files:\n", |
||||
" if file.endswith('.md'): # Change this to the file extension you need\n", |
||||
" with open(os.path.join(root, file), 'r') as f:\n", |
||||
" documents.append(f.read())\n", |
||||
"\n", |
||||
"metadata = [{\"id\": i + 1} for i in range(len(documents))]\n", |
||||
"\n", |
||||
"# Generate embeddings using OpenAI\n", |
||||
"def get_embedding(text, model=\"text-embedding-ada-002\"):\n", |
||||
" response = openai.embeddings.create(input=text, model=model)\n", |
||||
" return response.data[0].embedding\n", |
||||
"\n", |
||||
"# Add documents and embeddings to ChromaDB in chunks\n", |
||||
"for doc, meta in zip(documents, metadata):\n", |
||||
" chunks = chunk_text(doc)\n", |
||||
" for chunk in chunks:\n", |
||||
" embedding = get_embedding(chunk)\n", |
||||
" collection.add(\n", |
||||
" documents=[chunk],\n", |
||||
" embeddings=[embedding],\n", |
||||
" metadatas=[meta],\n", |
||||
" ids=[str(meta[\"id\"])]\n", |
||||
" )\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Query ChromaDB" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# \n", |
||||
"query_text = \"Is there a video for Fitting the Shimano speed hub 7\"\n", |
||||
"query_embedding = get_embedding(query_text)\n", |
||||
"\n", |
||||
"results = collection.query(\n", |
||||
" query_embeddings=[query_embedding],\n", |
||||
" n_results=2\n", |
||||
")\n", |
||||
"\n", |
||||
"print(\"Query Results:\", results)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"##### Gradio interface" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Function to query ChromaDB\n", |
||||
"def query_chromadb(query_text):\n", |
||||
" query_embedding = get_embedding(query_text)\n", |
||||
" results = collection.query(\n", |
||||
" query_embeddings=[query_embedding],\n", |
||||
" n_results=2\n", |
||||
" )\n", |
||||
" return results\n", |
||||
"\n", |
||||
"# Gradio interface\n", |
||||
"def gradio_interface(query_text):\n", |
||||
" results = query_chromadb(query_text)\n", |
||||
" return results\n", |
||||
"\n", |
||||
"# Create Gradio app\n", |
||||
"iface = gr.Interface(\n", |
||||
" fn=gradio_interface,\n", |
||||
" inputs=\"text\",\n", |
||||
" outputs=\"text\",\n", |
||||
" title=\"ChromaDB Query Interface\",\n", |
||||
" description=\"Enter your query to search the ChromaDB collection.\"\n", |
||||
")\n", |
||||
"\n", |
||||
"iface.launch()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"#### Below OpenAI filesearch variant which had some failures in file uploads." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import glob\n", |
||||
"folder_path = os.environ['EVERNOTE_EXPORT'] \n", |
||||
"# Filter out other except .md-files\n", |
||||
"md_files = glob.glob(os.path.join(folder_path, '*.md'))\n", |
||||
"file_paths = [os.path.join(folder_path, file) for file in md_files]\n", |
||||
"file_streams = [open(path, 'rb') for path in file_paths]\n", |
||||
"\n", |
||||
"# Create vector store\n", |
||||
"vector_store = openai.vector_stores.create(\n", |
||||
" name=\"Evernote notes\",\n", |
||||
")\n", |
||||
"\n", |
||||
"# Batch Upload Limit: You can upload up to 100 files in a single batch\n", |
||||
"# https://community.openai.com/t/max-100-files-in-vector-store/729876/4\n", |
||||
"batch_size = 90\n", |
||||
"for i in range(0, len(file_streams), batch_size):\n", |
||||
" batch = file_streams[i:i + batch_size]\n", |
||||
" file_batch = openai.vector_stores.file_batches.upload_and_poll(\n", |
||||
" vector_store_id=vector_store.id,\n", |
||||
" files=batch\n", |
||||
" )\n", |
||||
" print(file_batch.status)\n", |
||||
" print(file_batch.file_counts)\n", |
||||
"\n", |
||||
"# There can be some fails in file counts:\n", |
||||
"# \"FileCounts(cancelled=0, completed=89, failed=1, in_progress=0, total=90)\"\"\n", |
||||
"# Usually 1 % fails. Did not find solution for improving that yet" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"\n", |
||||
"\n", |
||||
"response = openai.responses.create(\n", |
||||
" model=\"gpt-4o-mini\",\n", |
||||
" input=\"Is there a video for Fitting the Shimano speed hub 7?\",\n", |
||||
" tools=[{\n", |
||||
" \"type\": \"file_search\",\n", |
||||
" \"vector_store_ids\": [vector_store.id]\n", |
||||
" }],\n", |
||||
" include=None\n", |
||||
")\n", |
||||
"print(response)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": ".venv", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 2 |
||||
} |
@ -0,0 +1,359 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "c25c6e94-f3de-4367-b2bf-269ba7160977", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## An Expert Knowledge Worker Question-Answering Agent using RAG" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "15169580-cf11-4dee-8ec7-3a4ef59b19ee", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"Aims\n", |
||||
"- Reads README.md files and loads data using TextLoader\n", |
||||
"- Splits into chunks using CharacterTextSplitter\n", |
||||
"- Converts chunks into vector embeddings and creates a datastore\n", |
||||
"- 2D and 3D visualisations\n", |
||||
"- Langchain to set up a conversation retrieval chain" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "051cf881-357d-406b-8eae-1610651e40f1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import glob\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ccfd403a-5bdb-4a8c-b3fd-d47ae79e43f7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports for langchain, plotly and Chroma\n", |
||||
"\n", |
||||
"from langchain.document_loaders import DirectoryLoader, TextLoader\n", |
||||
"from langchain.text_splitter import CharacterTextSplitter\n", |
||||
"from langchain.schema import Document\n", |
||||
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", |
||||
"from langchain.embeddings import HuggingFaceEmbeddings\n", |
||||
"from langchain_chroma import Chroma\n", |
||||
"from langchain.memory import ConversationBufferMemory\n", |
||||
"from langchain.chains import ConversationalRetrievalChain\n", |
||||
"import numpy as np\n", |
||||
"from sklearn.manifold import TSNE\n", |
||||
"import plotly.graph_objects as go\n", |
||||
"import plotly.express as px\n", |
||||
"import matplotlib.pyplot as plt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2d853868-d2f6-43e1-b27c-b8e91d06b724", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"db_name = \"vector_db\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f152fc3b-0bf4-4d51-948f-95da1ebc030a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "24e621ac-df06-4af6-a60d-a9ed7adb884a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Read in documents using LangChain's loaders\n", |
||||
"\n", |
||||
"folder = \"my-knowledge-base/\"\n", |
||||
"text_loader_kwargs={'autodetect_encoding': True}\n", |
||||
"\n", |
||||
"loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", |
||||
"folder_docs = loader.load()\n", |
||||
"\n", |
||||
"for doc in folder_docs:\n", |
||||
" filename_md = os.path.basename(doc.metadata[\"source\"]) \n", |
||||
" filename, _ = os.path.splitext(filename_md) \n", |
||||
" doc.metadata[\"filename\"] = filename\n", |
||||
"\n", |
||||
"documents = folder_docs \n", |
||||
"\n", |
||||
"text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)\n", |
||||
"chunks = text_splitter.split_documents(documents)\n", |
||||
"\n", |
||||
"print(f\"Total number of chunks: {len(chunks)}\")\n", |
||||
"print(f\"Files found: {set(doc.metadata['filename'] for doc in documents)}\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f02f08ee-5ade-4f79-a500-045a8f1a532f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Put the chunks of data into a Vector Store that associates a Vector Embedding with each chunk\n", |
||||
"\n", |
||||
"embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n", |
||||
"\n", |
||||
"# Delete if already exists\n", |
||||
"\n", |
||||
"if os.path.exists(db_name):\n", |
||||
" Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n", |
||||
"\n", |
||||
"# Create vectorstore\n", |
||||
"\n", |
||||
"vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", |
||||
"print(f\"Vectorstore created with {vectorstore._collection.count()} documents\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7f665f4d-ccb1-43fb-b901-040117925732", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Let's investigate the vectors\n", |
||||
"\n", |
||||
"collection = vectorstore._collection\n", |
||||
"count = collection.count()\n", |
||||
"\n", |
||||
"sample_embedding = collection.get(limit=1, include=[\"embeddings\"])[\"embeddings\"][0]\n", |
||||
"dimensions = len(sample_embedding)\n", |
||||
"print(f\"There are {count:,} vectors with {dimensions:,} dimensions in the vector store\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6208a971-e8b7-48bc-be7a-6dcb82967fd2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# pre work\n", |
||||
"\n", |
||||
"result = collection.get(include=['embeddings','documents','metadatas'])\n", |
||||
"vectors = np.array(result['embeddings']) \n", |
||||
"documents = result['documents']\n", |
||||
"metadatas = result['metadatas']\n", |
||||
"filenames = [metadata['filename'] for metadata in metadatas]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "eb27bc8a-453b-4b19-84b4-dc495bb0e544", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import random\n", |
||||
"def random_color():\n", |
||||
" return f\"rgb({random.randint(0,255)},{random.randint(0,255)},{random.randint(0,255)})\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "78db67e5-ef10-4581-b8ac-3e0281ceba45", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def show_embeddings_2d(result):\n", |
||||
" vectors = np.array(result['embeddings']) \n", |
||||
" documents = result['documents']\n", |
||||
" metadatas = result['metadatas']\n", |
||||
" filenames = [metadata['filename'] for metadata in metadatas]\n", |
||||
" filenames_unique = sorted(set(filenames))\n", |
||||
"\n", |
||||
" # color assignment\n", |
||||
" color_map = {name: random_color() for name in filenames_unique}\n", |
||||
" colors = [color_map[name] for name in filenames]\n", |
||||
"\n", |
||||
" tsne = TSNE(n_components=2, random_state=42,perplexity=4)\n", |
||||
" reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
" # Create the 2D scatter plot\n", |
||||
" fig = go.Figure(data=[go.Scatter(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5,color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
" )])\n", |
||||
"\n", |
||||
" fig.update_layout(\n", |
||||
" title='2D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x',yaxis_title='y'),\n", |
||||
" width=800,\n", |
||||
" height=600,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
" )\n", |
||||
"\n", |
||||
" fig.show()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2c250166-cb5b-4a75-8981-fae2d6dfe509", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"show_embeddings_2d(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3b290e38-0800-4453-b664-7a7622ff5ed2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def show_embeddings_3d(result):\n", |
||||
" vectors = np.array(result['embeddings']) \n", |
||||
" documents = result['documents']\n", |
||||
" metadatas = result['metadatas']\n", |
||||
" filenames = [metadata['filename'] for metadata in metadatas]\n", |
||||
" filenames_unique = sorted(set(filenames))\n", |
||||
"\n", |
||||
" # color assignment\n", |
||||
" color_map = {name: random_color() for name in filenames_unique}\n", |
||||
" colors = [color_map[name] for name in filenames]\n", |
||||
"\n", |
||||
" tsne = TSNE(n_components=3, random_state=42)\n", |
||||
" reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
" fig = go.Figure(data=[go.Scatter3d(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" z=reduced_vectors[:, 2],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5, color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
" )])\n", |
||||
"\n", |
||||
" fig.update_layout(\n", |
||||
" title='3D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n", |
||||
" width=900,\n", |
||||
" height=700,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
" )\n", |
||||
"\n", |
||||
" fig.show()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "45d1d034-2503-4176-b1e4-f248e31c4770", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"show_embeddings_3d(result)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e79946a1-f93a-4b3a-8d19-deef40dec223", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# create a new Chat with OpenAI\n", |
||||
"llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", |
||||
"\n", |
||||
"# set up the conversation memory for the chat\n", |
||||
"memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", |
||||
"\n", |
||||
"# the retriever is an abstraction over the VectorStore that will be used during RAG\n", |
||||
"retriever = vectorstore.as_retriever(search_kwargs={\"k\": 50})\n", |
||||
"\n", |
||||
"# putting it together: set up the conversation chain with the GPT-4o-mini LLM, the vector store and memory\n", |
||||
"conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "59f90c85-c113-4482-8574-8a728ef25459", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(question, history):\n", |
||||
" result = conversation_chain.invoke({\"question\": question})\n", |
||||
" return result[\"answer\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0520a8ff-01a4-4fa6-9dc8-57da87272edc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"view = gr.ChatInterface(chat, type=\"messages\").launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b4949b17-cd9c-4bff-bd5b-0f80df72e7dc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,353 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "d13be0fd-db15-4ab1-860a-b00257051339", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Gradio UI for Markdown-Based Q&A with Visualization" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "bc63fbdb-66a9-4c10-8dbd-11476b5e2d21", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"This interface enables users to:\n", |
||||
"- Upload Markdown files for processing\n", |
||||
"- Visualize similarity between document chunks in 2D and 3D using embeddings\n", |
||||
"- Ask questions and receive RAG-enabled responses\n", |
||||
"- Maintain conversation context for better question answering\n", |
||||
"- Clear chat history when required for fresh sessions\n", |
||||
"- Store and retrieve embeddings using ChromaDB\n", |
||||
"\n", |
||||
"Integrates LangChain, ChromaDB, and OpenAI to process, store, and retrieve information efficiently." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "91da28d8-8e29-44b7-a62a-a3a109753727", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import gradio as gr" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e47f670a-e2cb-4700-95d0-e59e440677a1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports for langchain, plotly and Chroma\n", |
||||
"\n", |
||||
"from langchain.document_loaders import DirectoryLoader, TextLoader\n", |
||||
"from langchain.text_splitter import CharacterTextSplitter\n", |
||||
"from langchain.schema import Document\n", |
||||
"from langchain_openai import OpenAIEmbeddings, ChatOpenAI\n", |
||||
"from langchain.embeddings import HuggingFaceEmbeddings\n", |
||||
"from langchain_chroma import Chroma\n", |
||||
"from langchain.memory import ConversationBufferMemory\n", |
||||
"from langchain.chains import ConversationalRetrievalChain\n", |
||||
"import numpy as np\n", |
||||
"from sklearn.manifold import TSNE\n", |
||||
"import plotly.graph_objects as go\n", |
||||
"import plotly.express as px\n", |
||||
"import matplotlib.pyplot as plt\n", |
||||
"from random import randint\n", |
||||
"import shutil" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "362d4976-2553-4ed8-8fbb-49806145cad1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"!pip install --upgrade gradio" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "968b6e96-557e-439f-b2f1-942c05168641", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"MODEL = \"gpt-4o-mini\"\n", |
||||
"db_name = \"vector_db\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "537f66de-6abf-4b34-8e05-6b9a9df8ae82", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Load environment variables in a file called .env\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "246c1c1b-fcfa-4f4c-b99c-024598751361", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"folder = \"my-knowledge-base/\"\n", |
||||
"db_name = \"vectorstore_db\"\n", |
||||
"\n", |
||||
"def process_files(files):\n", |
||||
" os.makedirs(folder, exist_ok=True)\n", |
||||
"\n", |
||||
" processed_files = []\n", |
||||
" for file in files:\n", |
||||
" file_path = os.path.join(folder, os.path.basename(file)) # Get filename\n", |
||||
" shutil.copy(file, file_path)\n", |
||||
" processed_files.append(os.path.basename(file))\n", |
||||
"\n", |
||||
" # Load documents using LangChain's DirectoryLoader\n", |
||||
" text_loader_kwargs = {'autodetect_encoding': True}\n", |
||||
" loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", |
||||
" folder_docs = loader.load()\n", |
||||
"\n", |
||||
" # Assign filenames as metadata\n", |
||||
" for doc in folder_docs:\n", |
||||
" filename_md = os.path.basename(doc.metadata[\"source\"])\n", |
||||
" filename, _ = os.path.splitext(filename_md)\n", |
||||
" doc.metadata[\"filename\"] = filename\n", |
||||
"\n", |
||||
" documents = folder_docs \n", |
||||
"\n", |
||||
" # Split documents into chunks\n", |
||||
" text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=200)\n", |
||||
" chunks = text_splitter.split_documents(documents)\n", |
||||
"\n", |
||||
" # Initialize embeddings\n", |
||||
" embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n", |
||||
"\n", |
||||
" # Delete previous vectorstore\n", |
||||
" if os.path.exists(db_name):\n", |
||||
" Chroma(persist_directory=db_name, embedding_function=embeddings).delete_collection()\n", |
||||
"\n", |
||||
" # Store in ChromaDB\n", |
||||
" vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)\n", |
||||
"\n", |
||||
" # Retrieve results\n", |
||||
" collection = vectorstore._collection\n", |
||||
" result = collection.get(include=['embeddings', 'documents', 'metadatas'])\n", |
||||
"\n", |
||||
" llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", |
||||
" memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", |
||||
" retriever = vectorstore.as_retriever(search_kwargs={\"k\": 35})\n", |
||||
" global conversation_chain\n", |
||||
" conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)\n", |
||||
"\n", |
||||
" processed_text = \"**Processed Files:**\\n\\n\" + \"\\n\".join(f\"- {file}\" for file in processed_files)\n", |
||||
" return result, processed_text" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "48678d3a-0ab2-4aa4-aa9e-4160c6a9cb24", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def random_color():\n", |
||||
" return f\"rgb({randint(0,255)},{randint(0,255)},{randint(0,255)})\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6caed889-9bb4-42ad-b1c2-da051aefc802", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def show_embeddings_2d(result):\n", |
||||
" vectors = np.array(result['embeddings']) \n", |
||||
" documents = result['documents']\n", |
||||
" metadatas = result['metadatas']\n", |
||||
" filenames = [metadata['filename'] for metadata in metadatas]\n", |
||||
" filenames_unique = sorted(set(filenames))\n", |
||||
"\n", |
||||
" # color assignment\n", |
||||
" color_map = {name: random_color() for name in filenames_unique}\n", |
||||
" colors = [color_map[name] for name in filenames]\n", |
||||
"\n", |
||||
" tsne = TSNE(n_components=2, random_state=42,perplexity=4)\n", |
||||
" reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
" # Create the 2D scatter plot\n", |
||||
" fig = go.Figure(data=[go.Scatter(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5,color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
" )])\n", |
||||
"\n", |
||||
" fig.update_layout(\n", |
||||
" title='2D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x',yaxis_title='y'),\n", |
||||
" width=800,\n", |
||||
" height=600,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
" )\n", |
||||
"\n", |
||||
" return fig" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "de993495-c8cd-4313-a6bb-7d27494ecc13", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def show_embeddings_3d(result):\n", |
||||
" vectors = np.array(result['embeddings']) \n", |
||||
" documents = result['documents']\n", |
||||
" metadatas = result['metadatas']\n", |
||||
" filenames = [metadata['filename'] for metadata in metadatas]\n", |
||||
" filenames_unique = sorted(set(filenames))\n", |
||||
"\n", |
||||
" # color assignment\n", |
||||
" color_map = {name: random_color() for name in filenames_unique}\n", |
||||
" colors = [color_map[name] for name in filenames]\n", |
||||
"\n", |
||||
" tsne = TSNE(n_components=3, random_state=42)\n", |
||||
" reduced_vectors = tsne.fit_transform(vectors)\n", |
||||
"\n", |
||||
" fig = go.Figure(data=[go.Scatter3d(\n", |
||||
" x=reduced_vectors[:, 0],\n", |
||||
" y=reduced_vectors[:, 1],\n", |
||||
" z=reduced_vectors[:, 2],\n", |
||||
" mode='markers',\n", |
||||
" marker=dict(size=5, color=colors, opacity=0.8),\n", |
||||
" text=[f\"Type: {t}<br>Text: {d[:100]}...\" for t, d in zip(filenames, documents)],\n", |
||||
" hoverinfo='text'\n", |
||||
" )])\n", |
||||
"\n", |
||||
" fig.update_layout(\n", |
||||
" title='3D Chroma Vector Store Visualization',\n", |
||||
" scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),\n", |
||||
" width=900,\n", |
||||
" height=700,\n", |
||||
" margin=dict(r=20, b=10, l=10, t=40)\n", |
||||
" )\n", |
||||
"\n", |
||||
" return fig" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7b7bf62b-c559-4e97-8135-48cd8d97a40e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def chat(question, history):\n", |
||||
" result = conversation_chain.invoke({\"question\": question})\n", |
||||
" return result[\"answer\"]\n", |
||||
"\n", |
||||
"def visualise_data(result):\n", |
||||
" fig_2d = show_embeddings_2d(result)\n", |
||||
" fig_3d = show_embeddings_3d(result)\n", |
||||
" return fig_2d,fig_3d" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "99217109-fbee-4269-81c7-001e6f768a72", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"css = \"\"\"\n", |
||||
".btn {background-color: #1d53d1;}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e1429ea1-1d9f-4be6-b270-01997864c642", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"with gr.Blocks(css=css) as ui:\n", |
||||
" gr.Markdown(\"# Markdown-Based Q&A with Visualization\")\n", |
||||
" with gr.Row():\n", |
||||
" file_input = gr.Files(file_types=[\".md\"], label=\"Upload Markdown Files\")\n", |
||||
" with gr.Column(scale=1):\n", |
||||
" processed_output = gr.Markdown(\"Progress\")\n", |
||||
" with gr.Row():\n", |
||||
" process_btn = gr.Button(\"Process Files\",elem_classes=[\"btn\"])\n", |
||||
" with gr.Row():\n", |
||||
" question = gr.Textbox(label=\"Chat \", lines=10)\n", |
||||
" answer = gr.Markdown(label= \"Response\")\n", |
||||
" with gr.Row():\n", |
||||
" question_btn = gr.Button(\"Ask a Question\",elem_classes=[\"btn\"])\n", |
||||
" clear_btn = gr.Button(\"Clear Output\",elem_classes=[\"btn\"])\n", |
||||
" with gr.Row():\n", |
||||
" plot_2d = gr.Plot(label=\"2D Visualization\")\n", |
||||
" plot_3d = gr.Plot(label=\"3D Visualization\")\n", |
||||
" with gr.Row():\n", |
||||
" visualise_btn = gr.Button(\"Visualise Data\",elem_classes=[\"btn\"])\n", |
||||
"\n", |
||||
" result = gr.State([])\n", |
||||
" # Action: When button is clicked, process files and update visualization\n", |
||||
" clear_btn.click(fn=lambda:(\"\", \"\"), inputs=[],outputs=[question, answer])\n", |
||||
" process_btn.click(process_files, inputs=[file_input], outputs=[result,processed_output])\n", |
||||
" question_btn.click(chat, inputs=[question], outputs= [answer])\n", |
||||
" visualise_btn.click(visualise_data, inputs=[result], outputs=[plot_2d,plot_3d])\n", |
||||
"\n", |
||||
"# Launch Gradio app\n", |
||||
"ui.launch(inbrowser=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d3686048-ac29-4df1-b816-e58996913ef1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,636 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "28a0673e-96b5-43f2-8a8b-bd033bf851b0", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Add a Validation Set\n", |
||||
"\n", |
||||
"In the lecture, we created a curated dataset with **400,000 training items** and **2,000 test items**, but we did not include a validation (dev) set. This notebook demonstrates how to take Ed Donner’s dataset, [ed-donner/pricer-data](https://huggingface.co/datasets/ed-donner/pricer-data), and add a dev set to it.\n", |
||||
"\n", |
||||
"> **Note**: This notebook heavily uses snippets from the lectures’ `day2.ipynb` of Week 6.\n", |
||||
"\n", |
||||
"**Download the Updated Dataset**: \n", |
||||
"You can find the resulting dataset here: [antonawinkler/pricer-data](https://huggingface.co/datasets/antonawinkler/pricer-data)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "67cedf85-8125-4322-998e-9375fe745597", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"\n", |
||||
"# Standard libraries\n", |
||||
"import os\n", |
||||
"import random\n", |
||||
"from itertools import chain\n", |
||||
"from collections import Counter, defaultdict\n", |
||||
"\n", |
||||
"# Third-party libraries\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from huggingface_hub import login\n", |
||||
"from datasets import concatenate_datasets, load_dataset, Dataset, DatasetDict\n", |
||||
"import matplotlib.pyplot as plt\n", |
||||
"import numpy as np\n", |
||||
"\n", |
||||
"# Local modules\n", |
||||
"from items import Item\n", |
||||
"from loaders import ItemLoader\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7390a6aa-79cb-4dea-b6d7-de7e4b13e472", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# environment\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0732274a-aa6a-44fc-aee2-40dc8a8e4451", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Log in to HuggingFace\n", |
||||
"\n", |
||||
"hf_token = os.environ['HF_TOKEN']\n", |
||||
"login(hf_token, add_to_git_credential=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1adcf323-de9d-4c24-a9c3-d7ae554d06ca", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"%matplotlib inline" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "e2b6dc50-ac5c-4cf2-af2e-968ed8ef86d7", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Load the Original Dataset\n", |
||||
"\n", |
||||
"Load the original data from McAuley-Lab/Amazon-Reviews-2023." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d1d06cd3-f3c2-44f0-a9f2-13b54ff8be5c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"dataset_names = [\n", |
||||
" \"Automotive\",\n", |
||||
" \"Electronics\",\n", |
||||
" \"Office_Products\",\n", |
||||
" \"Tools_and_Home_Improvement\",\n", |
||||
" \"Cell_Phones_and_Accessories\",\n", |
||||
" \"Toys_and_Games\",\n", |
||||
" \"Appliances\",\n", |
||||
" \"Musical_Instruments\",\n", |
||||
"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "aa8fd0f0-509a-4298-8fcc-e499a061e1be", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"items = []\n", |
||||
"for dataset_name in dataset_names:\n", |
||||
" loader = ItemLoader(dataset_name)\n", |
||||
" items.extend(loader.load())" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "bf6b6b66-4a4b-41c2-b366-1f598cf18351", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# Create Balanced Dataset\n", |
||||
"\n", |
||||
"We apply the balancing algorithm from the course." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "549a4bad-abe7-4d36-ad77-fc70ba0f151c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"slots = defaultdict(list)\n", |
||||
"for item in items:\n", |
||||
" slots[round(item.price)].append(item)\n", |
||||
"\n", |
||||
"np.random.seed(42)\n", |
||||
"random.seed(42)\n", |
||||
"sample = []\n", |
||||
"for i in range(1, 1000):\n", |
||||
" slot = slots[i]\n", |
||||
" if i>=240:\n", |
||||
" sample.extend(slot)\n", |
||||
" elif len(slot) <= 1200:\n", |
||||
" sample.extend(slot)\n", |
||||
" else:\n", |
||||
" weights = np.array([1 if item.category=='Automotive' else 5 for item in slot])\n", |
||||
" weights = weights / np.sum(weights)\n", |
||||
" selected_indices = np.random.choice(len(slot), size=1200, replace=False, p=weights)\n", |
||||
" selected = [slot[i] for i in selected_indices]\n", |
||||
" sample.extend(selected)\n", |
||||
"\n", |
||||
"print(f\"There are {len(sample):,} items in the sample\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "04280d2b-210a-4fad-9163-1b32a87fb990", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"The output I get is `There are 408,635 items in the sample`\n", |
||||
"\n", |
||||
"Since there are 400,000 items in the train set of ed-donner/pricer-data, we can aim for a 98/1/1 split." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "0d1e2836-0cae-4496-a5d4-d80bc14d566b", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Load Ed Donner's Pricer Data Set" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a84e5a71-fc44-4cdf-9bc2-c69f80b8ee94", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"dataset_ori = load_dataset(\"ed-donner/pricer-data\")\n", |
||||
"train_ori = dataset_ori['train']\n", |
||||
"test_ori = dataset_ori['test']" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "e9c5c877-3d30-4013-9d0f-1e490755afeb", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Observation 1: Order of the Data Has Changed\n", |
||||
"\n", |
||||
"`dataset_without_devset` should be a subset of `sample`. The order however can be different. Let us check this.\n", |
||||
"\n", |
||||
"I see different results for the following two cells below, indicating that the order has changed." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "56ad8682-4d7f-4aad-9976-96eb6d9b4a5a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"sample[0].prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3e29a5ab-ca61-41cc-9b33-22d374681b85", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"train_ori[0]['text']" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "469a5b3c-c1a2-461d-a88d-27aa08905b31", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Observation 2: Duplicate Items\n", |
||||
"\n", |
||||
"As a further challenge, the dataset shows duplicates with identical scrubbed descriptions. For some of these duplicates the prices are identical too (I see 1774), for others they differ (I see 6747).\n", |
||||
"\n", |
||||
"> **Note**: Below we use `defaultdict(list)` instead of `set` because it allows us to inspect duplicates easily." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "94adffe8-edf6-4503-9f8f-34e4dfd29da9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"PRICE_IS = \"\\n\\nPrice is $\"\n", |
||||
"def get_key(text, price):\n", |
||||
" prefix, price_is, _price_nearest_dollar = text.partition(PRICE_IS)\n", |
||||
" return f\"{prefix}{price_is}{price}\"\n", |
||||
"def get_key_without_price(text):\n", |
||||
" prefix, price_is, _price_nearest_dollar = text.partition(PRICE_IS)\n", |
||||
" return f\"{prefix}\"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a015ba1b-69e0-4651-850f-d93d3f078d16", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Identify duplicates by text+price\n", |
||||
"train_ori_dict = defaultdict(list)\n", |
||||
"for datapoint in train_ori:\n", |
||||
" # Creates a key from the text and price (scrubbed)\n", |
||||
" key = get_key(datapoint[\"text\"], datapoint[\"price\"])\n", |
||||
" train_ori_dict[key].append(datapoint)\n", |
||||
"\n", |
||||
"# Number of exact duplicates (same text AND same price)\n", |
||||
"exact_duplicates = len(train_ori) - len(train_ori_dict)\n", |
||||
"print(f\"There are {exact_duplicates} duplicates with the same description and price.\")\n", |
||||
"\n", |
||||
"# Identify duplicates by text alone (ignoring price)\n", |
||||
"train_ori_dict_no_price = defaultdict(list)\n", |
||||
"for datapoint in train_ori:\n", |
||||
" key_no_price = get_key_without_price(datapoint[\"text\"])\n", |
||||
" train_ori_dict_no_price[key_no_price].append(datapoint)\n", |
||||
"\n", |
||||
"# Number of duplicates that differ in price but share the same text\n", |
||||
"different_price_duplicates = len(train_ori_dict) - len(train_ori_dict_no_price)\n", |
||||
"print(f\"In addition, there are {different_price_duplicates} data points where the description is duplicated but the price is different.\")\n", |
||||
"\n", |
||||
"# Total number of duplicates if we consider text alone\n", |
||||
"overall_duplicates = len(train_ori) - len(train_ori_dict_no_price)\n", |
||||
"print(f\"Overall number of duplicates: {overall_duplicates}\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e577dd8b-be0f-4ab0-b45f-9d3459b1286a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"test_ori_dict = defaultdict(list)\n", |
||||
"for datapoint in test_ori:\n", |
||||
" key = get_key(datapoint['text'], datapoint['price'])\n", |
||||
" test_ori_dict[key].append(datapoint)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0198fc23-0825-4ce1-a961-1d390d86cbdc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"sample_dict = defaultdict(list)\n", |
||||
"for datapoint in sample:\n", |
||||
" key = get_key(datapoint.prompt, datapoint.price)\n", |
||||
" sample_dict[key].append(datapoint)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "37f24d22-51ef-472b-8c73-e969637fa925", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Check if all data points in train_ori/test_ori are included in the new sample_dict.\n", |
||||
"missing = []\n", |
||||
"count_found = 0\n", |
||||
"\n", |
||||
"for datapoint in chain(train_ori, test_ori):\n", |
||||
" key = get_key(datapoint[\"text\"], datapoint[\"price\"])\n", |
||||
" if key not in sample_dict:\n", |
||||
" missing.append(datapoint)\n", |
||||
" else:\n", |
||||
" count_found += 1\n", |
||||
"\n", |
||||
"print(f\"We found {count_found} datapoints in sample_dict.\")\n", |
||||
"print(f\"We are missing {len(missing)} datapoints that are not present in sample_dict.\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "60c9d186-c688-4559-9b51-f0045d16829b", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"Expected output of the previous cell\n", |
||||
"```\n", |
||||
"We found 402000 datapoints in sample_dict.\n", |
||||
"We are missing 0 datapoints that are not present in sample_dict.\n", |
||||
"```" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "3b05e22d-a755-4ee5-a18b-620f7ab1df8f", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Add Data Points to the Test and Validation Sets\n", |
||||
"\n", |
||||
"Since we can match all data points in the original train and test sets from `ed-donner/pricer-data`, we’ll now incorporate any *unused* items from our balanced sample into the test set and create a new validation (dev) set. Our goal is to achieve a **98/1/1** split for train, validation, and test." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "16638cf9-03c3-46bc-8116-cafdd9e23ac9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"sample_not_used_yet = [datapoint for key in sample_dict.keys() - train_ori_dict.keys() - test_ori_dict.keys() for datapoint in sample_dict[key]]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "58a593ad-29a1-4b35-9753-45db75e09666", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# As a sanity check, let us visually verify that the distribution of sample_not_used_yet is in line with the complete sample.\n", |
||||
"\n", |
||||
"# Plot the distribution of prices in sample\n", |
||||
"def plot_price_distribution(items, name):\n", |
||||
" prices = [float(item.price) for item in items]\n", |
||||
" plt.figure(figsize=(15, 10))\n", |
||||
" plt.title(f\"{name} - Avg {sum(prices)/len(prices):.2f} and highest {max(prices):,.2f}\\n\")\n", |
||||
" plt.xlabel('Price ($)')\n", |
||||
" plt.ylabel('Count')\n", |
||||
" # see https://stackoverflow.com/questions/57026223/how-to-re-scale-the-counts-in-a-matplotlib-histogram\n", |
||||
" (counts, bins) = np.histogram(prices, bins=range(0, 1000, 10))\n", |
||||
" plt.hist(bins[:-1], color=\"darkblue\", bins=bins, weights=counts/len(prices))\n", |
||||
" plt.show() \n", |
||||
"\n", |
||||
"\n", |
||||
"def plot_category_distribution(items, name):\n", |
||||
" category_counts = Counter()\n", |
||||
" for item in items:\n", |
||||
" category_counts[item.category]+=1\n", |
||||
" categories = sorted(category_counts.keys())\n", |
||||
" counts = [category_counts[category] for category in categories]\n", |
||||
"\n", |
||||
" # plot a pie chart\n", |
||||
" plt.figure(figsize=(12, 10))\n", |
||||
" plt.pie(counts, labels=categories, autopct='%1.0f%%', startangle=90)\n", |
||||
" \n", |
||||
" # Add a circle at the center to create a donut chart (optional)\n", |
||||
" centre_circle = plt.Circle((0,0), 0.70, fc='white')\n", |
||||
" fig = plt.gcf()\n", |
||||
" fig.gca().add_artist(centre_circle)\n", |
||||
" plt.title(f'{name} - Categories')\n", |
||||
" \n", |
||||
" # Equal aspect ratio ensures that pie is drawn as a circle\n", |
||||
" plt.axis('equal') \n", |
||||
"\n", |
||||
" plt.show()\n", |
||||
"plot_price_distribution(sample, 'Complete set')\n", |
||||
"plot_price_distribution(sample_not_used_yet, 'Not used yet')\n", |
||||
"plot_category_distribution(sample, 'Complete set')\n", |
||||
"plot_category_distribution(sample_not_used_yet, 'Not used yet')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ba252265-b976-426a-aefc-ebc93b153fd4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# now add the unused items to the validation and test set\n", |
||||
"random.seed(42)\n", |
||||
"random.shuffle(sample_not_used_yet)\n", |
||||
"validation_items = sample_not_used_yet[:4000]\n", |
||||
"added_test_items = sample_not_used_yet[4000:]\n", |
||||
"\n", |
||||
"# create Huggingface dataset\n", |
||||
"validation_dataset = Dataset.from_dict({\"text\": [item.prompt for item in validation_items], \"price\": [item.price for item in validation_items]})\n", |
||||
"added_test_dataset = Dataset.from_dict({\"text\": [item.prompt for item in added_test_items], \"price\": [item.price for item in added_test_items]})\n", |
||||
"\n", |
||||
"dataset = DatasetDict({\n", |
||||
" \"train\": train_ori,\n", |
||||
" \"test\": concatenate_datasets([test_ori, added_test_dataset]),\n", |
||||
" \"validation\": validation_dataset,\n", |
||||
"})\n", |
||||
"\n", |
||||
"print(f\"Divided into a training set of {dataset['train'].num_rows:,} items, a validation set of {dataset['validation'].num_rows:,} items, and a test set of {dataset['test'].num_rows:,} items\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c39ac5d7-84f8-4f7d-98e1-d24651ba3a80", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# If you're ready to push to the hub, and fill in the dots with your HF username\n", |
||||
"\n", |
||||
"HF_USER = ...\n", |
||||
"DATASET_NAME = f\"{HF_USER}/pricer-data\"\n", |
||||
"dataset.push_to_hub(DATASET_NAME, private=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "3fcb2492-ef2a-468e-8bf1-deb18eef4d9c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Use of Validation Sets\n", |
||||
"\n", |
||||
"Use the validation set as follows when you train your model in Week 7:\n", |
||||
"\n", |
||||
"```python\n", |
||||
"# load the train and validation set\n", |
||||
"train = load_dataset(DATASET_NAME, split='train[:100%]') # or less than 100%\n", |
||||
"validation = load_dataset(DATASET_NAME, split='validation[:100%]') # or less than 100% \n", |
||||
"\n", |
||||
"# Define training parameters\n", |
||||
"train_parameters = SFTConfig(\n", |
||||
" eval_strategy=\"steps\", # or \"epoch\"\n", |
||||
" eval_steps=EVAL_STEPS,\n", |
||||
" ...\n", |
||||
")\n", |
||||
"\n", |
||||
"# Initialize fine-tuning with validation set\n", |
||||
"fine_tuning = SFTTrainer(\n", |
||||
" eval_dataset=validation,\n", |
||||
" ...\n", |
||||
")\n", |
||||
"```" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "bceb4407-d91d-4731-9e96-189f6f953cbc", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## A Closer Look at the Duplicates\n", |
||||
"\n", |
||||
"We have now created a dataset that includes a validation set and additional test data. During this process, we observed that **2% of the data contains duplicates**, where the scrubbed descriptions are identical.\n", |
||||
"\n", |
||||
"Duplicates can contribute to model overfitting. However, since only **2% of the dataset is duplicated**, the impact is likely minimal. Moreover, many of these duplicates actually refer to different physical objects rather than being true duplicates.\n", |
||||
"\n", |
||||
"### False Duplicates\n", |
||||
"\n", |
||||
"The “duplicates” we observe are often not duplicates in the original dataset. Minor differences in product descriptions may be removed by the scrubbing process, leading to items that *appear* identical but aren’t. For example:\n", |
||||
"\n", |
||||
"```\n", |
||||
"<RinoGear Screen Protector Designed for Sony Xperia XZ Screen Protector Case Friendly Accessories Flexible Full Coverage Clear TPU Film = $0.95>\n", |
||||
"<RinoGear (2-Pack) Screen Protector Designed for Sony Xperia XZ Screen Protector Case Friendly Accessories Flexible Full Coverage Clear TPU Film = $2.95>\n", |
||||
"```\n", |
||||
"\"(2-Pack)\" is removed in the scrub method.\n", |
||||
"\n", |
||||
"Similarly:\n", |
||||
"```\n", |
||||
"[<EBC Brakes USR7115 USR Series Sport Slotted Rotor = $31.22>,\n", |
||||
" <EBC Brakes USR7314 USR Series Sport Slotted Rotor = $71.46>,\n", |
||||
" <EBC Brakes USR7409 USR Series Sport Slotted Rotor = $88.67>,\n", |
||||
"...\n", |
||||
" <EBC Brakes USR7305 USR Series Sport Slotted Rotor = $406.55>,\n", |
||||
" <EBC Brakes USR7384 USR Series Sport Slotted Rotor = $413.61>,\n", |
||||
" <EBC Brakes USR1602 USR Series Sport Slotted Rotor = $615.1>]\n", |
||||
"```\n", |
||||
"These all represent different rotor models. \n", |
||||
"\n", |
||||
"**Even when both the scrubbed text and the price are identical**, the items may still refer to distinct products. For instance:\n", |
||||
"```\n", |
||||
"<5304486359 Refrigerator Door Handles Set Replacement for Frigidaire FFTR1821QW5A Refrigerator - Compatible with 5304486359 White Door Handles - UpStart Components Brand = $17.99>\n", |
||||
"<5304486359 Refrigerator Door Handles Set Replacement for Frigidaire FFTR1831QP1 Refrigerator - Compatible with 5304486359 White Door Handles - UpStart Components Brand = $17.99>\n", |
||||
"```\n", |
||||
"\n", |
||||
"### True Duplicates\n", |
||||
"Finding *true* duplicates—where the scrubbed text, price, and underlying real-world product match—seems relatively rare. The following items in the **Appliances** set, for instance, likely refer to the same physical product:\n", |
||||
"```python\n", |
||||
"{'main_category': 'Tools & Home Improvement',\n", |
||||
" 'title': 'Whirlpool 8318084 Lid Switch for Washer',\n", |
||||
" 'average_rating': 4.6,\n", |
||||
" 'rating_number': 511,\n", |
||||
" 'features': ['Works with the following models: Whirlpool 1CLBR5432PQ0, Whirlpool 1CLBR5432PQ1, Whirlpool 1CLSQ9549PG0',\n", |
||||
" 'This products adds a great value',\n", |
||||
" 'This product is manufactured in United States',\n", |
||||
" 'Works with the following models: Whirlpool 1CLBR5432PQ0, Whirlpool 1CLBR5432PQ1, Whirlpool 1CLSQ9549PG0',\n", |
||||
" 'Whirlpool 1CLSQ9549PG1, Whirlpool 1CLSQ9549PW0',\n", |
||||
" 'Whirlpool 1CLSQ9549PW1, Whirlpool 1CLSR7010PQ0',\n", |
||||
" 'Whirlpool 1CLSR7010PQ1, Whirlpool 1CLSR7300PQ0',\n", |
||||
" 'Genuine Replacement Part'],\n", |
||||
" 'description': ['Product Description',\n", |
||||
" 'Part Number 8318084 (AP3180933) replaces 1018522, AH886960, EA886960, PS886960., Easy to use and handle. This products adds a great value This product is manufactured in United States.',\n", |
||||
" 'From the Manufacturer',\n", |
||||
" 'Whirlpool 8318084 Lid Switch for Washer. Works with the following models: Whirlpool 1CLBR5432PQ0, Whirlpool 1CLBR5432PQ1, Whirlpool 1CLSQ9549PG0, Whirlpool 1CLSQ9549PG1, Whirlpool 1CLSQ9549PW0, Whirlpool 1CLSQ9549PW1, Whirlpool 1CLSR7010PQ0, Whirlpool 1CLSR7010PQ1, Whirlpool 1CLSR7300PQ0. Genuine Replacement Part.'],\n", |
||||
" 'price': '25.55',\n", |
||||
" 'images': {'hi_res': [None],\n", |
||||
" 'large': ['https://m.media-amazon.com/images/I/31QE91zX0mL._AC_.jpg'],\n", |
||||
" 'thumb': ['https://m.media-amazon.com/images/I/31QE91zX0mL._AC_US75_.jpg'],\n", |
||||
" 'variant': ['MAIN']},\n", |
||||
" 'videos': {'title': [\"Your Washer Won't Spin?\", '8318084 Washer Lid Switch'],\n", |
||||
" 'url': ['https://www.amazon.com/vdp/09c00a975b4b46198b5703483f424981?ref=dp_vse_rvc_0',\n", |
||||
" 'https://www.amazon.com/vdp/3c9b3dc3c93444978d542af3fab13c49?ref=dp_vse_rvc_1'],\n", |
||||
" 'user_id': ['', '']},\n", |
||||
" 'store': 'Whirlpool',\n", |
||||
" 'categories': ['Appliances',\n", |
||||
" 'Parts & Accessories',\n", |
||||
" 'Washer Parts & Accessories'],\n", |
||||
" 'details': '{\"Manufacturer\": \"Whirlpool\", \"Part Number\": \"8318084\", \"Item Weight\": \"1.34 ounces\", \"Product Dimensions\": \"3 x 2 x 2 inches\", \"Item model number\": \"8318084\", \"Is Discontinued By Manufacturer\": \"No\", \"Item Package Quantity\": \"1\", \"Included Components\": \"Kkk\", \"Batteries Included?\": \"No\", \"Batteries Required?\": \"No\", \"Warranty Description\": \"Kk\", \"Best Sellers Rank\": {\"Tools & Home Improvement\": 231142, \"Washer Parts & Accessories\": 1074}, \"Date First Available\": \"August 7, 2008\"}',\n", |
||||
" 'parent_asin': 'B01CT25N26',\n", |
||||
" 'bought_together': None,\n", |
||||
" 'subtitle': None,\n", |
||||
" 'author': None}\n", |
||||
"\n", |
||||
"{'main_category': 'Tools & Home Improvement',\n", |
||||
" 'title': 'Whirlpool 8318084 Lid Switch for Washer',\n", |
||||
" 'average_rating': 4.6,\n", |
||||
" 'rating_number': 514,\n", |
||||
" 'features': ['Works with the following models: Whirlpool 1CLBR5432PQ0, Whirlpool 1CLBR5432PQ1, Whirlpool 1CLSQ9549PG0',\n", |
||||
" 'This products adds a great value',\n", |
||||
" 'This product is manufactured in United States',\n", |
||||
" 'Works with the following models: Whirlpool 1CLBR5432PQ0, Whirlpool 1CLBR5432PQ1, Whirlpool 1CLSQ9549PG0',\n", |
||||
" 'Whirlpool 1CLSQ9549PG1, Whirlpool 1CLSQ9549PW0',\n", |
||||
" 'Whirlpool 1CLSQ9549PW1, Whirlpool 1CLSR7010PQ0',\n", |
||||
" 'Whirlpool 1CLSR7010PQ1, Whirlpool 1CLSR7300PQ0',\n", |
||||
" 'Genuine Replacement Part'],\n", |
||||
" 'description': ['Product Description',\n", |
||||
" 'Part Number 8318084 (AP3180933) replaces 1018522, AH886960, EA886960, PS886960., Easy to use and handle. This products adds a great value This product is manufactured in United States.',\n", |
||||
" 'From the Manufacturer',\n", |
||||
" 'Whirlpool 8318084 Lid Switch for Washer. Works with the following models: Whirlpool 1CLBR5432PQ0, Whirlpool 1CLBR5432PQ1, Whirlpool 1CLSQ9549PG0, Whirlpool 1CLSQ9549PG1, Whirlpool 1CLSQ9549PW0, Whirlpool 1CLSQ9549PW1, Whirlpool 1CLSR7010PQ0, Whirlpool 1CLSR7010PQ1, Whirlpool 1CLSR7300PQ0. Genuine Replacement Part.'],\n", |
||||
" 'price': '25.55',\n", |
||||
" 'images': {'hi_res': [None],\n", |
||||
" 'large': ['https://m.media-amazon.com/images/I/31QE91zX0mL._AC_.jpg'],\n", |
||||
" 'thumb': ['https://m.media-amazon.com/images/I/31QE91zX0mL._AC_US75_.jpg'],\n", |
||||
" 'variant': ['MAIN']},\n", |
||||
" 'videos': {'title': ['AMI PARTS,Parts Specialist'],\n", |
||||
" 'url': ['https://www.amazon.com/vdp/09a12ea79b1a4081a18909825437760b?ref=dp_vse_rvc_0'],\n", |
||||
" 'user_id': ['']},\n", |
||||
" 'store': 'Whirlpool',\n", |
||||
" 'categories': ['Appliances',\n", |
||||
" 'Parts & Accessories',\n", |
||||
" 'Washer Parts & Accessories'],\n", |
||||
" 'details': '{\"Manufacturer\": \"Whirlpool\", \"Part Number\": \"8318084\", \"Item Weight\": \"1.34 ounces\", \"Product Dimensions\": \"3 x 2 x 2 inches\", \"Item model number\": \"8318084\", \"Is Discontinued By Manufacturer\": \"No\", \"Item Package Quantity\": \"1\", \"Included Components\": \"kkk\", \"Batteries Included?\": \"No\", \"Batteries Required?\": \"No\", \"Warranty Description\": \"kk\", \"Best Sellers Rank\": {\"Tools & Home Improvement\": 166821, \"Washer Parts & Accessories\": 684}, \"Date First Available\": \"August 7, 2008\"}',\n", |
||||
" 'parent_asin': 'B0050O1UR8',\n", |
||||
" 'bought_together': None,\n", |
||||
" 'subtitle': None,\n", |
||||
" 'author': None}\n", |
||||
"```\n", |
||||
"\n", |
||||
"### Takeaway\n", |
||||
"2% of the dataset contains duplicates, but most of these represent different physical objects. It does not appear to be worthwhile to remove them from the dataset. In fact, it can be better to keep them in order to have representative data.\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "0a1d7b72-a1ab-4fc4-9065-738bd11f8058", |
||||
"metadata": {}, |
||||
"source": [] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "403a42a2-3913-4905-9475-97509fe86c5e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.9" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,71 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "00f05a05-d989-4bf7-b1f1-9418e25ecd58", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"# The Product Pricer Continued\n", |
||||
"\n", |
||||
"I tested numerous frontier models from OpenAI, Anthropic, Google, and others via Groq API.\n", |
||||
"\n", |
||||
"Here are the results of all tests including ones from Day 3 and how the frontier models stacked up.\n", |
||||
"\n", |
||||
"They are ordered by Error from best to worst.\n", |
||||
"\n", |
||||
"I ran each model once on 2025-03-09.\n", |
||||
"\n", |
||||
"Main repo at [https://github.com/kellewic/llm](https://github.com/kellewic/llm)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a69cc81a-e582-4d04-8e12-fd83e120a7d1", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"| Rank | Model | Error ($) | RMSLE | Hits (%) | Chart Link |\n", |
||||
"|------|-----------------------------------|-----------|-------|----------|------------|\n", |
||||
"| 1 | **gemini-2.0-flash** | 73.48 | 0.56 | 56.4% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/gemini-2.0-flash.png) |\n", |
||||
"| 2 | **gpt-4o-2024-08-06** | 75.66 | 0.89 | 57.6% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/gpt-4o-2024-08-06.png) |\n", |
||||
"| 3 | **gemini-2.0-flash-lite** | 76.42 | 0.61 | 56.0% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/gemini-2.0-flash-lite.png) |\n", |
||||
"| 4 | **gpt-4o-mini (original)** | 81.61 | 0.60 | 51.6% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/gpt-4o-mini.png) |\n", |
||||
"| 5 | **claude-3-5-haiku-20241022** | 85.25 | 0.62 | 50.8% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/claude-3-5-haiku-20241022.png) |\n", |
||||
"| 6 | **claude-3-5-sonnet-20241022** | 88.97 | 0.61 | 49.2% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/claude-3-5-sonnet-20241022.png) |\n", |
||||
"| 7 | **claude-3-7-sonnet-20250219** | 89.41 | 0.62 | 55.2% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/claude-3-7-sonnet-20250219.png) |\n", |
||||
"| 8 | **mistral-saba-24b** | 98.02 | 0.82 | 44.8% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/mistral-saba-24b.png) |\n", |
||||
"| 9 | **llama-3.3-70b-versatile** | 98.24 | 0.70 | 44.8% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/llama-3.3-70b-versatile.png) |\n", |
||||
"| 10 | **GPT-4o-mini (fine-tuned)** | 101.49 | 0.81 | 41.2% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_tuning/gpt_fine_tuned.png) |\n", |
||||
"| 11 | **Random Forest Regressor** | 105.10 | 0.89 | 37.6% | [📊](https://github.com/kellewic/llm/blob/main/basic_model_training/random_forest_pricer.png) |\n", |
||||
"| 12 | **deepseek-r1-distill-llama-70b** | 109.09 | 0.67 | 48.4% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/deepseek-r1-distill-llama-70b.png) |\n", |
||||
"| 13 | **Linear SVR** | 110.91 | 0.92 | 29.2% | [📊](https://github.com/kellewic/llm/blob/main/basic_model_training/svr_pricer.png) |\n", |
||||
"| 14 | **Word2Vec LR** | 113.14 | 1.05 | 22.8% | [📊](https://github.com/kellewic/llm/blob/main/basic_model_training/word2vec_lr_pricer.png) |\n", |
||||
"| 15 | **Bag of Words LR** | 113.60 | 0.99 | 24.8% | [📊](https://github.com/kellewic/llm/blob/main/basic_model_training/bow_lr_pricer.png) |\n", |
||||
"| 16 | **Human Performance** | 126.55 | 1.00 | 32.0% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/human_pricer.png) |\n", |
||||
"| 17 | **Average** | 137.17 | 1.19 | 15.2% | [📊](https://github.com/kellewic/llm/blob/main/basic_model_training/average_pricer.png) |\n", |
||||
"| 18 | **Linear Regression** | 139.20 | 1.17 | 15.6% | [📊](https://github.com/kellewic/llm/blob/main/basic_model_training/linear_regression_pricer.png) |\n", |
||||
"| 19 | **deepseek-r1-distill-qwen-32b** | 151.59 | 0.80 | 38.4% | [📊](https://github.com/kellewic/llm/blob/main/frontier_model_test/deepseek-r1-distill-qwen-32b.png) |" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.12.2" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
Loading…
Reference in new issue