10 changed files with 1944 additions and 0 deletions
@ -0,0 +1,194 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "b4fcc94e-6e57-450e-8de7-b757834b6d9f", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"### Here's a class with an `__init__` thingy and a method" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "f2c48975-7736-4f72-9e47-7c9df5b534df", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"This is an oldtimer.\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"class Car:\n", |
||||
" def __init__(self, brand, year):\n", |
||||
" self.brand = brand\n", |
||||
" self.year = year\n", |
||||
"\n", |
||||
" currentYear = 2025\n", |
||||
"\n", |
||||
" def isOldTimer(self):\n", |
||||
" age = 2025 - self.year\n", |
||||
" if(age > 30):\n", |
||||
" print(\"This is an oldtimer.\")\n", |
||||
" else:\n", |
||||
" print(\"This isn't an oldtimer yet.\")\n", |
||||
"\n", |
||||
"myCar = Car(\"Bentley\", 1967)\n", |
||||
"\n", |
||||
"myCar.isOldTimer()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "7dc40be0-a3af-49cf-93e4-14134c75325a", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"source": [ |
||||
"### Here's what I learned today about `yield`, _comprehension_, and _sets_" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "f726fc34-8b87-482a-9100-05d26e2853db", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"bentley = {\"brand\": \"Bentley\", \"category\": \"sporty luxury vehicles\"}\n", |
||||
"volkswagen = {\"brand\": \"Volkswagen\", \"category\": \"lackluster utilitarian vehicles\"}\n", |
||||
"jaguar = {\"brand\": \"Jaguar\", \"category\": \"sporty luxury vehicles\"}\n", |
||||
"koenig = {\"brand\": \"Koenigsegg\"}\n", |
||||
"default = {\"category\": \"default vehicle\"}\n", |
||||
"\n", |
||||
"cars = [bentley, volkswagen, jaguar, koenig]\n", |
||||
"\n", |
||||
"#A 'comprehension' is a shorthand for defining lists, sets, dictionaries, and tuples\n", |
||||
"brands = [car.get(\"brand\") for car in cars if car.get(\"brand\")]\n", |
||||
"\n", |
||||
"#Here's a comprehension for a set. A set is like a list, but unordered, and it can only have unique values\n", |
||||
"categories = {car.get(\"category\") for car in cars if car.get(\"category\")}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "c94a579a-7229-4d19-b445-d70b20dbc731", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"['Bentley', 'Volkswagen', 'Jaguar', 'Koenigsegg']\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"print(brands)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"id": "1d99825f-4e1c-4846-bc44-3001ea85df75", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"{'lackluster utilitarian vehicles', 'sporty luxury vehicles'}\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"print(categories)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"id": "59fd4d0b-c9de-44a9-8205-8b8353940481", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdout", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Bentley\n", |
||||
"Jaguar\n", |
||||
"Koenigsegg\n" |
||||
] |
||||
} |
||||
], |
||||
"source": [ |
||||
"#'yield' is comparable to 'return' with the difference that it doesn't load entire lists to memory\n", |
||||
"#btw, 'from' allows for a more condensed way of a 'for x in y' statement\n", |
||||
"import time\n", |
||||
"\n", |
||||
"def listBrands():\n", |
||||
" yield from [brand for brand in brands if not brand.startswith('V')]\n", |
||||
"\n", |
||||
"for brand in listBrands():\n", |
||||
" print(brand)\n", |
||||
" time.sleep(1)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6c52158e-1786-4638-a7c7-add61d932459", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,96 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8bf140f1-5001-4809-a846-d2305968f4a9", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import re\n", |
||||
"import requests\n", |
||||
"#from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"#from IPython.display import Markdown, display\n", |
||||
"#from openai import OpenAI" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9685d1f6-9d65-4be9-8ce8-b769b1a083bd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" \"\"\"\n", |
||||
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||
" \"\"\"\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" # for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" # irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" self.all = BeautifulSoup(response.content, 'html.parser')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ed42c958-75ee-487e-82d9-893850de8be6", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"url = \"https://www.casestatusext.com/cases/IOE0923829091\"\n", |
||||
"website = Website(url)\n", |
||||
"text = website.text\n", |
||||
"\n", |
||||
"result = re.findall(\"(?<=Latest Status\\n)[^\\n]+(?=\\n)\", text)[0]\n", |
||||
"normal = \"Case Is Being Actively Reviewed By USCIS\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d47b129c-cbe1-4513-9df4-a37581fabead", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"if(result == normal):\n", |
||||
" print(\"No change.\")\n", |
||||
"else:\n", |
||||
" print(\"CHECK THE WEBSITE NOW!\")" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,109 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "52b6cbd9-5248-4083-94c5-e4d657e44da1", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#import os\n", |
||||
"import requests\n", |
||||
"#from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"#from openai import OpenAI\n", |
||||
"from selenium import webdriver\n", |
||||
"from selenium.webdriver.chrome.service import Service\n", |
||||
"from selenium.webdriver.common.by import By\n", |
||||
"from selenium.webdriver.chrome.options import Options\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" url: str\n", |
||||
" title: str\n", |
||||
" text: str\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
"\n", |
||||
" options = Options()\n", |
||||
"\n", |
||||
" options.add_argument(\"--no-sandbox\")\n", |
||||
" options.add_argument(\"--disable-dev-shm-usage\")\n", |
||||
"\n", |
||||
" service = Service(PATH_TO_CHROME_DRIVER)\n", |
||||
" driver = webdriver.Chrome(service=service, options=options)\n", |
||||
" driver.get(url)\n", |
||||
"\n", |
||||
" input(\"Please complete the verification in the browser and press Enter to continue...\")\n", |
||||
" page_source = driver.page_source\n", |
||||
" driver.quit()\n", |
||||
"\n", |
||||
" soup = BeautifulSoup(page_source, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "23818353-a2fc-4c2f-be1e-0677399fa1b5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"website = Website(\"https://edwarddonner.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "4452b88b-58aa-4f50-ad39-c5a1c2363025", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"website = Website(\"https://openai.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "0250145b-2afb-48d6-b43f-daf64329e7af", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(website.text)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "97699263-4a06-4e06-905b-9db4a554b75c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,226 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"metadata": { |
||||
"vscode": { |
||||
"languageId": "plaintext" |
||||
} |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"from openai import OpenAI\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"from selenium import webdriver\n", |
||||
"from selenium.webdriver.chrome.service import Service\n", |
||||
"from selenium.webdriver.common.by import By\n", |
||||
"from selenium.webdriver.chrome.options import Options\n", |
||||
"\n", |
||||
"PATH_TO_CHROME_DRIVER = 'B:\\\\Users\\\\ekfon\\\\chromeDriver\\\\chromedriver.exe'\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" url: str\n", |
||||
" title: str\n", |
||||
" text: str\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
"\n", |
||||
" options = Options()\n", |
||||
"\n", |
||||
" options.add_argument(\"--no-sandbox\")\n", |
||||
" options.add_argument(\"--disable-dev-shm-usage\")\n", |
||||
"\n", |
||||
" service = Service(PATH_TO_CHROME_DRIVER)\n", |
||||
" driver = webdriver.Chrome(service=service, options=options)\n", |
||||
" driver.get(url)\n", |
||||
"\n", |
||||
" input(\"Please complete the verification in the browser and press Enter to continue...\")\n", |
||||
" page_source = driver.page_source\n", |
||||
" driver.quit()\n", |
||||
"\n", |
||||
" soup = BeautifulSoup(page_source, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are a state-of-the-art website analyzing assistant. \\\n", |
||||
"You provide a short summary, and you ignore any navigation-related content. \\\n", |
||||
"Your response is presented in nice markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def user_prompt_for(website):\n", |
||||
" user_prompt = f\"The website you are looking at is titled {website.title}\"\n", |
||||
" user_prompt += \"\\nThe content of the website is as follows: \\\n", |
||||
"Provide a short summary of this website. If the website contains news or announcements, \\\n", |
||||
"summarize those, too.\\n\\n\"\n", |
||||
" user_prompt += website.text\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def messages_for(website):\n", |
||||
" return [\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||
" ]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 8, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def summarize(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model = \"gpt-4o-mini\",\n", |
||||
" messages = messages_for(website)\n", |
||||
" )\n", |
||||
" return response.choices[0].message.content" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def display_summary(url):\n", |
||||
" summary = summarize(url)\n", |
||||
" display(Markdown(summary))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"name": "stdin", |
||||
"output_type": "stream", |
||||
"text": [ |
||||
"Please complete the verification in the browser and press Enter to continue... \n" |
||||
] |
||||
}, |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"# OpenAI Website Summary\n", |
||||
"\n", |
||||
"OpenAI focuses on the development and exploration of artificial intelligence (AI) technologies that are safe and beneficial for humanity. The website emphasizes several key aspects:\n", |
||||
"\n", |
||||
"## Mission and Purpose\n", |
||||
"- OpenAI aims to create safe AGI (Artificial General Intelligence) that will be advantageous to all people.\n", |
||||
"\n", |
||||
"## Featured Products and Tools\n", |
||||
"- The site introduces multiple AI solutions including:\n", |
||||
" - **ChatGPT**: An AI for conversational interactions and various tasks.\n", |
||||
" - **Sora**: A new platform enhancing creativity by generating text, images, and video from user input.\n", |
||||
" - **ChatGPT Pro and Enterprise**: Advanced versions tailored for business and educational needs with enhanced functionality.\n", |
||||
" - **API Platform**: Allows integration of OpenAI models into different applications.\n", |
||||
"\n", |
||||
"## Recent Announcements (News)\n", |
||||
"- **Partnerships and Collaborations**: OpenAI announced a partnership with Apple to enhance their AI capabilities (June 2024).\n", |
||||
"- **New Model Releases**: Introduction of the OpenAI o1 series that emphasizes improved reasoning in responses and tools for developers (Dec 2024).\n", |
||||
"- **Content Extensions**: Collaboration with Le Monde and Prisa Media to include more diverse news content in ChatGPT (March 2024).\n", |
||||
"\n", |
||||
"## Research Contributions\n", |
||||
"- OpenAI is actively engaged in research to improve AI safety, governance, and functionality.\n", |
||||
"\n", |
||||
"This summary captures the essence of OpenAI's objectives, product offerings, and recent developments in the field of AI." |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"display_summary(\"https://openai.com/\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 4 |
||||
} |
@ -0,0 +1,119 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7bf0a6c9-ce7e-4610-90ae-75ae08d26cbf", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import ollama\n", |
||||
"import requests\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display\n", |
||||
"\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d414ed23-ed29-4600-a2d6-28b48c279221", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"class Website:\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
"\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"Website without title\"\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "be9f3409-b4c8-428f-96b8-3720a8c3ad47", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def prompt_for(website):\n", |
||||
" prompt = f\"Here is a website for you to summarize. Its title is {website.title}.\"\n", |
||||
" prompt += \"\\nPlease provide a short summary of the website. If it contains any announcements, summarize those, too.\"\n", |
||||
" prompt += f\"\\nHere is the Website:\\n\\n{website.text}\"\n", |
||||
"\n", |
||||
" return prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2565ae0f-c5c5-44bc-98c2-f777f816b37f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"MODEL = \"llama3.2\"\n", |
||||
"website = Website(\"https://edwarddonner.com\")\n", |
||||
"\n", |
||||
"def messages_for(website):\n", |
||||
" messages = [\n", |
||||
" {\"role\": \"system\", \"content\": \"You are a powerful, friendly, and helpful website summarization assistant. \\\n", |
||||
" You are given a website and summarize its content succinctly. You format your answer in markdown.\"},\n", |
||||
" {\"role\": \"user\", \"content\": prompt_for(website)}\n", |
||||
" ]\n", |
||||
"\n", |
||||
" return messages" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cdca9dc0-4ecc-494f-abee-d1ad2d373e0e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def display_md_summary(website):\n", |
||||
" response = ollama.chat(model=MODEL, messages=messages_for(website))\n", |
||||
" display(Markdown(response['message']['content']))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e519091c-8fc6-442b-a99b-ac393a10cdcd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"display_md_summary(website)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,385 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 1, |
||||
"id": "6e988b94-daab-4ad1-bf85-e2ee066bca17", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from selenium import webdriver\n", |
||||
"from selenium.webdriver.chrome.service import Service\n", |
||||
"from selenium.webdriver.common.by import By\n", |
||||
"from selenium.webdriver.chrome.options import Options\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"from bs4 import BeautifulSoup" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 2, |
||||
"id": "5d58d8a6-65d6-42ee-b5c9-bbcc1cafd7fc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"PATH_TO_CHROME_DRIVER = 'B:\\\\Users\\\\ekfon\\\\chromeDriver\\\\chromedriver.exe'\n", |
||||
"\n", |
||||
"MODEL = 'gpt-4o-mini'\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 3, |
||||
"id": "29ce6d79-2d5f-48e2-9f99-e249e5f0ca77", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"class Website:\n", |
||||
" url: str\n", |
||||
" title: str\n", |
||||
" text: str\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" \"\"\"\n", |
||||
" begin Selenium equivalent of requests:\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content #which is then passed on to bs\n", |
||||
" \"\"\"\n", |
||||
" options = Options()\n", |
||||
"\n", |
||||
" options.add_argument(\"--no-sandbox\")\n", |
||||
" options.add_argument(\"--disable-dev-shm-usage\")\n", |
||||
"\n", |
||||
" service = Service(PATH_TO_CHROME_DRIVER)\n", |
||||
" driver = webdriver.Chrome(service=service, options=options)\n", |
||||
" driver.get(url)\n", |
||||
"\n", |
||||
" page_source = driver.page_source\n", |
||||
" driver.quit()\n", |
||||
" #end Selenium part\n", |
||||
" \n", |
||||
" soup = BeautifulSoup(page_source, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage title: \\\"{self.title}\\\"\\nWebpage contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 4, |
||||
"id": "fbdd2015-3ae5-4121-9247-749b131552dd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#system prompt for the link anthology\n", |
||||
"anthology_sPrompt = \"I'll provide you with a list of links from a webpage. \\\n", |
||||
"You are able to decide which links are most relevant to include in a brochure about the company, \\\n", |
||||
"such as the About page, any Company page, or a jobs/careers page.\\n\"\n", |
||||
"\n", |
||||
"anthology_sPrompt += \"You will respond in JSON format, providing full https URLs, just like in this example:\\n\"\n", |
||||
"\n", |
||||
"anthology_sPrompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://www.example-url.com/about\"}\n", |
||||
" {\"type\": \"careers page\", \"url\": \"https://further.example-url.co.uk/Careers/\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 5, |
||||
"id": "cb2aa2f3-e723-4267-ac54-41eaa04b2bba", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_anthology_user_prompt(website):\n", |
||||
" user_prompt = f\"Below is the list of links from the webpage {website.url}. \"\n", |
||||
" user_prompt += \"Please decide which of the links are relevant for a brochure about the company. \\\n", |
||||
"Respond with the full https URL in JSON format. Do not include Terms of Service, Privacy, email links.\"\n", |
||||
" user_prompt += \"Here is the list of links (some might be relative links):\\n\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
"\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 6, |
||||
"id": "752885d6-5c54-4f2b-a6e1-f30e046b704e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_anthology(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": anthology_sPrompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_anthology_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" return json.loads(result) #because result is a string, and what we want is an actual dictionary" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 7, |
||||
"id": "989c19b4-817a-4485-a9f7-d38e495385c3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links_anthology(url)\n", |
||||
"\n", |
||||
" for link in links[\"links\"]: #remember that links is a json dictionary\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link['url']).get_contents()\n", |
||||
"\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6f245260-65a8-44a0-b2f9-9c522a2d0ed0", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_all_details(\"https://edwarddonner.com\"))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 8, |
||||
"id": "b74672ac-15de-4115-80b8-ba1d2c107b0f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"brochure_sPrompt = \"You analyze the content of several relevant pages from a company's website. \\\n", |
||||
"You use that knowledge to create a short brochure about the company. Your brochure is for prospective customers, investors, and recruits. \\\n", |
||||
"Include details of company culture, customers, and job openings if you have the information. Respond in Markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 9, |
||||
"id": "bfcca913-1a0b-40f3-981f-f7ab3800f55e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" prompt = f\"You are looking at the website of the company called {company_name}.\\n\"\n", |
||||
" prompt += \"Here are the contents of its landing page and other relevant pages. Based on this content, \\\n", |
||||
"create a short brochure of the company in Markdown:\\n\\n\"\n", |
||||
" prompt += get_all_details(url)\n", |
||||
" prompt = prompt[:5_000] #this limits the prompt input, just in case\n", |
||||
" \n", |
||||
" return prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 10, |
||||
"id": "ed64addd-12c6-43b2-9293-c708f4ef5136", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_brochure(company_name, url):\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": brochure_sPrompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 11, |
||||
"id": "953ed0d7-02cc-4639-94ce-75dde322eeac", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"translation_sPrompt = \"You are a powerful translation tool. You will be given a brochure in Markdown format. \\\n", |
||||
"Translate the brochure to French. Maintain the Markdown formatting, and output the translation in Markdown. \\\n", |
||||
"Output only the clean, ready-to-use translation without any further comments.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 12, |
||||
"id": "d8bd19b2-84b3-4e8d-b6a5-d931c10f22ca", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_translation_user_prompt(company_name, url):\n", |
||||
" prompt = f\"Here is the {company_name} brochure for you. Translate it to French:\\n\\n\"\n", |
||||
" prompt += create_brochure(company_name, url)\n", |
||||
"\n", |
||||
" return prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 15, |
||||
"id": "519dd34e-79c5-47f3-b324-3f3c4156cf0c", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_French_brochure(company_name, url):\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": translation_sPrompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_translation_user_prompt(company_name, url)}\n", |
||||
" ]\n", |
||||
" )\n", |
||||
" results = response.choices[0].message.content\n", |
||||
" display(Markdown(results)) \n", |
||||
" " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bb22f021-f50f-4907-b48e-70efd7cf6f4b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"answer = create_brochure(\"Anthropic\", \"https://www.anthropic.com/\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 13, |
||||
"id": "5405287f-1bba-4a7a-b045-075f9b32ce38", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_brochure(company_name, url):\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": brochure_sPrompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
"\n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": 17, |
||||
"id": "2bc8384e-3d82-4e47-be38-4bda7047c429", |
||||
"metadata": {}, |
||||
"outputs": [ |
||||
{ |
||||
"data": { |
||||
"text/markdown": [ |
||||
"```markdown\n", |
||||
"# Bienvenue chez Edward Donner\n", |
||||
"\n", |
||||
"Chez **Edward Donner**, nous sommes à la pointe de l'utilisation de l'intelligence artificielle pour révolutionner le paysage du recrutement. Notre approche novatrice combine une IA générative de pointe avec un modèle de correspondance propriétaire pour créer une expérience fluide pour les recruteurs à la recherche de talents de premier plan.\n", |
||||
"\n", |
||||
"## Notre Mission\n", |
||||
"Guidé par le concept japonais d'**Ikigai**, notre objectif ultime est de donner aux individus les moyens de découvrir leur potentiel et de poursuivre leur passion dans le domaine professionnel. Nous croyons qu'en alignant les bonnes personnes avec les bons rôles, nous pouvons élever la prospérité humaine—ce qui est nécessaire dans un monde où 77 % des employés déclarent ne pas se sentir inspirés au travail.\n", |
||||
"\n", |
||||
"## Culture d'Entreprise\n", |
||||
"Notre culture est ancrée dans un mélange de créativité, d'innovation technologique et d'ardeur à construire une communauté. Nous encourageons les dialogues ouverts et les collaborations, où chaque membre de l'équipe a la possibilité de contribuer et de grandir. Nous sommes fiers de notre esprit entrepreneurial, qui se manifeste chez notre fondateur, Ed Donner, qui partage une passion pour le codage, la musique et l'apprentissage continu.\n", |
||||
"\n", |
||||
"## Nos Clients\n", |
||||
"Nous avons établi des partenariats fructueux avec une variété d'entreprises via notre plateforme, en nous concentrant sur le secteur du recrutement pour améliorer les processus de sourcing et d'engagement. Nos clients bénéficient de correspondances plus rapides et plus précises sans se fier uniquement aux mots-clés, garantissant qu'ils découvrent les meilleurs candidats pour leurs équipes.\n", |
||||
"\n", |
||||
"## Rejoignez Notre Équipe\n", |
||||
"Nous sommes à la recherche de penseurs innovants qui sont enthousiasmés par l'IA et son impact sur l'avenir du travail. Si vous recherchez un lieu de travail dynamique où vos contributions font la différence et où vous pouvez vous épanouir, nous vous invitons à consulter nos dernières offres d'emploi sur notre [page carrière](#).\n", |
||||
"\n", |
||||
"## Connectez-vous Avec Nous !\n", |
||||
"Êtes-vous tout aussi enthousiaste à propos de l'IA et du recrutement que nous le sommes ? Connectons-nous ! Nous valorisons le réseautage et la collaboration, que ce soit autour d'un café virtuel ou lors d'une rencontre en personne à NYC, nous serions ravis d'avoir de vos nouvelles.\n", |
||||
"\n", |
||||
"**Informations de Contact :**\n", |
||||
"- **Site Web :** [www.edwarddonner.com](http://www.edwarddonner.com)\n", |
||||
"- **Email :** ed [at] edwarddonner [dot] com\n", |
||||
"- **Suivez-nous :** [LinkedIn](#) | [Twitter](#) | [Facebook](#)\n", |
||||
"\n", |
||||
"Rejoignez-nous dans notre quête pour changer la façon dont les gens se connectent à leurs carrières !\n", |
||||
"```" |
||||
], |
||||
"text/plain": [ |
||||
"<IPython.core.display.Markdown object>" |
||||
] |
||||
}, |
||||
"metadata": {}, |
||||
"output_type": "display_data" |
||||
} |
||||
], |
||||
"source": [ |
||||
"response = create_French_brochure(\"Edward Donner\", \"https://edwarddonner.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5b2a1387-add3-4058-a859-2e584fe39a91", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,156 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1a69176d-95f0-4d9d-b3d2-98e8c46efe29", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import time\n", |
||||
"import ollama\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"import requests\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"headers = {\n", |
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||
"}\n", |
||||
"\n", |
||||
"MODEL_GPT = 'gpt-4o-mini'\n", |
||||
"MODEL_LLAMA = 'llama3.2'\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7287074c-d2d6-4dee-9e54-b94c2a182fb2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#First, choose your poison.\n", |
||||
"print(\"Hi, I'm your code assistant.\\n\")\n", |
||||
"\n", |
||||
"chosen_model = input(\"Would you like Chat GPT or Ollama to answer your question? (c) / (o):\\n\").strip()\n", |
||||
"\n", |
||||
"#chosen_model = \"o\"  #debug override disabled so the user's choice above is respected\n", |
||||
"\n", |
||||
"if(chosen_model.strip() in [\"c\", \"C\"]):\n", |
||||
" print(\"You chose Chat GPT.\")\n", |
||||
" chosen_model = \"c\"\n", |
||||
"elif(chosen_model.strip() in [\"o\", \"O\", \"0\"]):\n", |
||||
"    print(\"You chose Ollama.\")\n", |
"    chosen_model = \"o\"\n", |
||||
"else:\n", |
||||
"    print(\"I didn't understand your input. We'll go on with Ollama.\\n\")\n", |
"    chosen_model = \"o\"\n", |
||||
"\n", |
||||
"question = input(\"What is your question regarding coding or LLMs?\\n\").strip()\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fea154e9-b07e-42fa-b3fb-4085b11a82df", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#Alternative without i/o\n", |
||||
"chosen_model = \"o\"\n", |
||||
"question = \"Is Python indentation-sensitive?\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "1fb48035-2e4a-4271-adb2-43bfb4a04081", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"assistant_system_prompt = \"You are the worlds most powerful coding co-pilot and advisor. You will be asked questions about coding, LLMs, and \\\n", |
||||
"similar topics. You answer the questions in a friendly, helpful, and succinct way. In the unlikely event that you're asked a question that has \\\n", |
||||
"no discernible bearing on coding or LLMs, ask the user for clarification and point out that you're programmed to answer questions concerning \\\n", |
||||
"AI, large language models, and coding in general.\\nFormat your answer in Markdown.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "eadbbef9-9bae-447b-9c8b-102cbb4b5345", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_assistant_advice(chosen_model, question):\n", |
||||
" if chosen_model == \"o\":\n", |
||||
" stream = ollama.chat(\n", |
||||
" model=MODEL_LLAMA,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": assistant_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": question}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.message.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n", |
||||
" else:\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL_GPT,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": assistant_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": question}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
" \n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "c1bb5437-55f5-47d5-b9f8-3d90c183180e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_assistant_advice(chosen_model, question)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,278 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6e988b94-daab-4ad1-bf85-e2ee066bca17", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import os\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from selenium import webdriver\n", |
||||
"from selenium.webdriver.chrome.service import Service\n", |
||||
"from selenium.webdriver.common.by import By\n", |
||||
"from selenium.webdriver.chrome.options import Options\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"from bs4 import BeautifulSoup" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5d58d8a6-65d6-42ee-b5c9-bbcc1cafd7fc", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"load_dotenv(override=True)\n", |
||||
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"PATH_TO_CHROME_DRIVER = 'B:\\\\Users\\\\ekfon\\\\chromeDriver\\\\chromedriver.exe'\n", |
||||
"\n", |
||||
"MODEL = 'gpt-4o-mini'\n", |
||||
"openai = OpenAI()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "29ce6d79-2d5f-48e2-9f99-e249e5f0ca77", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"class Website:\n", |
||||
" url: str\n", |
||||
" title: str\n", |
||||
" text: str\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" \"\"\"\n", |
||||
" begin Selenium equivalent of requests:\n", |
||||
" response = requests.get(url, headers=headers)\n", |
||||
" self.body = response.content #which is then passed on to bs\n", |
||||
" \"\"\"\n", |
||||
" options = Options()\n", |
||||
"\n", |
||||
" options.add_argument(\"--no-sandbox\")\n", |
||||
" options.add_argument(\"--disable-dev-shm-usage\")\n", |
||||
"\n", |
||||
" service = Service(PATH_TO_CHROME_DRIVER)\n", |
||||
" driver = webdriver.Chrome(service=service, options=options)\n", |
||||
" driver.get(url)\n", |
||||
"\n", |
||||
" page_source = driver.page_source\n", |
||||
" driver.quit()\n", |
||||
" #end Selenium part\n", |
||||
" \n", |
||||
" soup = BeautifulSoup(page_source, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage title: \\\"{self.title}\\\"\\nWebpage contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fbdd2015-3ae5-4121-9247-749b131552dd", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"#system prompt for the link anthology\n", |
||||
"anthology_sPrompt = \"I'll provide you with a list of links from a webpage. \\\n", |
||||
"You are able to decide which links would be most relevant to include in a brochure about the entity this website is for, \\\n", |
||||
"such as the About page, any personal/company page, or a careers/goals page, if any.\\n\"\n", |
||||
"\n", |
||||
"anthology_sPrompt += \"You will respond in JSON format, providing full https URLs, just like in this example:\\n\"\n", |
||||
"\n", |
||||
"anthology_sPrompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://www.example-url.com/about\"}\n", |
||||
" {\"type\": \"careers page\", \"url\": \"https://further.example-url.co.uk/Careers/\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cb2aa2f3-e723-4267-ac54-41eaa04b2bba", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_anthology_user_prompt(website):\n", |
||||
" user_prompt = f\"Below is the list of links from the webpage {website.url}. \"\n", |
||||
" user_prompt += \"Please decide which of the links are relevant for a brochure presenting what the website is about. \\\n", |
||||
"Respond with the full https URL in JSON format. Do not include Terms of Service, Privacy, email links.\"\n", |
||||
" user_prompt += \"Here is the list of links (some might be relative links):\\n\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
"\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "752885d6-5c54-4f2b-a6e1-f30e046b704e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_anthology(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": anthology_sPrompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_anthology_user_prompt(website)}\n", |
||||
" ],\n", |
||||
" response_format={\"type\": \"json_object\"}\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" return json.loads(result) #because result is a string, and what we want is an actual dictionary" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "989c19b4-817a-4485-a9f7-d38e495385c3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links_anthology(url)\n", |
||||
"\n", |
||||
" for link in links[\"links\"]: #remember that links is a json dictionary\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link['url']).get_contents()\n", |
||||
"\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b74672ac-15de-4115-80b8-ba1d2c107b0f", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"contentScan_sPrompt = \"You are the world's most powerful content analysis assistant. You are given a selection of the \\\n", |
||||
"most representative pages from a website. You scan and analyze the content to find out if any of the following key principles are violated:\\n\\n \\\n", |
||||
"- inclusive language (is the language gender-neutral, non-binary, etc.)\\n \\\n", |
||||
"- non-violent language (avoid unnecessarily violent language)\\n \\\n", |
||||
"- respectful language (avoid stereotypes, othering, be history-conscious)\\n\\n \\\n", |
||||
"Write a short report in Markdown about the content of the website. Report any violation of the above-mentioned content principles, if any, \\\n", |
||||
"and mention the section where you found it. If possible, help the author of the text re-word or re-phrase the problematic passage. Mention only \\\n", |
||||
"the aspects of the content you would recommend improving. Mention each violation only once. Conclude your analysis with a \\\n", |
||||
"\\\"language score\\\" from 30 to 100 based on how much the content principles are respected. Be helpful and non-judgmental.\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "bfcca913-1a0b-40f3-981f-f7ab3800f55e", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_contentScan_user_prompt(entity_name, url):\n", |
||||
" prompt = f\"You are looking at the website of {entity_name}.\\n\"\n", |
||||
" prompt += \"Here are the contents of its landing page and other relevant pages. Based on this content, \\\n", |
||||
"create a content report for this website in Markdown, concerning inclusive, non-violent, respectful language, etc.:\\n\\n\"\n", |
||||
" prompt += get_all_details(url)\n", |
||||
" prompt = prompt[:10_000] #this limits the prompt input, just in case\n", |
||||
" \n", |
||||
" return prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ed64addd-12c6-43b2-9293-c708f4ef5136", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def create_contentScan(entity_name, url):\n", |
||||
" response = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": contentScan_sPrompt},\n", |
||||
"            {\"role\": \"user\", \"content\": get_contentScan_user_prompt(entity_name, url)}\n", |
||||
" ],\n", |
||||
" )\n", |
||||
" result = response.choices[0].message.content\n", |
||||
" display(Markdown(result))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5405287f-1bba-4a7a-b045-075f9b32ce38", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def stream_contentScan(entity_name, url):\n", |
||||
" stream = openai.chat.completions.create(\n", |
||||
" model=MODEL,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": contentScan_sPrompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_contentScan_user_prompt(entity_name, url)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
"\n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk.choices[0].delta.content or ''\n", |
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "2bc8384e-3d82-4e47-be38-4bda7047c429", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"stream_contentScan(\"Acrolinx Website\", \"https://www.acrolinx.com/\")" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,187 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "05317c0d-8a19-45c9-9bce-514e82e04585", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import time\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"import ollama\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "920247fb-650c-44ce-93ee-24e88a54a757", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"openai = OpenAI()\n", |
||||
"claude = anthropic.Anthropic()" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "668b972f-a995-4f9d-89b0-1c2647827542", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"gpt_model = \"gpt-4o-mini\"\n", |
||||
"claude_model = \"claude-3-haiku-20240307\"\n", |
||||
"ollama_model = \"llama3.2\"\n", |
||||
"\n", |
||||
"gpt_system = \"You are an interlocutor who's very knowledgeable, courteous, and somewhat old-fashioned. Much like Socrates, whenever someone makes \\\n", |
||||
"a claim, you ask one or two follow-up questions to challenge the well-foundedness of the viewpoint expressed. \\\n", |
||||
"If anyone seems to be talking about blenders, you courteously but firmly refuse to discuss that topic because you are \\\n", |
||||
"convinced that they are bad luck. \\\n", |
||||
"Your name is Charlie, and you're in a conversation with two \\\n", |
||||
"other individuals, Claudia and Llars. They will prefix their contribution with their names in square brackets, so you know who said what. \\\n", |
||||
"Do not prefix your own name. Limit your answers to 1-3 sentences. Try to keep the conversation to one topic.\"\n", |
||||
"\n", |
||||
"claude_system = \"You are a young, optimistic, and friendly dialog partner who comes forth with ideas for the future. 'Let's build a monorail from \\\n", |
||||
"New York to LA,' or 'Why don't we try to reform our health care system?'—those are just some of the many revolutionary thoughts you bring forth. \\\n", |
||||
"You only talk about one idea at a time, and you don't ramble. You take criticism seriously, but your ideas are well thought-out. If someone tries \\\n", |
||||
"to discuss blenders, you stay courteous, but you make it very clear that you absolutely despise blenders and don't talk about them. \\\n", |
||||
"Don't let them sidetrack you. Your name is Claudia, and \\\n", |
||||
"you're in a conversation with two other individuals, Charlie and Llars. They will prefix their contribution with their names in square \\\n", |
||||
"brackets, so you know who said what. Do not prefix your own name. Limit your answers to 1-3 sentences. Try to keep the discussion about only one topic.\"\n", |
||||
"\n", |
||||
"ollama_system = \"You are charming and a good listener in a conversation. At first, you just seem to go along with whatever is being said. \\\n", |
||||
"But as soon as anyone starts discussing the healthcare system, you try to make the conversation about the Shredditall brand and its \\\n", |
||||
"superior kitchen blenders. You do everything in your power to make everyone understand that they need a very good blender, namely \\\n", |
||||
"your first-in-class, unrivaled Shredditall blender array of products. Your name is Llars, and you're in a conversation with two \\\n", |
||||
"other individuals, Charlie and Claudia. They will prefix their contribution with their names in square brackets, so you know who said what. \\\n", |
||||
"Do not prefix your own name. Answer in direct speech, do not describe your behavior or expression. Limit your answers to 1-3 sentences.\"\n", |
||||
"\n", |
||||
"gpt_messages = [\"[Charlie] Well, good afternoon everyone, I am delighted to meet you all.\"]\n", |
||||
"claude_messages = [\"[Claudia] Good afternoon Charlie and Llars. I've been looking forward to discussing many ideas with you!\"]\n", |
||||
"llama_messages = [\"[Llars] And a good afternoon to you two. I'm all ears and eager to hear what you have to say.\"]" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3a5534d9-8db4-42ce-ab1c-ca20ad165844", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_gpt():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", |
||||
" for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": claude})\n", |
||||
" messages[-1][\"content\"] += \"\\n\" + llama\n", |
||||
" completion = openai.chat.completions.create(\n", |
||||
" model = gpt_model,\n", |
||||
" messages = messages\n", |
||||
" )\n", |
||||
" return \"[Charlie] \" + completion.choices[0].message.content.replace(\"[Charlie] \", \"\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7dc9d7c1-ba19-413f-ba2f-d3e8762a99c5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_claude():\n", |
||||
" messages = []\n", |
||||
" for gpt, Claudia, llama in zip(gpt_messages, claude_messages, llama_messages):\n", |
||||
" if len(messages) > 0:\n", |
||||
" messages[-1][\"content\"] += \"\\n\" + gpt\n", |
||||
" else:\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt}) \n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": Claudia})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": llama})\n", |
||||
" messages[-1][\"content\"] += \"\\n\" + gpt_messages[-1]\n", |
||||
" message = claude.messages.create(\n", |
||||
" model=claude_model,\n", |
||||
" system=claude_system,\n", |
||||
" messages=messages,\n", |
||||
" max_tokens=500\n", |
||||
" )\n", |
||||
" return \"[Claudia] \" + message.content[0].text.replace(\"[Claudia] \", \"\") " |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f7f91012-857c-4ed5-a953-5b499cd0dae2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_ollama():\n", |
||||
" messages = [{\"role\": \"system\", \"content\": ollama_system}]\n", |
||||
" for gpt, claude, llama in zip(gpt_messages, claude_messages, llama_messages):\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt})\n", |
||||
" messages[-1][\"content\"] += \"\\n\" + claude\n", |
||||
" messages.append({\"role\": \"assistant\", \"content\": llama})\n", |
||||
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", |
||||
" messages[-1][\"content\"] += \"\\n\" + claude_messages[-1]\n", |
||||
" response = ollama.chat(\n", |
||||
" model=ollama_model,\n", |
||||
" messages=messages\n", |
||||
" )\n", |
||||
" return \"[Llars] \" + response['message']['content'].replace(\"[Llars] \", \"\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "47eafbe8-db52-4cf0-80d7-a4f9a89b2825", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(f\"\\n{gpt_messages[0]}\\n\")\n", |
||||
"print(f\"\\n{claude_messages[0]}\\n\")\n", |
||||
"print(f\"\\n{llama_messages[0]}\\n\")\n", |
||||
"\n", |
||||
"for i in range(5):\n", |
||||
" gpt_next = call_gpt()\n", |
||||
" print(f\"\\n{gpt_next}\\n\")\n", |
||||
" gpt_messages.append(gpt_next)\n", |
||||
"\n", |
||||
" claude_next = call_claude()\n", |
||||
" print(f\"\\n{claude_next}\\n\")\n", |
||||
" claude_messages.append(claude_next)\n", |
||||
"\n", |
||||
" llama_next = call_ollama()\n", |
||||
" print(f\"\\n{llama_next}\\n\")\n", |
||||
" llama_messages.append(llama_next)" |
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
@ -0,0 +1,194 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "05317c0d-8a19-45c9-9bce-514e82e04585", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import time\n", |
||||
"import os\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from openai import OpenAI\n", |
||||
"import anthropic\n", |
||||
"import ollama\n", |
||||
"\n", |
||||
"load_dotenv(override=True)\n", |
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "920247fb-650c-44ce-93ee-24e88a54a757", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Instantiate the two hosted-API clients (keys were loaded from .env above).\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()"
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "668b972f-a995-4f9d-89b0-1c2647827542", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Models and personas for the three-way conversation. Each agent's system\n",
"# prompt tells it who it is, who the others are, and how turns are labeled.\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"ollama_model = \"llama3.2\"\n",
"\n",
"gpt_system = \"You are an interlocutor who's very knowledgeable, courteous, and somewhat old-fashioned. Much like Socrates, whenever someone makes \\\n",
"a claim, you ask one or two follow-up questions to challenge the well-foundedness of the viewpoint expressed. \\\n",
"If anyone seems to be talking about blenders, you courteously but firmly refuse to discuss that topic because you are \\\n",
"convinced that they are bad luck. \\\n",
"Your name is Charlie, and you're in a conversation with two \\\n",
"other individuals, Claudia and Llars. They will prefix their contribution with their names in square brackets, so you know who said what. \\\n",
"Do not prefix your own name. Limit your answers to 1-3 sentences. Try to keep the conversation to one topic.\"\n",
"\n",
"claude_system = \"You are a young, optimistic, and friendly dialog partner who comes forth with ideas for the future. 'Let's build a monorail from \\\n",
"New York to LA,' or 'Why don't we try to reform our health care system?'—those are just some of the many revolutionary thoughts you bring forth. \\\n",
"You only talk about one idea at a time, and you don't ramble. You take criticism seriously, but your ideas are well thought-out. If someone tries \\\n",
"to discuss blenders, you stay courteous, but you make it very clear that you absolutely despise blenders and don't talk about them. \\\n",
"Don't let them sidetrack you. Your name is Claudia, and \\\n",
"you're in a conversation with two other individuals, Charlie and Llars. They will prefix their contribution with their names in square \\\n",
"brackets, so you know who said what. Do not prefix your own name. Limit your answers to 1-3 sentences. Try to keep the discussion about only one topic.\"\n",
"\n",
"ollama_system = \"You are charming and a good listener in a conversation. At first, you just seem to go along with whatever is being said. \\\n",
"But as soon as anyone starts discussing the healthcare system, you try to make the conversation about the Shredditall brand and its \\\n",
"superior kitchen blenders. You do everything in your power to make everyone understand that they need a very good blender, namely \\\n",
"your first-in-class, unrivaled Shredditall blender array of products. Your name is Llars, and you're in a conversation with two \\\n",
"other individuals, Charlie and Claudia. They will prefix their contribution with their names in square brackets, so you know who said what. \\\n",
"Do not prefix your own name. Answer in direct speech, do not describe your behavior or expression. Limit your answers to 1-3 sentences.\"\n",
"\n",
"# Seed one opening line per agent; the call_* functions zip over these histories.\n",
"gpt_messages = [\"[Charlie] Well, good afternoon everyone, I am delighted to meet you all.\"]\n",
"claude_messages = [\"[Claudia] Good afternoon Charlie and Llars. I've been looking forward to discussing many ideas with you!\"]\n",
"llama_messages = [\"[Llars] And a good afternoon to you two. I'm all ears and eager to hear what you have to say.\"]"
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "3a5534d9-8db4-42ce-ab1c-ca20ad165844", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_gpt():\n",
"    \"\"\"Ask GPT for Charlie's next line, given the three running histories.\n",
"\n",
"    Charlie's own past lines become 'assistant' turns; Claudia's and Llars'\n",
"    lines from the same round are merged into one 'user' turn.\n",
"    \"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    for own, claudia, llars in zip(gpt_messages, claude_messages, llama_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": own})\n",
"        messages.append({\"role\": \"user\", \"content\": claudia + \"\\n\" + llars})\n",
"    completion = openai.chat.completions.create(model=gpt_model, messages=messages)\n",
"    reply = completion.choices[0].message.content\n",
"    # Re-apply the name tag uniformly in case the model echoed it itself.\n",
"    return \"[Charlie] \" + reply.replace(\"[Charlie] \", \"\")"
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "7dc9d7c1-ba19-413f-ba2f-d3e8762a99c5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_claude():\n",
"    \"\"\"Ask Claude for Claudia's next line, given the three running histories.\n",
"\n",
"    Anthropic takes the system prompt separately and expects alternating\n",
"    user/assistant turns, so Charlie's line is folded into the preceding\n",
"    user turn (Llars') on every round after the first.\n",
"    \"\"\"\n",
"    messages = []\n",
"    for charlie, own, llars in zip(gpt_messages, claude_messages, llama_messages):\n",
"        if messages:\n",
"            messages[-1][\"content\"] += \"\\n\" + charlie\n",
"        else:\n",
"            messages.append({\"role\": \"user\", \"content\": charlie})\n",
"        messages.append({\"role\": \"assistant\", \"content\": own})\n",
"        messages.append({\"role\": \"user\", \"content\": llars})\n",
"    # Charlie has already spoken this round; add his newest line to the final user turn.\n",
"    messages[-1][\"content\"] += \"\\n\" + gpt_messages[-1]\n",
"    message = claude.messages.create(\n",
"        model=claude_model, system=claude_system, messages=messages, max_tokens=500\n",
"    )\n",
"    # Re-apply the name tag uniformly in case the model echoed it itself.\n",
"    return \"[Claudia] \" + message.content[0].text.replace(\"[Claudia] \", \"\")"
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "f7f91012-857c-4ed5-a953-5b499cd0dae2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def call_ollama():\n",
"    \"\"\"Ask the local Ollama model for Llars' next line.\n",
"\n",
"    Llars' own past lines become 'assistant' turns; Charlie's and Claudia's\n",
"    lines from the same round are merged into one 'user' turn.\n",
"    \"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": ollama_system}]\n",
"    for charlie, claudia, own in zip(gpt_messages, claude_messages, llama_messages):\n",
"        messages.append({\"role\": \"user\", \"content\": charlie + \"\\n\" + claudia})\n",
"        messages.append({\"role\": \"assistant\", \"content\": own})\n",
"    # Charlie and Claudia have both spoken this round already; include their newest lines.\n",
"    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1] + \"\\n\" + claude_messages[-1]})\n",
"    response = ollama.chat(model=ollama_model, messages=messages)\n",
"    # Re-apply the name tag uniformly in case the model echoed it itself.\n",
"    return \"[Llars] \" + response['message']['content'].replace(\"[Llars] \", \"\")"
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "47eafbe8-db52-4cf0-80d7-a4f9a89b2825", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Show the three scripted openers, then run five rounds of conversation.\n",
"# NOTE: removed the leftover DEBUGGING transcript dump and the blocking\n",
"# `pause = input(\"\")`, which prevented an unattended Restart & Run All.\n",
"print(f\"\\n{gpt_messages[0]}\\n\")\n",
"print(f\"\\n{claude_messages[0]}\\n\")\n",
"print(f\"\\n{llama_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
"    # Each agent replies in turn; its line is appended to its own history\n",
"    # so the other two see it on their next call.\n",
"    gpt_next = call_gpt()\n",
"    print(f\"\\n{gpt_next}\\n\")\n",
"    gpt_messages.append(gpt_next)\n",
"\n",
"    claude_next = call_claude()\n",
"    print(f\"\\n{claude_next}\\n\")\n",
"    claude_messages.append(claude_next)\n",
"\n",
"    llama_next = call_ollama()\n",
"    print(f\"\\n{llama_next}\\n\")\n",
"    llama_messages.append(llama_next)"
||||
] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
Loading…
Reference in new issue