1 changed file with 402 additions and 0 deletions
@@ -0,0 +1,402 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "76cf81ba-4caf-41dc-9cdb-c3a89d4b9f50",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "from bs4 import BeautifulSoup\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "import google.generativeai as genai  # This is how I finally got both to work: system_instruction and stream\n",
    "import anthropic\n",
    "from IPython.display import Markdown\n",
    "from selenium import webdriver\n",
    "from selenium.webdriver.chrome.service import Service\n",
    "from selenium.webdriver.common.by import By\n",
    "from selenium.webdriver.chrome.options import Options\n",
    "import gradio as gr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8fa0a173-710c-4fe8-94a3-c0768ae0a067",
   "metadata": {},
   "outputs": [],
   "source": [
    "load_dotenv()\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
    "\n",
    "PATH_TO_CHROME_DRIVER = 'B:\\\\Users\\\\ekfon\\\\chromeDriver\\\\chromedriver.exe'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd9f3562-6049-44e4-80ad-d41318a547b5",
   "metadata": {},
   "outputs": [],
   "source": [
    "openai = OpenAI()\n",
    "claude = anthropic.Anthropic()\n",
    "genai.configure(api_key=google_api_key)  # for gemini"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5faea20-2168-4386-b22c-12a240b45990",
   "metadata": {},
   "outputs": [],
   "source": [
    "force_dark_mode = \"\"\"\n",
    "function refresh() {\n",
    "    const url = new URL(window.location);\n",
    "    if (url.searchParams.get('__theme') !== 'dark') {\n",
    "        url.searchParams.set('__theme', 'dark');\n",
    "        window.location.href = url.href;\n",
    "    }\n",
    "}\n",
    "\"\"\"\n",
    "\n",
    "force_light_mode = \"\"\"\n",
    "function refresh() {\n",
    "    const url = new URL(window.location);\n",
    "    if (url.searchParams.get('__theme') !== 'light') {\n",
    "        url.searchParams.set('__theme', 'light');\n",
    "        window.location.href = url.href;\n",
    "    }\n",
    "}\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e52e4249-340a-410f-ba39-38d5c231395e",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Website:\n",
    "    url: str\n",
    "    title: str\n",
    "    text: str\n",
    "\n",
    "    def __init__(self, url):\n",
    "        self.url = url\n",
    "        options = Options()\n",
    "\n",
    "        options.add_argument(\"--no-sandbox\")\n",
    "        options.add_argument(\"--disable-dev-shm-usage\")\n",
    "\n",
    "        service = Service(PATH_TO_CHROME_DRIVER)\n",
    "        driver = webdriver.Chrome(service=service, options=options)\n",
    "        driver.get(url)\n",
    "\n",
    "        page_source = driver.page_source\n",
    "        driver.quit()\n",
    "\n",
    "        soup = BeautifulSoup(page_source, 'html.parser')\n",
    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
    "        if soup.body:\n",
    "            for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n",
    "                irrelevant.decompose()\n",
    "            self.text = soup.get_text(separator=\"\\n\", strip=True)\n",
    "        else:\n",
    "            self.text = \"\"\n",
    "\n",
    "        links = [link.get('href') for link in soup.find_all('a')]\n",
    "        self.links = [link for link in links if link]\n",
    "\n",
    "    def get_contents(self):\n",
    "        return f\"Webpage title: \\\"{self.title}\\\"\\nWebpage contents:\\n{self.text}\\n\\n\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f883d449-1139-4510-aaab-9912f8bd2837",
   "metadata": {},
   "outputs": [],
   "source": [
    "# system prompt for the link anthology\n",
    "anthology_sPrompt = \"I'll provide you with a list of links from a webpage. \\\n",
    "You are able to decide which links are most relevant to include in a brochure about the company, \\\n",
    "such as the About page, any Company page, or a jobs/careers page.\\n\"\n",
    "\n",
    "anthology_sPrompt += \"You will respond in JSON format, providing full https URLs, just like in this example:\\n\"\n",
    "\n",
    "anthology_sPrompt += \"\"\"\n",
    "{\n",
    "    \"links\": [\n",
    "        {\"type\": \"about page\", \"url\": \"https://www.example-url.com/about\"},\n",
    "        {\"type\": \"careers page\", \"url\": \"https://further.example-url.co.uk/Careers/\"}\n",
    "    ]\n",
    "}\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "113f53f6-15ec-415e-a1cf-e408eb0079fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_anthology_user_prompt(website):\n",
    "    user_prompt = f\"Below is the list of links from the webpage {website.url}. \"\n",
    "    user_prompt += \"Please decide which of the links are relevant for a brochure about the company. \\\n",
    "Respond with the full https URL in JSON format. Do not include Terms of Service, Privacy, or email links. \"\n",
    "    user_prompt += \"Here is the list of links (some might be relative links):\\n\\n\"\n",
    "    user_prompt += \"\\n\".join(website.links)\n",
    "\n",
    "    return user_prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "025311d1-8761-4e4f-8408-ef763fed1cb2",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_links_anthology_gpt(url):\n",
    "    website = Website(url)\n",
    "    response = openai.chat.completions.create(\n",
    "        model='gpt-4o-mini',\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": anthology_sPrompt},\n",
    "            {\"role\": \"user\", \"content\": get_anthology_user_prompt(website)}\n",
    "        ],\n",
    "        response_format={\"type\": \"json_object\"}\n",
    "    )\n",
    "    result = response.choices[0].message.content\n",
    "    return json.loads(result)  # because result is a string, and what we want is an actual dictionary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0cdb206-8efc-4ab5-8c17-1b991bdc6e6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_links_anthology_claude(url):\n",
    "    website = Website(url)\n",
    "    response = claude.messages.create(\n",
    "        model=\"claude-3-haiku-20240307\",\n",
    "        system=anthology_sPrompt,\n",
    "        messages=[{\"role\": \"user\", \"content\": get_anthology_user_prompt(website)}],\n",
    "        max_tokens=500\n",
    "    )\n",
    "    result = response.content[0].text\n",
    "    return json.loads(result)  # because result is a string, and what we want is an actual dictionary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b01f0574-b3ec-463f-81f6-7dd5938a91ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_links_anthology_gemini(url):\n",
    "    website = Website(url)\n",
    "    model = genai.GenerativeModel(\n",
    "        \"models/gemini-2.0-flash\",\n",
    "        system_instruction=anthology_sPrompt + \"Do not comment on your answer. Do not put ``` around your answer.\",\n",
    "    )\n",
    "    response = model.generate_content(get_anthology_user_prompt(website))\n",
    "    result = response.text\n",
    "    return json.loads(result.replace(\"```json\\n\",\"\").replace(\"\\n```\",\"\"))  # Gemini sometimes does this"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "228eed10-3c30-4f17-8e0a-cf01579a3708",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_all_details(url, model):\n",
    "    result = \"Landing page:\\n\\n\"\n",
    "    result += Website(url).get_contents()\n",
    "    if model == 'GPT':\n",
    "        links = get_links_anthology_gpt(url)\n",
    "    elif model == 'Claude':\n",
    "        links = get_links_anthology_claude(url)\n",
    "    else:\n",
    "        links = get_links_anthology_gemini(url)\n",
    "\n",
    "    for link in links[\"links\"]:  # remember that links is a json dictionary\n",
    "        result += f\"\\n\\n{link['type']}\\n\"\n",
    "        result += Website(link['url']).get_contents()\n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "559a316c-d80d-4a6d-8272-8220e491e6c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "brochure_sPrompt = \"You analyze the content of several relevant pages from a company's website. \\\n",
    "You use that knowledge to create a short brochure about the company. Your brochure is for prospective customers, investors, and recruits. \\\n",
    "Include details of company culture, customers, and job openings if you have the information. Respond in Markdown.\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4649bc6-6be8-4fc9-9011-47348f61087f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_brochure_user_prompt(company_name, url, model):\n",
    "    prompt = f\"You are looking at the website of the company called {company_name}.\\n\"\n",
    "    prompt += \"Here are the contents of its landing page and other relevant pages. Based on this content, \\\n",
    "create a short brochure of the company in Markdown:\\n\\n\"\n",
    "    prompt += get_all_details(url, model)\n",
    "    prompt = prompt[:5_000]  # this limits the prompt input, just in case\n",
    "\n",
    "    return prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0d80bf9-be76-4243-a448-9f1124d367a9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_gpt_brochure(company_name, url):\n",
    "    stream = openai.chat.completions.create(\n",
    "        model='gpt-4o-mini',\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": brochure_sPrompt},\n",
    "            {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url, 'GPT')}\n",
    "        ],\n",
    "        stream=True\n",
    "    )\n",
    "\n",
    "    response = \"\"\n",
    "    for chunk in stream:\n",
    "        response += chunk.choices[0].delta.content or ''\n",
    "        response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
    "        yield response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb48f4ba-6b33-460e-a654-f91d1cb156ac",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_claude_brochure(company_name, url):\n",
    "    result = claude.messages.stream(\n",
    "        model=\"claude-3-haiku-20240307\",\n",
    "        max_tokens=1000,\n",
    "        temperature=0.7,\n",
    "        system=brochure_sPrompt,\n",
    "        messages=[\n",
    "            {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url, 'Claude')},\n",
    "        ],\n",
    "    )\n",
    "    response = \"\"\n",
    "    with result as stream:\n",
    "        for text in stream.text_stream:\n",
    "            response += text or \"\"\n",
    "            yield response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d3afba59-6e5c-4d1b-a849-0fbd6861aadd",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_gemini_brochure(company_name, url):\n",
    "    model = genai.GenerativeModel(\n",
    "        \"models/gemini-2.0-flash\",\n",
    "        system_instruction=brochure_sPrompt + \"Do not comment on your answer. Do not put ``` around your answer.\",\n",
    "    )\n",
    "    response = model.generate_content(\n",
    "        get_brochure_user_prompt(company_name, url, 'Gemini'),\n",
    "        stream=True\n",
    "    )\n",
    "    result = \"\"\n",
    "    for chunk in response:\n",
    "        result += chunk.text or \"\"\n",
    "        yield result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ea6fde83-d756-49c6-926b-bfdc0f782310",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_model_brochure(model, company_name, url):\n",
    "    if model == \"GPT\":\n",
    "        result = stream_gpt_brochure(company_name, url)\n",
    "    elif model == \"Claude\":\n",
    "        result = stream_claude_brochure(company_name, url)\n",
    "    elif model == \"Gemini\":\n",
    "        result = stream_gemini_brochure(company_name, url)\n",
    "    else:\n",
    "        raise ValueError(\"Unknown model\")\n",
    "    yield from result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c403e6b-36df-4248-83eb-0e71d6e2ee0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "view = gr.Interface(\n",
    "    fn=stream_model_brochure,\n",
    "    inputs=[gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\", value=\"GPT\"), gr.Textbox(label=\"Company name:\"), gr.Textbox(label=\"Website URL:\")],\n",
    "    outputs=[gr.Markdown(label=\"Brochure:\")],\n",
    "    flagging_mode=\"never\",\n",
    "    js=force_dark_mode\n",
    ")\n",
    "view.launch(inbrowser=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}