{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import requests\n", "import json\n", "from typing import List\n", "from dotenv import load_dotenv\n", "from bs4 import BeautifulSoup\n", "from IPython.display import Markdown, display, update_display, clear_output\n", "import openai\n", "\n", "load_dotenv(override=True)\n", "api_key = os.getenv('OPENAI_API_KEY')\n", "\n", "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", " print(\"API key looks good so far\")\n", "else:\n", " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", "\n", "MODEL = 'gpt-4o-mini'\n", "\n", "headers = {\n", " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", "}\n", "\n", "# Prompt user for company name and URL\n", "company_name = input(\"Enter the company name: \")\n", "url = input(\"Enter the company URL: \")\n", "\n", "class Website:\n", " \"\"\"\n", " A utility class to represent a Website that we have scraped, now with links\n", " \"\"\"\n", "\n", " def __init__(self, url):\n", " self.url = url\n", " response = requests.get(url, headers=headers)\n", " self.body = response.content\n", " soup = BeautifulSoup(self.body, 'html.parser')\n", " self.title = soup.title.string if soup.title else \"No title found\"\n", " if soup.body:\n", " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", " irrelevant.decompose()\n", " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", " else:\n", " self.text = \"\"\n", " links = [link.get('href') for link in soup.find_all('a')]\n", " self.links = [link for link in links if link]\n", "\n", " def get_contents(self):\n", " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", "# multi-shot prompt\n", "link_system_prompt = \"You are provided with a list of links found on a webpage. \\You are able to decide which of the links would be most relevant to include in a brochure about the company, \\such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", "link_system_prompt += \"You should respond in JSON as in this example:\"\n", "link_system_prompt += \"\"\"\n", " EXAMPLE 1:\n", " {\n", " \"links\": [\n", " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", " {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", " ]\n", " }\n", " EXAMPLE 2:\n", " {\n", " \"links\": [\n", " {\"type\": \"company blog\", \"url\": \"https://blog.example.com\"},\n", " {\"type\": \"our story\", \"url\": \"https://example.com/our-story\"}\n", " ]\n", " }\n", " \"\"\"\n", "\n", "def get_links_user_prompt(website):\n", " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\ Do not include Terms of Service, Privacy, email links.\\n\"\n", " user_prompt += \"Links (some might be relative links):\\n\"\n", " user_prompt += \"\\n\".join(website.links)\n", " return user_prompt\n", "\n", "\n", "def get_links(url):\n", " website = Website(url)\n", " response = openai.chat.completions.create(\n", " model=MODEL,\n", " messages=[\n", " {\"role\": \"system\", \"content\": link_system_prompt},\n", " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", " ],\n", " response_format={\"type\": \"json_object\"}\n", " )\n", " result = response.choices[0].message.content\n", " return json.loads(result)\n", "\n", "def get_all_details(url):\n", " result = \"Landing page:\\n\"\n", " result += Website(url).get_contents()\n", " links = get_links(url)\n", "\n", " for link in links[\"links\"]:\n", " result += f\"\\n\\n{link['type']}\\n\"\n", " result += Website(link[\"url\"]).get_contents()\n", " return result\n", "\n", "# set format to json_object\n", "system_prompt = (\n", " \"You are an assistant that analyzes the contents of several relevant pages from a company website \"\n", " \"and creates a short tempered, irritated, disappointed in the world type of brochure about the company for prospective customers, investors, and recruits. \"\n", " \"Respond in markdown. Include details of company culture, customers, and careers/jobs if you have the information. Add emoticons where ever possible.\\n\\n\"\n", "\n", " \"Please structure the brochure using the following sections:\\n\"\n", " \"1. **Introduction**: A brief overview of the company.\\n\"\n", " \"2. **Company Culture**: Emphasize fun, atmosphere, and any unique cultural elements.\\n\"\n", " \"3. **Customers**: Mention notable customers or industries.\\n\"\n", " \"4. **Careers/Jobs**: Highlight career opportunities.\\n\"\n", " \"5. **Conclusion**: Wrap up with a final lighthearted message.\\n\"\n", " \"6. Finish the brochure with a very sarcastic and pun-intended mission statement.\\n\"\n", ")\n", "\n", "def get_brochure_user_prompt(company_name, url):\n", " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", " user_prompt += get_all_details(url)\n", " user_prompt = user_prompt[:20_000]\n", " return user_prompt\n", "\n", "def stream_brochure():\n", " global brochure_text # Access the global variable\n", " brochure_text = \"\" # Initialize\n", " \n", " stream = openai.chat.completions.create(\n", " model=MODEL,\n", " messages=[\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", " ],\n", " stream=True\n", " )\n", " \n", " response = \"\"\n", " display_handle = display(Markdown(\"\"), display_id=True)\n", " for chunk in stream: \n", " content = chunk.choices[0].delta.content or ''\n", " response += content\n", " brochure_text += content # Accumulate the text\n", " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", " update_display(Markdown(response), display_id=display_handle.display_id)\n", "\n", "def user_translate_brochure(lang):\n", " # Clear previous output\n", " clear_output(wait=True)\n", " \n", " # Stream #2: translate accumulated text\n", " translation_stream = openai.chat.completions.create( # Changed from ChatCompletion\n", " model=MODEL,\n", " messages=[\n", " {\"role\": \"user\", \"content\": f\"Translate the following to {lang}:\\n\\n{brochure_text}\"}\n", " ],\n", " stream=True\n", " )\n", " \n", " # Setup display for streaming translation\n", " display_handle = display(Markdown(\"\"), display_id=True)\n", " translated_text = \"\"\n", " \n", " for chunk in translation_stream:\n", " content = chunk.choices[0].delta.content or \"\"\n", " if content:\n", " translated_text += content\n", " update_display(Markdown(translated_text), display_id=display_handle.display_id)\n", "\n", "# stream the brochure in english\n", "stream_brochure()\n", "\n", "# prompt user for language choice\n", "language_choice = input(\"Enter the language to translate the brochure into (e.g., 'French'): \")\n", "\n", "# translate the brochure and stream the translation\n", "user_translate_brochure(language_choice)" ] } ], "metadata": { "kernelspec": { "display_name": "llms", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.11" } }, "nbformat": 4, "nbformat_minor": 4 }