{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "metadata": {}, "outputs": [], "source": [ "# imports\n", "\n", "import os\n", "import requests\n", "from dotenv import load_dotenv\n", "from bs4 import BeautifulSoup\n", "from IPython.display import Markdown, display\n", "from openai import OpenAI\n", "\n", "# If you get an error running this cell, then please head over to the troubleshooting notebook!" ] }, { "cell_type": "code", "execution_count": null, "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", "metadata": {}, "outputs": [], "source": [ "# Load environment variables in a file called .env\n", "\n", "load_dotenv(override=True)\n", "api_key = os.getenv('OPENAI_API_KEY')\n", "\n", "# Check the key\n", "\n", "if not api_key:\n", " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", "elif not api_key.startswith(\"sk-proj-\"):\n", " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", "elif api_key.strip() != api_key:\n", " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", "else:\n", " print(\"API key found and looks good so far!\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", "metadata": {}, "outputs": [], "source": [ "openai = OpenAI()\n", "\n", "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions" ] }, { "cell_type": "code", "execution_count": null, "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", "metadata": {}, "outputs": [], "source": [ "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", "\n", "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", "and provides a short summary, ignoring text that might be navigation related. \\\n", "Respond in markdown.\"" ] }, { "cell_type": "code", "execution_count": null, "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", "metadata": {}, "outputs": [], "source": [ "# A function that writes a User Prompt that asks for summaries of websites:\n", "\n", "def user_prompt_for(website):\n", " user_prompt = f\"You are looking at a website titled {website.title}\"\n", " user_prompt += \"\\nThe contents of this website is as follows; \\\n", "please provide a short summary of this website in markdown. \\\n", "If it includes news or announcements, then summarize these too.\\n\\n\"\n", " user_prompt += website.text\n", " return user_prompt" ] }, { "cell_type": "code", "execution_count": null, "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", "metadata": {}, "outputs": [], "source": [ "# See how this function creates exactly the format above\n", "\n", "def messages_for(website):\n", " return [\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", " ]" ] }, { "cell_type": "markdown", "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", "metadata": {}, "source": [ "# Sharing your code\n", "\n", "I'd love it if you share your code afterwards so I can share it with others! 
You'll notice that some students have already made changes (including a Selenium implementation), which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", "\n", "If you're not an expert with git (and I am not!), then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once, it's pretty clear. As a pro-tip: it's best to clear the outputs of your Jupyter notebooks (Edit >> Clear Outputs of All Cells, and then Save) so your notebooks stay clean.\n", "\n", "Here are good instructions courtesy of an AI friend: \n", "https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" ] }, { "cell_type": "code", "execution_count": null, "id": "acbb92b2-b625-4a37-b03a-09dc8f06b222", "metadata": {}, "outputs": [], "source": [ "!pip install selenium" ] }, { "cell_type": "code", "execution_count": null, "id": "d6448a12-6aa1-4dd1-aaf1-c8a3a3c3ecb0", "metadata": {}, "outputs": [], "source": [ "!pip install webdriver-manager" ] }, { "cell_type": "code", "execution_count": null, "id": "f4484fcf-8b39-4c3f-9674-37970ed71988", "metadata": {}, "outputs": [], "source": [ "# A class to represent a Webpage\n", "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", "\n", "# This version uses Selenium with headless Chrome, so JavaScript-rendered pages can be scraped as well as static ones\n", "# Import necessary modules\n", "from selenium import webdriver\n", "from selenium.webdriver.chrome.options import Options\n", "from selenium.webdriver.chrome.service import Service\n", "from webdriver_manager.chrome import ChromeDriverManager\n", "from bs4 import BeautifulSoup\n", "import time\n", "\n", "class ScrapeWebsite:\n", "    def __init__(self, url):\n", "        \"\"\"\n", "        Create this Website object from the given URL using Selenium + BeautifulSoup\n", "        Supports JavaScript-heavy and normal websites uniformly.\n", "        \"\"\"\n", "        self.url = url\n", "\n", "        # Configure headless Chrome\n", "        options = Options()\n", "        options.add_argument('--headless')\n", "        options.add_argument('--no-sandbox')\n", "        options.add_argument('--disable-dev-shm-usage')\n", "\n", "        # Use webdriver-manager to manage ChromeDriver\n", "        service = Service(ChromeDriverManager().install())\n", "\n", "        # Initialize the Chrome WebDriver with the service and options\n", "        driver = webdriver.Chrome(service=service, options=options)\n", "\n", "        # Load the page\n", "        driver.get(url)\n", "\n", "        # Wait for JS to load (adjust as needed)\n", "        time.sleep(3)\n", "\n", "        # Fetch the page source after JS execution\n", "        page_source = driver.page_source\n", "        driver.quit()\n", "\n", "        # Parse the HTML content with BeautifulSoup\n", "        soup = BeautifulSoup(page_source, 'html.parser')\n", "\n", "        # Extract title\n", "        self.title = soup.title.string if soup.title else \"No title found\"\n", "\n", "        # Remove unnecessary elements\n", "        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", "            irrelevant.decompose()\n", "\n", "        # Extract the main text\n", "        self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "f576f485-60c0-4539-bfb3-79d821ebefa4", "metadata": {}, "outputs": [], "source": [ "def summarize_js_website(url):\n", "    website = ScrapeWebsite(url)\n", "    response = openai.chat.completions.create(\n", "        model = \"gpt-4o-mini\",\n", "        messages = messages_for(website)\n", "    )\n",
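"    # The API response contains a list of choices; the first choice's message.content is the generated markdown summary\n",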
" return response.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": null, "id": "00ac3659-e4f0-4b64-8041-ba35bfa2c4c9", "metadata": {}, "outputs": [], "source": [ "summary = summarize_js_website(\"https://dheerajmaddi.netlify.app/\")" ] }, { "cell_type": "code", "execution_count": null, "id": "d526136e-9960-4f09-aad0-32f8c11de0ac", "metadata": {}, "outputs": [], "source": [ "display(Markdown(summary))" ] }, { "cell_type": "code", "execution_count": null, "id": "bcf1fd75-9964-4223-bcda-f2794bc9f7af", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.12" } }, "nbformat": 4, "nbformat_minor": 5 }