{
"cells": [
{
"cell_type": "markdown",
"id": "83bbedd0-eb58-48de-992e-484071b10104",
"metadata": {},
"source": [
"# Web Scraper with JavaScript Support\n",
"Uses day1-webscraping-selenium-for-javascript.ipynb solution simplified so easy to run.\n",
"\n",
"## Install dependencies\n",
"Uncomment and run once"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2d91971-9dd0-4714-8ec7-f1fb25f95140",
"metadata": {},
"outputs": [],
"source": [
"# !pip install selenium\n",
"# !pip install undetected-chromedriver\n",
"# !ollama pull llama3.2"
]
},
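{
"cell_type": "markdown",
"id": "3f6c1a2e-7b41-4c2a-9d55-0a1b2c3d4e5f",
"metadata": {},
"source": [
"Optionally, check that the scraping packages installed correctly before going further. The cell below is a small convenience check added here (not part of the original solution); it only reads installed package metadata via the Python standard library."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a7d2b3f-8c52-4d3b-ae66-1b2c3d4e5f6a",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (an addition, not part of the original solution):\n",
"# confirm the scraping packages are installed by reading their metadata.\n",
"from importlib.metadata import version, PackageNotFoundError\n",
"\n",
"for package in [\"selenium\", \"undetected-chromedriver\"]:\n",
"    try:\n",
"        print(f\"{package}: {version(package)}\")\n",
"    except PackageNotFoundError:\n",
"        print(f\"{package}: not installed - run the pip install lines above\")"
]
},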
{
"cell_type": "markdown",
"id": "967258fe-3296-464c-962d-2bcf821eae67",
"metadata": {},
"source": [
"## Import required dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fe8a87c8-0475-45a1-8ca2-fb9059e5470b",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"import undetected_chromedriver as uc\n",
"from selenium.webdriver.common.by import By\n",
"from selenium.webdriver.support.ui import WebDriverWait\n",
"from selenium.webdriver.support import expected_conditions as EC\n",
"import time\n",
"from bs4 import BeautifulSoup\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "markdown",
"id": "df60545e-2ab6-4e37-b41c-27ddf2affb92",
"metadata": {},
"source": [
"## Run setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3846089-efa2-4602-8bc3-5f6f4945de64",
"metadata": {},
"outputs": [],
"source": [
"chrome_path = \"C:/Program Files/Google/Chrome/Application/chrome.exe\""
]
},
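{
"cell_type": "markdown",
"id": "5b8e3c4a-9d63-4e4c-bf77-2c3d4e5f6a7b",
"metadata": {},
"source": [
"The hard-coded path above matches a default Windows install. As an optional addition (not part of the original solution), the sketch below tries to locate the Chrome binary automatically; the candidate names and paths are common defaults and are assumptions, so adjust them for your machine if needed."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6c9f4d5b-ae74-4f5d-8a88-3d4e5f6a7b8c",
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch (an addition, not part of the original solution): try to find\n",
"# the Chrome binary automatically. The names and paths below are common defaults\n",
"# and are assumptions - adjust them for your own machine if needed.\n",
"import shutil\n",
"from pathlib import Path\n",
"\n",
"def find_chrome_binary():\n",
"    # Check the PATH first (covers most Linux installs and some custom setups)\n",
"    for name in [\"google-chrome\", \"chrome\", \"chromium\", \"chromium-browser\"]:\n",
"        found = shutil.which(name)\n",
"        if found:\n",
"            return found\n",
"    # Fall back to well-known install locations on Windows and macOS\n",
"    candidates = [\n",
"        \"C:/Program Files/Google/Chrome/Application/chrome.exe\",\n",
"        \"C:/Program Files (x86)/Google/Chrome/Application/chrome.exe\",\n",
"        \"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\",\n",
"    ]\n",
"    for candidate in candidates:\n",
"        if Path(candidate).exists():\n",
"            return candidate\n",
"    return None\n",
"\n",
"detected = find_chrome_binary()\n",
"if detected:\n",
"    chrome_path = detected\n",
"print(f\"Using Chrome at: {chrome_path}\")"
]
},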
{
"cell_type": "code",
"execution_count": null,
"id": "b835812d-3692-4192-abc4-15fc463bd08f",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "acb89abb-dcee-4da6-98f8-e339d258f2a4",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:\n",
"# openai = OpenAI(api_key=\"your-key-here-starting-sk-proj-\")"
]
},
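{
"cell_type": "markdown",
"id": "7da05e6c-bf85-4a6e-9b99-4e5f6a7b8c9d",
"metadata": {},
"source": [
"Optionally, make one quick round-trip to confirm the client works before scraping anything. This small check is an addition (not part of the original solution) and assumes your key has quota; it sends a single short request to gpt-4o-mini."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8eb16f7d-c096-4b7f-acaa-5f6a7b8c9d0e",
"metadata": {},
"outputs": [],
"source": [
"# Optional added check: one small call to confirm the OpenAI client is working.\n",
"# Assumes your key has quota; this sends a single short request to gpt-4o-mini.\n",
"response = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hello in one short sentence.\"}]\n",
")\n",
"print(response.choices[0].message.content)"
]
},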
{
"cell_type": "markdown",
"id": "e860e963-e7a1-4888-a4b9-db9c24bb9a6e",
"metadata": {},
"source": [
"# Create Prompts"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4933c36-db8a-4333-8f81-e9db7ba41287",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n"
]
},
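{
"cell_type": "markdown",
"id": "9fc27a8e-d1a7-4c8a-bdbb-6a7b8c9d0e1f",
"metadata": {},
"source": [
"To see what the user prompt looks like before wiring up the scraper, the cell below (an added illustration, not part of the original solution) feeds a stand-in object with `title` and `text` attributes into `user_prompt_for`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0d38b9f-e2b8-4d9b-8ccc-7b8c9d0e1f2a",
"metadata": {},
"outputs": [],
"source": [
"# Added illustration: preview the prompt with a stand-in \"website\" object.\n",
"# SimpleNamespace is just a placeholder - any object with .title and .text works.\n",
"from types import SimpleNamespace\n",
"\n",
"fake_site = SimpleNamespace(\n",
"    title=\"Example Site\",\n",
"    text=\"Welcome to Example Site. We publish weekly product announcements.\"\n",
")\n",
"print(user_prompt_for(fake_site))"
]
},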
{
"cell_type": "markdown",
"id": "17cfab59-304d-4d2f-b324-c388d9e87fca",
"metadata": {},
"source": [
"# Create Functions"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ca5e96e0-4d8f-49de-a608-a735a5b23b1a",
"metadata": {},
"outputs": [],
"source": [
"# Setup for how OpenAI expects to receive messages in a particular structure\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
"\n",
"# Use Selenium and chrome to scrape website\n",
"class WebsiteCrawler:\n",
" def __init__(self, url, wait_time=20, chrome_binary_path=None):\n",
" \"\"\"\n",
" Initialize the WebsiteCrawler using Selenium to scrape JavaScript-rendered content.\n",
" \"\"\"\n",
" self.url = url\n",
" self.wait_time = wait_time\n",
"\n",
" options = uc.ChromeOptions()\n",
" options.add_argument(\"--disable-gpu\")\n",
" options.add_argument(\"--no-sandbox\")\n",
" options.add_argument(\"--disable-dev-shm-usage\")\n",
" options.add_argument(\"--disable-blink-features=AutomationControlled\")\n",
" options.add_argument(\"start-maximized\")\n",
" options.add_argument(\n",
" \"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
" )\n",
" if chrome_binary_path:\n",
" options.binary_location = chrome_binary_path\n",
"\n",
" self.driver = uc.Chrome(options=options)\n",
"\n",
" try:\n",
" # Load the URL\n",
" self.driver.get(url)\n",
"\n",
" # Wait for Cloudflare or similar checks\n",
" time.sleep(10)\n",
"\n",
" # Ensure the main content is loaded\n",
" WebDriverWait(self.driver, self.wait_time).until(\n",
" EC.presence_of_element_located((By.TAG_NAME, \"main\"))\n",
" )\n",
"\n",
" # Extract the main content\n",
" main_content = self.driver.find_element(By.CSS_SELECTOR, \"main\").get_attribute(\"outerHTML\")\n",
"\n",
" # Parse with BeautifulSoup\n",
" soup = BeautifulSoup(main_content, \"html.parser\")\n",
" self.title = self.driver.title if self.driver.title else \"No title found\"\n",
" self.text = soup.get_text(separator=\"\\n\", strip=True)\n",
"\n",
" except Exception as e:\n",
" print(f\"Error occurred: {e}\")\n",
" self.title = \"Error occurred\"\n",
" self.text = \"\"\n",
"\n",
" finally:\n",
" self.driver.quit()\n",
"\n",
"def new_summary(url, chrome_path):\n",
" web = WebsiteCrawler(url, 30, chrome_path)\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(web)\n",
" )\n",
"\n",
" web_summary = response.choices[0].message.content\n",
" \n",
" return display(Markdown(web_summary))"
]
},
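{
"cell_type": "markdown",
"id": "b1e49caf-f3c9-4eac-9ddd-8c9d0e1f2a3b",
"metadata": {},
"source": [
"`new_summary` can be reused for several pages in a row. The loop below is an added illustration (not part of the original solution); the URLs are placeholders, and each call launches and quits its own Chrome instance, so expect it to be slow. Uncomment the loop to run it."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c2f5adb0-04da-4fbd-aeee-9d0e1f2a3b4c",
"metadata": {},
"outputs": [],
"source": [
"# Added illustration: summarize several pages in one go.\n",
"# The URLs below are placeholders - swap in the sites you care about.\n",
"# Each call launches and quits its own Chrome instance, so this is slow.\n",
"urls_to_summarize = [\n",
"    \"https://www.canva.com/\",\n",
"    \"https://openai.com/\",\n",
"]\n",
"\n",
"# Uncomment to run:\n",
"# for page_url in urls_to_summarize:\n",
"#     new_summary(page_url, chrome_path)"
]
},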
{
"cell_type": "markdown",
"id": "e5f974b3-e417-43a2-88f1-8db06096cd53",
"metadata": {},
"source": [
"# Scrape and Summarize Web Page"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55f240cb-1fca-46bf-81d1-1beeea64439d",
"metadata": {},
"outputs": [],
"source": [
"url = \"https://www.canva.com/\"\n",
"new_summary(url, chrome_path)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}