{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "6e988b94-daab-4ad1-bf85-e2ee066bca17",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from selenium import webdriver\n",
"from selenium.webdriver.chrome.service import Service\n",
"from selenium.webdriver.common.by import By\n",
"from selenium.webdriver.chrome.options import Options\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"from bs4 import BeautifulSoup"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5d58d8a6-65d6-42ee-b5c9-bbcc1cafd7fc",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"PATH_TO_CHROME_DRIVER = 'B:\\\\Users\\\\ekfon\\\\chromeDriver\\\\chromedriver.exe'\n",
"\n",
"MODEL = 'gpt-4o-mini'\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "29ce6d79-2d5f-48e2-9f99-e249e5f0ca77",
"metadata": {},
"outputs": [],
"source": [
"class Website:\n",
"    url: str\n",
"    title: str\n",
"    text: str\n",
"\n",
"    def __init__(self, url):\n",
"        self.url = url\n",
"        # Begin Selenium equivalent of the requests version:\n",
"        #   response = requests.get(url, headers=headers)\n",
"        #   self.body = response.content  # which is then passed on to BeautifulSoup\n",
"        options = Options()\n",
"\n",
"        options.add_argument(\"--no-sandbox\")\n",
"        options.add_argument(\"--disable-dev-shm-usage\")\n",
"\n",
"        service = Service(PATH_TO_CHROME_DRIVER)\n",
"        driver = webdriver.Chrome(service=service, options=options)\n",
"        driver.get(url)\n",
"\n",
"        page_source = driver.page_source\n",
"        driver.quit()\n",
"        # End Selenium part\n",
"\n",
"        soup = BeautifulSoup(page_source, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"\n",
"        links = [link.get('href') for link in soup.find_all('a')]\n",
"        self.links = [link for link in links if link]\n",
"\n",
"    def get_contents(self):\n",
"        return f\"Webpage title: \\\"{self.title}\\\"\\nWebpage contents:\\n{self.text}\\n\\n\""
]
},
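{
"cell_type": "code",
"execution_count": null,
"id": "1a2b3c4d-5e6f-4a7b-8c9d-0e1f2a3b4c5d",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check -- a minimal sketch, not part of the pipeline: scrape one page with the\n",
"# Website class above and confirm that title, text, and links come back. The URL is just the one\n",
"# used at the end of this notebook; swap in any page. Requires a valid PATH_TO_CHROME_DRIVER.\n",
"site = Website(\"https://www.acrolinx.com/\")\n",
"print(site.title)\n",
"print(f\"{len(site.links)} links found\")\n",
"print(site.text[:500])"
]
},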
{
"cell_type": "code",
"execution_count": null,
"id": "fbdd2015-3ae5-4121-9247-749b131552dd",
"metadata": {},
"outputs": [],
"source": [
"# system prompt for the link anthology\n",
"anthology_sPrompt = \"I'll provide you with a list of links from a webpage. \\\n",
"You are able to decide which links would be most relevant to include in a brochure about the entity this website is for, \\\n",
"such as the About page, any personal/company page, or a careers/goals page, if any.\\n\"\n",
"\n",
"anthology_sPrompt += \"You will respond in JSON format, providing full https URLs, just like in this example:\\n\"\n",
"\n",
"anthology_sPrompt += \"\"\"\n",
"{\n",
"    \"links\": [\n",
"        {\"type\": \"about page\", \"url\": \"https://www.example-url.com/about\"},\n",
"        {\"type\": \"careers page\", \"url\": \"https://further.example-url.co.uk/Careers/\"}\n",
"    ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb2aa2f3-e723-4267-ac54-41eaa04b2bba",
"metadata": {},
"outputs": [],
"source": [
"def get_anthology_user_prompt(website):\n",
"    user_prompt = f\"Below is the list of links from the webpage {website.url}. \"\n",
"    user_prompt += \"Please decide which of the links are relevant for a brochure presenting what the website is about. \\\n",
"Respond with the full https URLs in JSON format. Do not include Terms of Service, Privacy, or email links.\\n\"\n",
"    user_prompt += \"Here is the list of links (some might be relative links):\\n\\n\"\n",
"    user_prompt += \"\\n\".join(website.links)\n",
"\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "752885d6-5c54-4f2b-a6e1-f30e046b704e",
"metadata": {},
"outputs": [],
"source": [
"def get_links_anthology(url):\n",
"    website = Website(url)\n",
"    response = openai.chat.completions.create(\n",
"        model=MODEL,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": anthology_sPrompt},\n",
"            {\"role\": \"user\", \"content\": get_anthology_user_prompt(website)}\n",
"        ],\n",
"        response_format={\"type\": \"json_object\"}\n",
"    )\n",
"    result = response.choices[0].message.content\n",
"    return json.loads(result)  # because result is a string, and what we want is an actual dictionary"
]
},
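{
"cell_type": "code",
"execution_count": null,
"id": "2b3c4d5e-6f7a-4b8c-9d0e-1f2a3b4c5d6e",
"metadata": {},
"outputs": [],
"source": [
"# Optional quick check -- a sketch using the same example site as the final cell: ask the model\n",
"# which links it would keep. This makes one OpenAI call and one Selenium page load.\n",
"get_links_anthology(\"https://www.acrolinx.com/\")"
]
},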
{
"cell_type": "code",
"execution_count": null,
"id": "989c19b4-817a-4485-a9f7-d38e495385c3",
"metadata": {},
"outputs": [],
"source": [
"def get_all_details(url):\n",
"    result = \"Landing page:\\n\\n\"\n",
"    result += Website(url).get_contents()\n",
"    links = get_links_anthology(url)\n",
"\n",
"    for link in links[\"links\"]:  # remember that links is a JSON dictionary\n",
"        result += f\"\\n\\n{link['type']}\\n\"\n",
"        result += Website(link['url']).get_contents()\n",
"\n",
"    return result"
]
},
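{
"cell_type": "code",
"execution_count": null,
"id": "3c4d5e6f-7a8b-4c9d-8e1f-2a3b4c5d6e7f",
"metadata": {},
"outputs": [],
"source": [
"# Optional preview -- a sketch: print the start of the combined page text that will be fed to the\n",
"# content scan. Note this loads every selected page via Selenium, so it can take a while.\n",
"print(get_all_details(\"https://www.acrolinx.com/\")[:2_000])"
]
},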
{
"cell_type": "code",
"execution_count": null,
"id": "b74672ac-15de-4115-80b8-ba1d2c107b0f",
"metadata": {},
"outputs": [],
"source": [
"contentScan_sPrompt = \"You are the world's most powerful content analysis assistant. You are given a selection of the \\\n",
"most representative pages from a website. You scan and analyze the content to find out if any of the following key principles are violated:\\n\\n \\\n",
"- inclusive language (is the language gender-neutral, non-binary, etc.)\\n \\\n",
"- non-violent language (avoid unnecessarily violent language)\\n \\\n",
"- respectful language (avoid stereotypes, othering, be history-conscious)\\n\\n \\\n",
"Write a short report in Markdown about the content of the website. Report any violation of the above-mentioned content principles, if any, \\\n",
"and mention the section where you found it. If possible, help the author of the text re-word or re-phrase the problematic passage. Mention only \\\n",
"the aspects of the content you would recommend improving. Mention each violation only once. Conclude your analysis with a \\\n",
"\\\"language score\\\" from 30 to 100 based on how much the content principles are respected. Be helpful and non-judgmental.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bfcca913-1a0b-40f3-981f-f7ab3800f55e",
"metadata": {},
"outputs": [],
"source": [
"def get_contentScan_user_prompt(entity_name, url):\n",
"    prompt = f\"You are looking at the website of {entity_name}.\\n\"\n",
"    prompt += \"Here are the contents of its landing page and other relevant pages. Based on this content, \\\n",
"create a content report for this website in Markdown, concerning inclusive, non-violent, respectful language, etc.:\\n\\n\"\n",
"    prompt += get_all_details(url)\n",
"    prompt = prompt[:10_000]  # this limits the prompt input, just in case\n",
"\n",
"    return prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ed64addd-12c6-43b2-9293-c708f4ef5136",
"metadata": {},
"outputs": [],
"source": [
"def create_contentScan(entity_name, url):\n",
"    response = openai.chat.completions.create(\n",
"        model=MODEL,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": contentScan_sPrompt},\n",
"            {\"role\": \"user\", \"content\": get_contentScan_user_prompt(entity_name, url)}\n",
"        ],\n",
"    )\n",
"    result = response.choices[0].message.content\n",
"    display(Markdown(result))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5405287f-1bba-4a7a-b045-075f9b32ce38",
"metadata": {},
"outputs": [],
"source": [
"def stream_contentScan(entity_name, url):\n",
"    stream = openai.chat.completions.create(\n",
"        model=MODEL,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": contentScan_sPrompt},\n",
"            {\"role\": \"user\", \"content\": get_contentScan_user_prompt(entity_name, url)}\n",
"        ],\n",
"        stream=True\n",
"    )\n",
"\n",
"    response = \"\"\n",
"    display_handle = display(Markdown(\"\"), display_id=True)\n",
"    for chunk in stream:\n",
"        response += chunk.choices[0].delta.content or ''\n",
"        response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
"        update_display(Markdown(response), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2bc8384e-3d82-4e47-be38-4bda7047c429",
"metadata": {},
"outputs": [],
"source": [
"stream_contentScan(\"Acrolinx Website\", \"https://www.acrolinx.com/\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}