Browse Source

enhanced structure and comments for week 1 and added a Spanish version

pull/2/head
Simon Dufty 7 months ago
parent
commit
bdd3ef77e0
  1. 163
      week1/SD code.txt
  2. 251
      week1/day1.ipynb
  3. 356
      week1/day5-Enhanced.ipynb
  4. 4823
      week1/day5.ipynb

163
week1/SD code.txt

@ -0,0 +1,163 @@
# imports
import os
import requests
import json
from typing import List
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from IPython.display import Markdown, display, update_display
from openai import OpenAI
# Initialize and constants
# Load variables from a local .env file (expects OPENAI_API_KEY to be defined there).
load_dotenv()
# Re-export the key into the process environment; falls back to a placeholder string.
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')
# Model used for every completion call below.
MODEL = 'gpt-4o-mini'
# Client reads OPENAI_API_KEY from the environment set above.
openai = OpenAI()
# A class to represent a Webpage
class Website:
    """Scraped representation of one web page: title, visible text, and links.

    Fetches the page on construction; `get_contents` renders the title and
    text in the exact format the prompt-building functions expect.
    """

    url: str
    title: str
    body: bytes  # raw HTTP response body (requests returns bytes, not str)
    text: str
    links: List[str]

    def __init__(self, url, timeout=10):
        """Download *url* and parse out title, readable text and anchor hrefs.

        :param url: page to download
        :param timeout: seconds before the HTTP request is abandoned — without
            this, an unresponsive host would hang the call indefinitely
        """
        self.url = url
        response = requests.get(url, timeout=timeout)
        self.body = response.content
        soup = BeautifulSoup(self.body, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        if soup.body:
            # Remove tags that carry no human-readable text before extraction.
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            self.text = soup.body.get_text(separator="\n", strip=True)
        else:
            self.text = ""
        # Keep only anchors that actually have an href attribute.
        links = [link.get('href') for link in soup.find_all('a')]
        self.links = [link for link in links if link]

    def get_contents(self):
        """Return the page title and text formatted for prompt assembly."""
        return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"
# System prompt for the link-categorization call: the model labels every link,
# then selects the subset worth including in a company brochure, replying as
# JSON (paired with response_format={"type": "json_object"} in get_links).
link_system_prompt = """
You are provided with a list of links found on a webpage. Your task is to first categorize each link into one of the following categories:
- about page
- careers page
- terms of service
- privacy policy
- contact page
- other (please specify).
Once the links are categorized, please choose which links are most relevant to include in a brochure about the company.
The brochure should only include links such as About pages, Careers pages, or Company Overview pages. Exclude any links related to Terms of Service, Privacy Policy, or email addresses.
Respond in the following JSON format:
{
"categorized_links": [
{"category": "about page", "url": "https://full.url/about"},
{"category": "careers page", "url": "https://full.url/careers"},
{"category": "terms of service", "url": "https://full.url/terms"},
{"category": "privacy policy", "url": "https://full.url/privacy"},
{"category": "other", "specify": "contact page", "url": "https://full.url/contact"}
],
"brochure_links": [
{"type": "about page", "url": "https://full.url/about"},
{"type": "careers page", "url": "https://full.url/careers"}
]
}
Please find the links below and proceed with the task:
Links (some may be relative links):
[INSERT LINK LIST HERE]
"""
def get_links_user_prompt(website):
    """Build the user message listing a site's links for the categorization call.

    :param website: a Website object providing `url` and `links`
    :return: prompt text ending with one link per line
    """
    pieces = [
        f"Here is the list of links on the website of {website.url} - ",
        "please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. Do not include Terms of Service, Privacy, email links.\n",
        "Links (some might be relative links):\n",
        "\n".join(website.links),
    ]
    return "".join(pieces)
def get_links(url):
    """Ask the model to categorize the links found at *url*.

    :param url: landing-page URL to scrape for anchors
    :return: parsed JSON dict with `categorized_links` and `brochure_links`
    """
    site = Website(url)
    messages = [
        {"role": "system", "content": link_system_prompt},
        {"role": "user", "content": get_links_user_prompt(site)},
    ]
    # JSON mode guarantees the reply parses as a single JSON object.
    reply = openai.chat.completions.create(
        model=MODEL,
        messages=messages,
        response_format={"type": "json_object"},
    )
    return json.loads(reply.choices[0].message.content)
from urllib.parse import urljoin


def get_all_details(url):
    """Gather the landing page plus each brochure-relevant page into one string.

    :param url: the company's landing-page URL
    :return: concatenated text of the landing page and every selected subpage
    """
    sections = ["Landing page:\n", Website(url).get_contents()]
    link_info = get_links(url)
    chosen = link_info.get('brochure_links', [])
    print("Found Brochure links:", chosen)  # debug aid: show what the model picked
    for entry in chosen:
        sections.append(f"\n\n{entry['type']}:\n")
        # Relative hrefs are resolved against the landing-page URL.
        absolute = urljoin(url, entry["url"])
        sections.append(Website(absolute).get_contents())
    return "".join(sections)
# System prompt for the brochure-generation call: fixes the section structure
# and asks for the same brochure twice, in English then in Spanish.
system_prompt = "You are an assistant that analyzes the contents of several relevant pages from a company website \
and creates a brochure about the company for prospective customers, investors and recruits. Respond in markdown.\
Include details of company culture, customers and careers/jobs if you have the information.\
Structure the brochure to include specific sections as follows:\
About Us\
What we do\
How We Do It\
Where We Do It\
Our People\
Our Culture\
Connect with Us.\
Please provide two versions of the brochure, the first in English, the second in Spanish. The contents of the brochure are to be the same for both languages."
def get_brochure_user_prompt(company_name, url):
    """Compose the user prompt carrying all scraped content for *company_name*.

    :param company_name: name shown to the model
    :param url: landing-page URL passed to get_all_details
    :return: prompt text, truncated to 20,000 characters
    """
    prompt = f"You are looking at a company called: {company_name}\n"
    prompt += f"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n"
    prompt += get_all_details(url)
    # Keep the prompt within a safe size for the model's context window.
    return prompt[:20_000]
def stream_brochure(company_name, url):
    """Generate the brochure and render it live in the notebook as it streams.

    :param company_name: name shown to the model
    :param url: the company's landing-page URL
    """
    stream = openai.chat.completions.create(
        model=MODEL,
        stream=True,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": get_brochure_user_prompt(company_name, url)},
        ],
    )
    accumulated = ""
    handle = display(Markdown(""), display_id=True)
    for chunk in stream:
        accumulated += chunk.choices[0].delta.content or ''
        # Strip code fences / the word "markdown" the model sometimes wraps around output.
        accumulated = accumulated.replace("```", "").replace("markdown", "")
        update_display(Markdown(accumulated), display_id=handle.display_id)


stream_brochure("Anthropic", "https://anthropic.com")

251
week1/day1.ipynb

@ -16,7 +16,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -51,7 +51,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", "id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -65,7 +65,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "c5e793b2-6775-426a-a139-4848291d0463", "id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -89,10 +89,63 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 4,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Home - Edward Donner\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"February 7, 2024\n",
"Fine-tuning an LLM on your texts: a simulation of you\n",
"January 31, 2024\n",
"Fine-tuning an LLM on your texts: part 4 – QLoRA\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [ "source": [
"# Let's try one out\n", "# Let's try one out\n",
"\n", "\n",
@ -121,7 +174,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 5,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", "id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -133,7 +186,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 6,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -166,7 +219,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 7,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -188,7 +241,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -204,17 +257,30 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 12,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/plain": [
"\"# Summary of Edward Donner's Website\\n\\nEdward Donner's website serves as a platform for sharing insights on artificial intelligence, specifically focusing on large language models (LLMs). He expresses a passion for coding, DJing, and electronic music production. Edward is the co-founder and CTO of Nebula.io, which utilizes AI to enhance talent discovery and management processes. He previously founded the AI startup untapt, which was acquired in 2021.\\n\\n## Recent Posts\\n- **Outsmart LLM Arena** (August 6, 2024): An announcement about an interactive platform where LLMs compete in strategy and negotiation.\\n- **Choosing the Right LLM: Toolkit and Resources** (June 26, 2024): A guide for selecting suitable LLMs for various applications.\\n- **Fine-tuning an LLM on Your Texts: A Simulation of You** (February 7, 2024): Discussion on personalizing LLMs for individual use.\\n- **Fine-tuning an LLM on Your Texts: Part 4 – QLoRA** (January 31, 2024): Detailed exploration of a specific method for fine-tuning LLMs.\\n\\nEdward invites readers to connect with him through various platforms for further discussion and engagement.\""
]
},
"execution_count": 12,
"metadata": {}, "metadata": {},
"outputs": [], "output_type": "execute_result"
}
],
"source": [ "source": [
"summarize(\"https://edwarddonner.com\")" "summarize(\"https://edwarddonner.com\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 13,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", "id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -226,39 +292,186 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 14,
"id": "3018853a-445f-41ff-9560-d925d1774b2f", "id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"The website serves as a personal and professional platform for Ed Donner, a programmer, LLM (Large Language Model) enthusiast, and the co-founder and CTO of Nebula.io. The site includes insights into Ed's interests in coding, electronic music production, and his background in AI startups.\n",
"\n",
"## Key Highlights:\n",
"\n",
"- **Outsmart**: An initiative that features LLMs competing in scenarios that require diplomacy and cunning.\n",
"- **About Ed**: Ed shares his professional journey, including the founding of Nebula.io and his previous startup, untapt, which was acquired in 2021. He emphasizes using AI to help people fulfill their potential.\n",
"- **Posts and Resources**: The site includes several posts discussing topics such as selecting the right LLM, fine-tuning LLMs, and practical tools for working with them.\n",
"\n",
"## Recent Announcements:\n",
"1. **August 6, 2024**: Announcement of the \"Outsmart LLM Arena\".\n",
"2. **June 26, 2024**: Introduction of resources for selecting the right LLM.\n",
"3. **February 7, 2024**: Post on fine-tuning LLMs to simulate personal text styles.\n",
"4. **January 31, 2024**: Continuation of the fine-tuning series focusing on QLoRA.\n",
"\n",
"Overall, the website is a blend of personal exploration in AI, professional endeavors, and educational content centered around large language models."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"display_summary(\"https://edwarddonner.com\")" "display_summary(\"https://edwarddonner.com\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 15,
"id": "45d83403-a24c-44b5-84ac-961449b4008f", "id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"# Website Summary: CNN\n",
"\n",
"CNN is a leading news platform providing breaking news, latest headlines, and video content across a range of topics including politics, business, health, and global events. The site covers significant global issues such as the ongoing Ukraine-Russia war and the Israel-Hamas conflict, with live updates and analyses. \n",
"\n",
"## Recent News Highlights:\n",
"- **Israel-Hamas Conflict**: The Israel Defense Forces (IDF) chief has indicated a potential ground incursion in Lebanon amid rising tensions with Hezbollah, with calls from the US and allies for a 21-day ceasefire to avert regional war.\n",
"- **Geopolitical Developments**: Israel intercepted a Hezbollah missile aimed at Tel Aviv, marking a significant escalation in hostilities.\n",
"- **International Concerns**: There are fears regarding potential Russian attacks on Ukrainian nuclear facilities.\n",
"- **Cultural Updates**: A panda pair was welcomed in Hong Kong as China commemorates 75 years of Communist rule.\n",
"- **Scientific Discoveries**: Astronomers are reassessing their understanding of the universe following the discovery of massive black hole jets.\n",
"\n",
"CNN also covers various other topics including entertainment, health, science, and climate-related news, showcasing a comprehensive view of both domestic and international affairs."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"display_summary(\"https://cnn.com\")" "display_summary(\"https://cnn.com\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 16,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7", "id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"# Anthropic Website Summary\n",
"\n",
"Anthropic is an AI safety and research company based in San Francisco, dedicated to developing AI models that prioritize safety and reliability. Their key AI model currently available is **Claude 3.5 Sonnet**, which is highlighted as their most intelligent model yet, released on **June 21, 2024**. The company also offers an API that allows businesses to utilize Claude for improved efficiency and new revenue streams.\n",
"\n",
"## Notable Announcements:\n",
"- **Claude 3.5 Sonnet Released**: June 21, 2024\n",
"- **Research on Harmlessness from AI Feedback**: December 15, 2022\n",
"- **Core Views on AI Safety**: March 8, 2023\n",
"\n",
"Anthropic emphasizes an interdisciplinary approach, bringing together expertise from machine learning, physics, policy, and product development to further their mission in safe AI research. They actively seek to expand their team through open positions."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"display_summary(\"https://anthropic.com\")" "display_summary(\"https://anthropic.com\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 19,
"id": "49c4315f-340b-4371-b6cd-2a772f4b7bdd", "id": "49c4315f-340b-4371-b6cd-2a772f4b7bdd",
"metadata": {}, "metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Visit Singapore Official Site\n",
"\n",
"The **Visit Singapore Official Site** serves as a comprehensive guide for tourists and locals eager to explore the myriad attractions that Singapore has to offer. The website features detailed information on various categories including:\n",
"\n",
"- **Top Attractions**: Highlights of popular places to visit, such as Gardens by the Bay, Sentosa Island, and Universal Studios Singapore.\n",
"- **Cultural Experiences**: Insights into Singapore's diverse heritage and cultural festivals.\n",
"- **Dining Options**: Recommendations for local cuisine, hawker centers, and fine dining establishments.\n",
"- **Shopping**: Guides on where to shop, including famous shopping streets and malls.\n",
"- **Events and Festivals**: Information on upcoming events and annual festivals that showcase Singapore’s vibrant lifestyle.\n",
"\n",
"The site also emphasizes the city’s safety and cleanliness, making it an appealing destination for travelers.\n",
"\n",
"### News and Announcements\n",
"No specific news or announcements were highlighted in the provided content."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://www.visitsingapore.com\")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "7586494d-d2d7-4e08-952b-b07420b12edc",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Gardens by the Bay - Summary\n",
"\n",
"Gardens by the Bay is a premier horticultural attraction located in the heart of Singapore, renowned for its diverse collection of over 1.5 million plants from around the world, excluding Antarctica. The site features iconic structures and attractions such as the Flower Dome, Cloud Forest, OCBC Skyway, and Supertree Observatory, creating a unique blend of nature and architecture.\n",
"\n",
"## Highlights\n",
"- **Attractions**: Visitors can explore various themed conservatories, interact with art sculptures, and enjoy panoramic views from the Skyway.\n",
"- **Events**: Noteworthy upcoming events include the \"Carnival of Flowers\" running from September 23 to November 17, 2024, and seasonal craft activities in the Flower Dome.\n",
"- **Sustainability**: The gardens emphasize sustainability through innovative architecture and eco-friendly practices.\n",
"\n",
"## Promotions and Membership\n",
"- Current promotions include a 15% discount on Friends of the Gardens membership for DBS/POSB cardholders until October 31, 2024, and ongoing deals for dining within the attraction.\n",
"- A chance to win air tickets to Europe is offered for new Friends of the Gardens members from September 1, 2024, to May 31, 2025.\n",
"\n",
"The website serves as a comprehensive guide for planning visits, offers educational resources for schools, and encourages engagement through social media platforms."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://www.gardensbythebay.com.sg/\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "79f8471d-46a7-4250-a550-dab379bb9263",
"metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": []
} }

356
week1/day5-Enhanced.ipynb

@ -0,0 +1,356 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa",
"metadata": {},
"source": [
"# A full business solution\n",
"\n",
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n",
"\n",
"We will be provided a company name and their primary website."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0a572211-5fe3-4dd5-9870-849cfb75901f",
"metadata": {},
"outputs": [],
"source": [
"# Import necessary libraries\n",
"import os\n",
"import requests\n",
"import json\n",
"from typing import List, Dict\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"from urllib.parse import urljoin\n",
"\n",
"# Load environment variables from a .env file\n",
"load_dotenv()\n",
"\n",
"# Define constants\n",
"MODEL = 'gpt-4o-mini' # Specify the OpenAI model to use\n",
"OPENAI_API_KEY = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env') # Get API key from environment or use default\n",
"\n",
"# Initialize OpenAI client with the API key\n",
"openai = OpenAI(api_key=OPENAI_API_KEY)\n",
"\n",
"class Website:\n",
" \"\"\"\n",
" A class to represent a website and its contents.\n",
" \"\"\"\n",
" def __init__(self, url: str):\n",
" \"\"\"\n",
" Initialize the Website object with a given URL.\n",
" \n",
" :param url: The URL of the website to scrape\n",
" \"\"\"\n",
" self.url = url\n",
" self.title, self.text, self.links = self._scrape_website()\n",
"\n",
" def _scrape_website(self) -> tuple:\n",
" \"\"\"\n",
" Scrape the website content, extracting title, text, and links.\n",
" \n",
" :return: A tuple containing the title, text content, and links of the website\n",
" \"\"\"\n",
" response = requests.get(self.url)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" \n",
" # Extract title\n",
" title = soup.title.string if soup.title else \"No title found\"\n",
" \n",
" # Extract text content\n",
" if soup.body:\n",
" for tag in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" tag.decompose() # Remove unwanted tags\n",
" text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" else:\n",
" text = \"\"\n",
" \n",
" # Extract links\n",
" links = [link.get('href') for link in soup.find_all('a') if link.get('href')]\n",
" \n",
" return title, text, links\n",
"\n",
" def get_contents(self) -> str:\n",
" \"\"\"\n",
" Get a formatted string of the website contents.\n",
" \n",
" :return: A string containing the website title and text content\n",
" \"\"\"\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n",
"\n",
"class LinkAnalyzer:\n",
" \"\"\"\n",
" A class to analyze and categorize links from a website.\n",
" \"\"\"\n",
" # System prompt for the OpenAI model to categorize links\n",
" LINK_SYSTEM_PROMPT = \"\"\"\n",
" You are provided with a list of links found on a webpage. Your task is to first categorize each link into one of the following categories:\n",
" - about page\n",
" - careers page\n",
" - terms of service\n",
" - privacy policy\n",
" - contact page\n",
" - other (please specify).\n",
"\n",
" Once the links are categorized, please choose which links are most relevant to include in a brochure about the company. \n",
" The brochure should only include links such as About pages, Careers pages, or Company Overview pages. Exclude any links related to Terms of Service, Privacy Policy, or email addresses.\n",
"\n",
" Respond in the following JSON format:\n",
" {\n",
" \"categorized_links\": [\n",
" {\"category\": \"about page\", \"url\": \"https://full.url/about\"},\n",
" {\"category\": \"careers page\", \"url\": \"https://full.url/careers\"},\n",
" {\"category\": \"terms of service\", \"url\": \"https://full.url/terms\"},\n",
" {\"category\": \"privacy policy\", \"url\": \"https://full.url/privacy\"},\n",
" {\"category\": \"other\", \"specify\": \"contact page\", \"url\": \"https://full.url/contact\"}\n",
" ],\n",
" \"brochure_links\": [\n",
" {\"type\": \"about page\", \"url\": \"https://full.url/about\"},\n",
" {\"type\": \"careers page\", \"url\": \"https://full.url/careers\"}\n",
" ]\n",
" }\n",
"\n",
" Please find the links below and proceed with the task:\n",
"\n",
" Links (some may be relative links):\n",
" [INSERT LINK LIST HERE]\n",
" \"\"\"\n",
"\n",
" @staticmethod\n",
" def get_links(website: Website) -> Dict:\n",
" \"\"\"\n",
" Analyze and categorize links from a given website.\n",
" \n",
" :param website: A Website object containing the links to analyze\n",
" :return: A dictionary containing categorized links and brochure-relevant links\n",
" \"\"\"\n",
" # Prepare the user prompt for the OpenAI model\n",
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
" Do not include Terms of Service, Privacy, email links.\\n\"\n",
" user_prompt += \"Links (some might be relative links):\\n\"\n",
" user_prompt += \"\\n\".join(website.links)\n",
"\n",
" # Make an API call to OpenAI for link analysis\n",
" completion = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": LinkAnalyzer.LINK_SYSTEM_PROMPT},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ],\n",
" response_format={\"type\": \"json_object\"}\n",
" )\n",
" return json.loads(completion.choices[0].message.content)\n",
"\n",
"class BrochureGenerator:\n",
" \"\"\"\n",
" A class to generate a company brochure based on website content.\n",
" \"\"\"\n",
" # System prompt for the OpenAI model to generate the brochure\n",
" SYSTEM_PROMPT = \"\"\"\n",
" You are an assistant that analyzes the contents of several relevant pages from a company website \n",
" and creates a brochure about the company for prospective customers, investors and recruits. Respond in markdown.\n",
" Include details of company culture, customers and careers/jobs if you have the information.\n",
" Structure the brochure to include specific sections as follows:\n",
" About Us\n",
" What we do\n",
" How We Do It\n",
" Where We Do It\n",
" Our People\n",
" Our Culture\n",
" Connect with Us.\n",
" Please provide two versions of the brochure, the first in English, the second in Spanish. The contents of the brochure are to be the same for both languages.\n",
" \"\"\"\n",
"\n",
" @staticmethod\n",
" def get_all_details(url: str) -> str:\n",
" \"\"\"\n",
" Gather all relevant details from a company's website.\n",
" \n",
" :param url: The URL of the company's main page\n",
" :return: A string containing all relevant website content\n",
" \"\"\"\n",
" result = \"Landing page:\\n\"\n",
" website = Website(url)\n",
" result += website.get_contents()\n",
"\n",
" # Analyze links and get brochure-relevant ones\n",
" links = LinkAnalyzer.get_links(website)\n",
" brochure_links = links.get('brochure_links', [])\n",
" print(\"Found Brochure links:\", brochure_links)\n",
"\n",
" # Gather content from brochure-relevant pages\n",
" for link in brochure_links:\n",
" result += f\"\\n\\n{link['type']}:\\n\"\n",
" full_url = urljoin(url, link[\"url\"])\n",
" result += Website(full_url).get_contents()\n",
"\n",
" return result\n",
"\n",
" @staticmethod\n",
" def get_brochure_user_prompt(company_name: str, url: str) -> str:\n",
" \"\"\"\n",
" Generate a user prompt for the OpenAI model to create a brochure.\n",
" \n",
" :param company_name: The name of the company\n",
" :param url: The URL of the company's main page\n",
" :return: A string containing the user prompt for brochure generation\n",
" \"\"\"\n",
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
" user_prompt += BrochureGenerator.get_all_details(url)\n",
" return user_prompt[:20_000] # Truncate if more than 20,000 characters\n",
"\n",
" @staticmethod\n",
" def stream_brochure(company_name: str, url: str):\n",
" \"\"\"\n",
" Generate and stream a company brochure.\n",
" \n",
" :param company_name: The name of the company\n",
" :param url: The URL of the company's main page\n",
" \"\"\"\n",
" # Make a streaming API call to OpenAI for brochure generation\n",
" stream = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": BrochureGenerator.SYSTEM_PROMPT},\n",
" {\"role\": \"user\", \"content\": BrochureGenerator.get_brochure_user_prompt(company_name, url)}\n",
" ],\n",
" stream=True\n",
" )\n",
"\n",
" # Display the generated brochure in real-time\n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "cc4965cf-f704-4d40-8b7d-f8e50913f87c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found Brochure links: [{'type': 'about page', 'url': 'https://edwarddonner.com/about-me-and-about-nebula/'}, {'type': 'other', 'specify': 'outsourcing', 'url': 'https://edwarddonner.com/outsmart/'}]\n"
]
},
{
"data": {
"text/markdown": [
"\n",
"# Edward Donner Company Brochure\n",
"\n",
"## About Us\n",
"Edward Donner is the creative brain behind Nebula.io, where we leverage Generative AI and advanced machine learning technologies to help recruiters effectively source, understand, engage, and manage talent. Born from a rich history in the AI landscape, our goal is simple yet profound: to aid individuals in discovering their true potential and pursuing their ikigai — their reason for being.\n",
"\n",
"## What We Do\n",
"At Edward Donner, we specialize in an array of tools and services, primarily focusing on a patented matching model that connects people with roles they are optimally suited for — all without the need for keyword searches. Our platform is designed to ensure you find your dream job while having a fulfilling and engaging work experience.\n",
"\n",
"## How We Do It\n",
"We employ groundbreaking, proprietary Large Language Models (LLMs) that are finely tuned to the recruitment industry. Our innovative approach is geared towards real-world application, minimizing the gap between candidates and their ideal roles. By focusing on individual strengths and needs, we drive efficiency and happiness in job placements.\n",
"\n",
"## Where We Do It\n",
"Our operations orbit around the vibrant backdrop of New York City, an epicenter for talent and innovation. We create an inclusive remote work environment that thrives on collaboration, creativity, and technology, ensuring that our team and our customers can engage seamlessly, wherever they are.\n",
"\n",
"## Our People\n",
"Our diverse team consists of experts in software engineering, data science, and technology leadership. Our founder, Ed, brings extensive experience and a love for programming, music, and enthusiastic problem-solving. Each individual contributes unique skills while sharing a passion for harnessing AI to tackle meaningful challenges.\n",
"\n",
"## Our Culture\n",
"At Edward Donner, we pride ourselves on fostering a culture of innovation and collaboration. We aim to create a workspace that inspires creativity, encourages continuous learning, and celebrates the successes of our employees. Our mission to elevate human potential extends to our work culture, where every voice and idea is valued.\n",
"\n",
"## Connect with Us\n",
"We would love to hear from you! To stay connected and explore opportunities, reach out via:\n",
"- Email: ed [at] edwarddonner [dot] com\n",
"- [Our Website](http://www.edwarddonner.com)\n",
"- Follow us on social media: [LinkedIn](#), [Twitter](#), [Facebook](#)\n",
"\n",
"---\n",
"\n",
"# Folleto de la Empresa Edward Donner\n",
"\n",
"## Sobre Nosotros\n",
"Edward Donner es la mente creativa detrás de Nebula.io, donde aprovechamos la IA generativa y tecnologías avanzadas de aprendizaje automático para ayudar a los reclutadores a identificar, comprender, comprometer y gestionar talentos. Nacido de una rica historia en el ámbito de IA, nuestro objetivo es simple pero profundo: ayudar a las personas a descubrir su verdadero potencial y perseguir su ikigai, su razón de ser.\n",
"\n",
"## Lo Que Hacemos\n",
"En Edward Donner, nos especializamos en una variedad de herramientas y servicios, centrados principalmente en un modelo de coincidencia patentado que conecta a las personas con los roles para los que están óptimamente calificadas, todo esto sin necesidad de búsquedas por palabras clave. Nuestra plataforma está diseñada para garantizar que encuentres tu trabajo soñado mientras vives una experiencia laboral satisfactoria y atractiva.\n",
"\n",
"## Cómo Lo Hacemos\n",
"Empleamos modelos de lenguaje de gran tamaño (LLMs) propietarios y orientados específicamente a la industria del reclutamiento. Nuestro enfoque innovador está dirigido a la aplicación del mundo real, minimizando la brecha entre los candidatos y sus roles ideales. Al centrarnos en las fortalezas y necesidades individuales, impulsamos la eficiencia y la felicidad en las colocaciones laborales.\n",
"\n",
"## Dónde Lo Hacemos\n",
"Nuestras operaciones giran en torno al vibrante telón de fondo de la ciudad de Nueva York, un epicentro de talento e innovación. Creamos un entorno de trabajo remoto inclusivo que prospera en la colaboración, la creatividad y la tecnología, asegurando que nuestro equipo y nuestros clientes puedan interactuar de manera fluida, donde sea que se encuentren.\n",
"\n",
"## Nuestra Gente\n",
"Nuestro diverso equipo está compuesto por expertos en ingeniería de software, ciencia de datos y liderazgo tecnológico. Nuestro fundador, Ed, aporta una amplia experiencia y un amor por la programación, la música y la resolución entusiasta de problemas. Cada individuo contribuye con habilidades únicas mientras comparte la pasión por aprovechar la IA para abordar desafíos significativos.\n",
"\n",
"## Nuestra Cultura\n",
"En Edward Donner, nos enorgullece fomentar una cultura de innovación y colaboración. Nuestro objetivo es crear un espacio de trabajo que inspire la creatividad, fomente el aprendizaje continuo y celebre los éxitos de nuestros empleados. Nuestra misión de elevar el potencial humano se extiende a nuestra cultura laboral, donde cada voz e idea es valorada.\n",
"\n",
"## Conéctate Con Nosotros\n",
"¡Nos encantaría saber de ti! Para mantener la conexión y explorar oportunidades, contáctanos a través de:\n",
"- Email: ed [at] edwarddonner [dot] com\n",
"- [Nuestro Sitio Web](http://www.edwarddonner.com)\n",
"- Síguenos en redes sociales: [LinkedIn](#), [Twitter](#), [Facebook](#)\n",
"\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Main execution block\n",
"if __name__ == \"__main__\":\n",
" # Generate a brochure\n",
" BrochureGenerator.stream_brochure(\"Edward Donner\", \"https://edwarddonner.com/\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0acb1194-fe89-40e3-8c3b-a10483315d3f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

4823
week1/day5.ipynb

File diff suppressed because it is too large Load Diff
Loading…
Cancel
Save