4 changed files with 5548 additions and 47 deletions
@ -0,0 +1,163 @@ |
|||||||
|
# imports |
||||||
|
|
||||||
|
import os |
||||||
|
import requests |
||||||
|
import json |
||||||
|
from typing import List |
||||||
|
from dotenv import load_dotenv |
||||||
|
from bs4 import BeautifulSoup |
||||||
|
from IPython.display import Markdown, display, update_display |
||||||
|
from openai import OpenAI |
||||||
|
|
||||||
|
|
||||||
|
# Initialize and constants

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Re-export the key into the process environment so the OpenAI client can find
# it. NOTE(review): the placeholder default will not authenticate — confirm a
# real key is provided via .env or the environment.
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')

# Model used for both the link-categorization call and brochure generation.
MODEL = 'gpt-4o-mini'

# Client picks up OPENAI_API_KEY from the environment set above.
openai = OpenAI()
||||||
|
|
||||||
|
|
||||||
|
# A class to represent a Webpage


class Website:
    """Scrape a single web page, capturing its title, visible text and links.

    Attributes:
        url:   The URL that was fetched.
        title: Contents of the <title> tag, or a fallback message.
        body:  Raw response body bytes as returned by requests.
        text:  Visible text of <body> with script/style/img/input removed.
        links: Every non-empty href found in <a> tags (may be relative).
    """

    url: str
    title: str
    body: bytes  # was annotated str, but requests .content is bytes
    text: str
    links: List[str]

    def __init__(self, url):
        """Fetch *url* and parse out its title, text and links.

        :param url: Absolute URL of the page to scrape.
        :raises requests.RequestException: on network failure, timeout,
            or a 4xx/5xx HTTP status.
        """
        self.url = url
        # A timeout stops a hung connection from blocking the whole run;
        # raise_for_status surfaces HTTP errors instead of parsing an error page.
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        self.body = response.content
        soup = BeautifulSoup(self.body, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        if soup.body:
            # Remove tags that carry no prose before extracting the text.
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            self.text = soup.body.get_text(separator="\n", strip=True)
        else:
            self.text = ""
        # Keep only truthy hrefs (drops None and empty strings).
        links = [link.get('href') for link in soup.find_all('a')]
        self.links = [link for link in links if link]

    def get_contents(self):
        """Return the page title and text formatted for inclusion in a prompt."""
        return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"
||||||
|
|
||||||
|
# System prompt for the link-categorization call: the model labels every link,
# then selects the brochure-relevant ones, replying as a JSON object matching
# the response_format={"type": "json_object"} used in get_links().
link_system_prompt = """
You are provided with a list of links found on a webpage. Your task is to first categorize each link into one of the following categories:
- about page
- careers page
- terms of service
- privacy policy
- contact page
- other (please specify).

Once the links are categorized, please choose which links are most relevant to include in a brochure about the company.
The brochure should only include links such as About pages, Careers pages, or Company Overview pages. Exclude any links related to Terms of Service, Privacy Policy, or email addresses.

Respond in the following JSON format:
{
    "categorized_links": [
        {"category": "about page", "url": "https://full.url/about"},
        {"category": "careers page", "url": "https://full.url/careers"},
        {"category": "terms of service", "url": "https://full.url/terms"},
        {"category": "privacy policy", "url": "https://full.url/privacy"},
        {"category": "other", "specify": "contact page", "url": "https://full.url/contact"}
    ],
    "brochure_links": [
        {"type": "about page", "url": "https://full.url/about"},
        {"type": "careers page", "url": "https://full.url/careers"}
    ]
}

Please find the links below and proceed with the task:

Links (some may be relative links):
[INSERT LINK LIST HERE]
"""
||||||
|
|
||||||
|
def get_links_user_prompt(website):
    """Build the user message for the link-categorization call.

    :param website: A Website instance providing ``url`` and ``links``.
    :return: Prompt text listing every link found on the page.
    """
    # The original used a backslash line-continuation *inside* a string
    # literal, which leaked the source file's indentation into the prompt
    # text; adjacent literals keep the prompt clean.
    user_prompt = (
        f"Here is the list of links on the website of {website.url} - "
        "please decide which of these are relevant web links for a brochure "
        "about the company, respond with the full https URL in JSON format. "
        "Do not include Terms of Service, Privacy, email links.\n"
        "Links (some might be relative links):\n"
    )
    user_prompt += "\n".join(website.links)
    return user_prompt
||||||
|
|
||||||
|
def get_links(url):
    """Ask the model to categorize the links found on *url*.

    :param url: URL of the page whose links should be analyzed.
    :return: Parsed JSON dict with 'categorized_links' and 'brochure_links'.
    """
    site = Website(url)
    messages = [
        {"role": "system", "content": link_system_prompt},
        {"role": "user", "content": get_links_user_prompt(site)},
    ]
    # JSON mode guarantees the reply parses as a single JSON object.
    completion = openai.chat.completions.create(
        model=MODEL,
        messages=messages,
        response_format={"type": "json_object"},
    )
    payload = completion.choices[0].message.content
    return json.loads(payload)
||||||
|
|
||||||
|
|
||||||
|
from urllib.parse import urljoin |
||||||
|
|
||||||
|
def get_all_details(url):
    """Collect the landing page plus every brochure-relevant linked page.

    :param url: URL of the company's landing page.
    :return: Concatenated text of the landing page and the selected sub-pages.
    """
    parts = ["Landing page:\n", Website(url).get_contents()]

    # Ask the model which of the page's links belong in the brochure.
    links = get_links(url)
    brochure_links = links.get('brochure_links', [])
    print("Found Brochure links:", brochure_links)

    for link in brochure_links:
        # Label each section with the link's category.
        parts.append(f"\n\n{link['type']}:\n")
        # Relative hrefs are resolved against the landing-page URL.
        full_url = urljoin(url, link["url"])
        parts.append(Website(full_url).get_contents())

    return "".join(parts)
||||||
|
|
||||||
|
|
||||||
|
# NOTE: the original built this with backslash line-continuations inside a
# single-quoted string, which ran the section names together with no separator
# ("About UsWhat we doHow We Do It..."). A triple-quoted string keeps each
# section on its own line so the model sees the intended structure.
system_prompt = """You are an assistant that analyzes the contents of several relevant pages from a company website \
and creates a brochure about the company for prospective customers, investors and recruits. Respond in markdown. \
Include details of company culture, customers and careers/jobs if you have the information.
Structure the brochure to include specific sections as follows:
About Us
What we do
How We Do It
Where We Do It
Our People
Our Culture
Connect with Us.
Please provide two versions of the brochure, the first in English, the second in Spanish. The contents of the brochure are to be the same for both languages."""
||||||
|
|
||||||
|
def get_brochure_user_prompt(company_name, url):
    """Build the user message for the brochure-generation call.

    :param company_name: Display name of the company.
    :param url: URL of the company's landing page.
    :return: Prompt text, truncated to at most 20,000 characters.
    """
    pieces = [
        f"You are looking at a company called: {company_name}\n",
        "Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n",
        get_all_details(url),
    ]
    # Cap the prompt size so it stays within the model's context window.
    return "".join(pieces)[:20_000]
||||||
|
|
||||||
|
def stream_brochure(company_name, url):
    """Generate a company brochure and render it incrementally in the notebook.

    :param company_name: Display name of the company.
    :param url: URL of the company's landing page.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": get_brochure_user_prompt(company_name, url)},
    ]
    # stream=True yields the completion incrementally, chunk by chunk.
    stream = openai.chat.completions.create(
        model=MODEL,
        messages=messages,
        stream=True,
    )

    accumulated = ""
    display_handle = display(Markdown(""), display_id=True)
    for chunk in stream:
        accumulated += chunk.choices[0].delta.content or ''
        # Strip code fences so the markdown renders instead of showing raw text.
        accumulated = accumulated.replace("```", "").replace("markdown", "")
        update_display(Markdown(accumulated), display_id=display_handle.display_id)
||||||
|
|
||||||
|
stream_brochure("Anthropic", "https://anthropic.com") |
@ -0,0 +1,356 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# A full business solution\n", |
||||||
|
"\n", |
||||||
|
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n", |
||||||
|
"\n", |
||||||
|
"We will be provided a company name and their primary website." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 3, |
||||||
|
"id": "0a572211-5fe3-4dd5-9870-849cfb75901f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Import necessary libraries\n", |
||||||
|
"import os\n", |
||||||
|
"import requests\n", |
||||||
|
"import json\n", |
||||||
|
"from typing import List, Dict\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"from IPython.display import Markdown, display, update_display\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"from urllib.parse import urljoin\n", |
||||||
|
"\n", |
||||||
|
"# Load environment variables from a .env file\n", |
||||||
|
"load_dotenv()\n", |
||||||
|
"\n", |
||||||
|
"# Define constants\n", |
||||||
|
"MODEL = 'gpt-4o-mini' # Specify the OpenAI model to use\n", |
||||||
|
"OPENAI_API_KEY = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env') # Get API key from environment or use default\n", |
||||||
|
"\n", |
||||||
|
"# Initialize OpenAI client with the API key\n", |
||||||
|
"openai = OpenAI(api_key=OPENAI_API_KEY)\n", |
||||||
|
"\n", |
||||||
|
"class Website:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" A class to represent a website and its contents.\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" def __init__(self, url: str):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Initialize the Website object with a given URL.\n", |
||||||
|
" \n", |
||||||
|
" :param url: The URL of the website to scrape\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" self.url = url\n", |
||||||
|
" self.title, self.text, self.links = self._scrape_website()\n", |
||||||
|
"\n", |
||||||
|
" def _scrape_website(self) -> tuple:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Scrape the website content, extracting title, text, and links.\n", |
||||||
|
" \n", |
||||||
|
" :return: A tuple containing the title, text content, and links of the website\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" response = requests.get(self.url)\n", |
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||||
|
" \n", |
||||||
|
" # Extract title\n", |
||||||
|
" title = soup.title.string if soup.title else \"No title found\"\n", |
||||||
|
" \n", |
||||||
|
" # Extract text content\n", |
||||||
|
" if soup.body:\n", |
||||||
|
" for tag in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||||
|
" tag.decompose() # Remove unwanted tags\n", |
||||||
|
" text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||||
|
" else:\n", |
||||||
|
" text = \"\"\n", |
||||||
|
" \n", |
||||||
|
" # Extract links\n", |
||||||
|
" links = [link.get('href') for link in soup.find_all('a') if link.get('href')]\n", |
||||||
|
" \n", |
||||||
|
" return title, text, links\n", |
||||||
|
"\n", |
||||||
|
" def get_contents(self) -> str:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Get a formatted string of the website contents.\n", |
||||||
|
" \n", |
||||||
|
" :return: A string containing the website title and text content\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", |
||||||
|
"\n", |
||||||
|
"class LinkAnalyzer:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" A class to analyze and categorize links from a website.\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" # System prompt for the OpenAI model to categorize links\n", |
||||||
|
" LINK_SYSTEM_PROMPT = \"\"\"\n", |
||||||
|
" You are provided with a list of links found on a webpage. Your task is to first categorize each link into one of the following categories:\n", |
||||||
|
" - about page\n", |
||||||
|
" - careers page\n", |
||||||
|
" - terms of service\n", |
||||||
|
" - privacy policy\n", |
||||||
|
" - contact page\n", |
||||||
|
" - other (please specify).\n", |
||||||
|
"\n", |
||||||
|
" Once the links are categorized, please choose which links are most relevant to include in a brochure about the company. \n", |
||||||
|
" The brochure should only include links such as About pages, Careers pages, or Company Overview pages. Exclude any links related to Terms of Service, Privacy Policy, or email addresses.\n", |
||||||
|
"\n", |
||||||
|
" Respond in the following JSON format:\n", |
||||||
|
" {\n", |
||||||
|
" \"categorized_links\": [\n", |
||||||
|
" {\"category\": \"about page\", \"url\": \"https://full.url/about\"},\n", |
||||||
|
" {\"category\": \"careers page\", \"url\": \"https://full.url/careers\"},\n", |
||||||
|
" {\"category\": \"terms of service\", \"url\": \"https://full.url/terms\"},\n", |
||||||
|
" {\"category\": \"privacy policy\", \"url\": \"https://full.url/privacy\"},\n", |
||||||
|
" {\"category\": \"other\", \"specify\": \"contact page\", \"url\": \"https://full.url/contact\"}\n", |
||||||
|
" ],\n", |
||||||
|
" \"brochure_links\": [\n", |
||||||
|
" {\"type\": \"about page\", \"url\": \"https://full.url/about\"},\n", |
||||||
|
" {\"type\": \"careers page\", \"url\": \"https://full.url/careers\"}\n", |
||||||
|
" ]\n", |
||||||
|
" }\n", |
||||||
|
"\n", |
||||||
|
" Please find the links below and proceed with the task:\n", |
||||||
|
"\n", |
||||||
|
" Links (some may be relative links):\n", |
||||||
|
" [INSERT LINK LIST HERE]\n", |
||||||
|
" \"\"\"\n", |
||||||
|
"\n", |
||||||
|
" @staticmethod\n", |
||||||
|
" def get_links(website: Website) -> Dict:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Analyze and categorize links from a given website.\n", |
||||||
|
" \n", |
||||||
|
" :param website: A Website object containing the links to analyze\n", |
||||||
|
" :return: A dictionary containing categorized links and brochure-relevant links\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" # Prepare the user prompt for the OpenAI model\n", |
||||||
|
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||||
|
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||||
|
" Do not include Terms of Service, Privacy, email links.\\n\"\n", |
||||||
|
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||||
|
" user_prompt += \"\\n\".join(website.links)\n", |
||||||
|
"\n", |
||||||
|
" # Make an API call to OpenAI for link analysis\n", |
||||||
|
" completion = openai.chat.completions.create(\n", |
||||||
|
" model=MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": LinkAnalyzer.LINK_SYSTEM_PROMPT},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||||
|
" ],\n", |
||||||
|
" response_format={\"type\": \"json_object\"}\n", |
||||||
|
" )\n", |
||||||
|
" return json.loads(completion.choices[0].message.content)\n", |
||||||
|
"\n", |
||||||
|
"class BrochureGenerator:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" A class to generate a company brochure based on website content.\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" # System prompt for the OpenAI model to generate the brochure\n", |
||||||
|
" SYSTEM_PROMPT = \"\"\"\n", |
||||||
|
" You are an assistant that analyzes the contents of several relevant pages from a company website \n", |
||||||
|
" and creates a brochure about the company for prospective customers, investors and recruits. Respond in markdown.\n", |
||||||
|
" Include details of company culture, customers and careers/jobs if you have the information.\n", |
||||||
|
" Structure the brochure to include specific sections as follows:\n", |
||||||
|
" About Us\n", |
||||||
|
" What we do\n", |
||||||
|
" How We Do It\n", |
||||||
|
" Where We Do It\n", |
||||||
|
" Our People\n", |
||||||
|
" Our Culture\n", |
||||||
|
" Connect with Us.\n", |
||||||
|
" Please provide two versions of the brochure, the first in English, the second in Spanish. The contents of the brochure are to be the same for both languages.\n", |
||||||
|
" \"\"\"\n", |
||||||
|
"\n", |
||||||
|
" @staticmethod\n", |
||||||
|
" def get_all_details(url: str) -> str:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Gather all relevant details from a company's website.\n", |
||||||
|
" \n", |
||||||
|
" :param url: The URL of the company's main page\n", |
||||||
|
" :return: A string containing all relevant website content\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" result = \"Landing page:\\n\"\n", |
||||||
|
" website = Website(url)\n", |
||||||
|
" result += website.get_contents()\n", |
||||||
|
"\n", |
||||||
|
" # Analyze links and get brochure-relevant ones\n", |
||||||
|
" links = LinkAnalyzer.get_links(website)\n", |
||||||
|
" brochure_links = links.get('brochure_links', [])\n", |
||||||
|
" print(\"Found Brochure links:\", brochure_links)\n", |
||||||
|
"\n", |
||||||
|
" # Gather content from brochure-relevant pages\n", |
||||||
|
" for link in brochure_links:\n", |
||||||
|
" result += f\"\\n\\n{link['type']}:\\n\"\n", |
||||||
|
" full_url = urljoin(url, link[\"url\"])\n", |
||||||
|
" result += Website(full_url).get_contents()\n", |
||||||
|
"\n", |
||||||
|
" return result\n", |
||||||
|
"\n", |
||||||
|
" @staticmethod\n", |
||||||
|
" def get_brochure_user_prompt(company_name: str, url: str) -> str:\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Generate a user prompt for the OpenAI model to create a brochure.\n", |
||||||
|
" \n", |
||||||
|
" :param company_name: The name of the company\n", |
||||||
|
" :param url: The URL of the company's main page\n", |
||||||
|
" :return: A string containing the user prompt for brochure generation\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||||
|
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||||
|
" user_prompt += BrochureGenerator.get_all_details(url)\n", |
||||||
|
" return user_prompt[:20_000] # Truncate if more than 20,000 characters\n", |
||||||
|
"\n", |
||||||
|
" @staticmethod\n", |
||||||
|
" def stream_brochure(company_name: str, url: str):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Generate and stream a company brochure.\n", |
||||||
|
" \n", |
||||||
|
" :param company_name: The name of the company\n", |
||||||
|
" :param url: The URL of the company's main page\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" # Make a streaming API call to OpenAI for brochure generation\n", |
||||||
|
" stream = openai.chat.completions.create(\n", |
||||||
|
" model=MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": BrochureGenerator.SYSTEM_PROMPT},\n", |
||||||
|
" {\"role\": \"user\", \"content\": BrochureGenerator.get_brochure_user_prompt(company_name, url)}\n", |
||||||
|
" ],\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
" # Display the generated brochure in real-time\n", |
||||||
|
" response = \"\"\n", |
||||||
|
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" response += chunk.choices[0].delta.content or ''\n", |
||||||
|
" response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n", |
||||||
|
" update_display(Markdown(response), display_id=display_handle.display_id)\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 5, |
||||||
|
"id": "cc4965cf-f704-4d40-8b7d-f8e50913f87c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [ |
||||||
|
{ |
||||||
|
"name": "stdout", |
||||||
|
"output_type": "stream", |
||||||
|
"text": [ |
||||||
|
"Found Brochure links: [{'type': 'about page', 'url': 'https://edwarddonner.com/about-me-and-about-nebula/'}, {'type': 'other', 'specify': 'outsourcing', 'url': 'https://edwarddonner.com/outsmart/'}]\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"data": { |
||||||
|
"text/markdown": [ |
||||||
|
"\n", |
||||||
|
"# Edward Donner Company Brochure\n", |
||||||
|
"\n", |
||||||
|
"## About Us\n", |
||||||
|
"Edward Donner is the creative brain behind Nebula.io, where we leverage Generative AI and advanced machine learning technologies to help recruiters effectively source, understand, engage, and manage talent. Born from a rich history in the AI landscape, our goal is simple yet profound: to aid individuals in discovering their true potential and pursuing their ikigai — their reason for being.\n", |
||||||
|
"\n", |
||||||
|
"## What We Do\n", |
||||||
|
"At Edward Donner, we specialize in an array of tools and services, primarily focusing on a patented matching model that connects people with roles they are optimally suited for — all without the need for keyword searches. Our platform is designed to ensure you find your dream job while having a fulfilling and engaging work experience.\n", |
||||||
|
"\n", |
||||||
|
"## How We Do It\n", |
||||||
|
"We employ groundbreaking, proprietary Large Language Models (LLMs) that are finely tuned to the recruitment industry. Our innovative approach is geared towards real-world application, minimizing the gap between candidates and their ideal roles. By focusing on individual strengths and needs, we drive efficiency and happiness in job placements.\n", |
||||||
|
"\n", |
||||||
|
"## Where We Do It\n", |
||||||
|
"Our operations orbit around the vibrant backdrop of New York City, an epicenter for talent and innovation. We create an inclusive remote work environment that thrives on collaboration, creativity, and technology, ensuring that our team and our customers can engage seamlessly, wherever they are.\n", |
||||||
|
"\n", |
||||||
|
"## Our People\n", |
||||||
|
"Our diverse team consists of experts in software engineering, data science, and technology leadership. Our founder, Ed, brings extensive experience and a love for programming, music, and enthusiastic problem-solving. Each individual contributes unique skills while sharing a passion for harnessing AI to tackle meaningful challenges.\n", |
||||||
|
"\n", |
||||||
|
"## Our Culture\n", |
||||||
|
"At Edward Donner, we pride ourselves on fostering a culture of innovation and collaboration. We aim to create a workspace that inspires creativity, encourages continuous learning, and celebrates the successes of our employees. Our mission to elevate human potential extends to our work culture, where every voice and idea is valued.\n", |
||||||
|
"\n", |
||||||
|
"## Connect with Us\n", |
||||||
|
"We would love to hear from you! To stay connected and explore opportunities, reach out via:\n", |
||||||
|
"- Email: ed [at] edwarddonner [dot] com\n", |
||||||
|
"- [Our Website](http://www.edwarddonner.com)\n", |
||||||
|
"- Follow us on social media: [LinkedIn](#), [Twitter](#), [Facebook](#)\n", |
||||||
|
"\n", |
||||||
|
"---\n", |
||||||
|
"\n", |
||||||
|
"# Folleto de la Empresa Edward Donner\n", |
||||||
|
"\n", |
||||||
|
"## Sobre Nosotros\n", |
||||||
|
"Edward Donner es la mente creativa detrás de Nebula.io, donde aprovechamos la IA generativa y tecnologías avanzadas de aprendizaje automático para ayudar a los reclutadores a identificar, comprender, comprometer y gestionar talentos. Nacido de una rica historia en el ámbito de IA, nuestro objetivo es simple pero profundo: ayudar a las personas a descubrir su verdadero potencial y perseguir su ikigai, su razón de ser.\n", |
||||||
|
"\n", |
||||||
|
"## Lo Que Hacemos\n", |
||||||
|
"En Edward Donner, nos especializamos en una variedad de herramientas y servicios, centrados principalmente en un modelo de coincidencia patentado que conecta a las personas con los roles para los que están óptimamente calificadas, todo esto sin necesidad de búsquedas por palabras clave. Nuestra plataforma está diseñada para garantizar que encuentres tu trabajo soñado mientras vives una experiencia laboral satisfactoria y atractiva.\n", |
||||||
|
"\n", |
||||||
|
"## Cómo Lo Hacemos\n", |
||||||
|
"Empleamos modelos de lenguaje de gran tamaño (LLMs) patentados y orientados específicamente a la industria del reclutamiento. Nuestro enfoque innovador está dirigido a la aplicación del mundo real, minimizando la brecha entre los candidatos y sus roles ideales. Al centrarnos en las fortalezas y necesidades individuales, impulsamos la eficiencia y la felicidad en las colocaciones laborales.\n", |
||||||
|
"\n", |
||||||
|
"## Dónde Lo Hacemos\n", |
||||||
|
"Nuestras operaciones giran en torno al vibrante telón de fondo de la ciudad de Nueva York, un epicentro de talento e innovación. Creamos un entorno de trabajo remoto inclusivo que prospera en la colaboración, la creatividad y la tecnología, asegurando que nuestro equipo y nuestros clientes puedan interactuar de manera fluida, donde sea que se encuentren.\n", |
||||||
|
"\n", |
||||||
|
"## Nuestra Gente\n", |
||||||
|
"Nuestro diverso equipo está compuesto por expertos en ingeniería de software, ciencia de datos y liderazgo tecnológico. Nuestro fundador, Ed, aporta una amplia experiencia y un amor por la programación, la música y la resolución entusiasta de problemas. Cada individuo contribuye con habilidades únicas mientras comparte la pasión por aprovechar la IA para abordar desafíos significativos.\n", |
||||||
|
"\n", |
||||||
|
"## Nuestra Cultura\n", |
||||||
|
"En Edward Donner, nos enorgullece fomentar una cultura de innovación y colaboración. Nuestro objetivo es crear un espacio de trabajo que inspire la creatividad, fomente el aprendizaje continuo y celebre los éxitos de nuestros empleados. Nuestra misión de elevar el potencial humano se extiende a nuestra cultura laboral, donde cada voz e idea es valorada.\n", |
||||||
|
"\n", |
||||||
|
"## Conéctate Con Nosotros\n", |
||||||
|
"¡Nos encantaría saber de ti! Para mantener la conexión y explorar oportunidades, contáctanos a través de:\n", |
||||||
|
"- Email: ed [at] edwarddonner [dot] com\n", |
||||||
|
"- [Nuestro Sitio Web](http://www.edwarddonner.com)\n", |
||||||
|
"- Síguenos en redes sociales: [LinkedIn](#), [Twitter](#), [Facebook](#)\n", |
||||||
|
"\n" |
||||||
|
], |
||||||
|
"text/plain": [ |
||||||
|
"<IPython.core.display.Markdown object>" |
||||||
|
] |
||||||
|
}, |
||||||
|
"metadata": {}, |
||||||
|
"output_type": "display_data" |
||||||
|
} |
||||||
|
], |
||||||
|
"source": [ |
||||||
|
"# Main execution block\n", |
||||||
|
"if __name__ == \"__main__\":\n", |
||||||
|
" # Generate a brochure\n", |
||||||
|
" BrochureGenerator.stream_brochure(\"Edward Donner\", \"https://edwarddonner.com/\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0acb1194-fe89-40e3-8c3b-a10483315d3f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.10" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
Loading…
Reference in new issue