From the Udemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
# imports

import os
import requests
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from IPython.display import Markdown, display
from openai import OpenAI
from groq import Groq

print("My name is Sameerrr")

# If you get an error running this cell, then please head over to the troubleshooting notebook!
|
|
|
|
|
|
|
|
|
|
|
# Load environment variables in a file called .env

load_dotenv(override=True)
api_key = os.getenv('GROQ_API_KEY')
|
|
|
# Check the key

if not api_key:
    print("No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!")
elif not api_key.startswith("gsk_"):
    print("An API key was found, but it doesn't start with gsk_; this notebook uses a Groq key, so please check you're using the right key - see troubleshooting notebook")
elif api_key.strip() != api_key:
    print("An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook")
else:
    print("API key found and looks good so far!")
|
|
|
|
|
|
|
client = Groq(api_key=api_key)

# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.
# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions
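# Optional alternative (my addition, not used in the rest of this notebook): the OpenAI client imported
# above can also talk to Groq, because Groq exposes an OpenAI-compatible endpoint.
openai_client = OpenAI(api_key=api_key, base_url="https://api.groq.com/openai/v1")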
|
|
|
|
|
|
|
|
|
|
|
# To give you a preview -- calling the Groq API with these messages is this easy. Any problems, head over to the Troubleshooting notebook.

message = "Hello, GPT! This is my first ever message to you! Hi!"
response = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": message,
        }
    ],
    model="deepseek-r1-distill-llama-70b",
)
print(response.choices[0].message.content)
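# Note (my addition): deepseek-r1-distill-llama-70b is a reasoning model, and depending on settings its
# reply may include its chain of thought wrapped in <think>...</think> tags. If you see those tags,
# this small helper (hypothetical, not part of the course) strips them before printing:
import re

def strip_think_tags(text):
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()

print(strip_think_tags(response.choices[0].message.content))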
|
|
|
|
|
|
|
|
|
|
|
# A class to represent a Webpage
# If you're not familiar with Classes, check out the "Intermediate Python" notebook

# Some websites need you to use proper headers when fetching them:
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}

class Website:

    def __init__(self, url):
        """
        Create this Website object from the given url using the BeautifulSoup library
        """
        self.url = url
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        for irrelevant in soup.body(["script", "style", "img", "input"]):
            irrelevant.decompose()
        self.text = soup.body.get_text(separator="\n", strip=True)
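# Optional defensive sketch (my addition, not from the course): some pages fail to fetch or have no <body>,
# which would make Website's constructor raise. This hypothetical subclass falls back to placeholder text
# instead of crashing, so a batch of URLs can keep going.
class SafeWebsite(Website):

    def __init__(self, url):
        try:
            super().__init__(url)
        except Exception as e:
            self.url = url
            self.title = "No title found"
            self.text = f"Could not fetch or parse this page: {e}"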
|
|
|
|
|
# Let's try one out. Change the website and add print statements to follow along.

ed = Website("https://edwarddonner.com")
print(ed.title)
print(ed.text)
|
|
|
|
|
|
|
|
|
|
|
# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'

system_prompt = "You are an assistant that analyzes the contents of a website \
and provides a short summary, ignoring text that might be navigation related. \
Respond in markdown."
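# As suggested in the comment above, you could ask for Spanish output instead -- one possible variant
# (wording is mine, purely illustrative):
system_prompt_spanish = "You are an assistant that analyzes the contents of a website \
and provides a short summary, ignoring text that might be navigation related. \
Respond in markdown in Spanish."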
|
|
|
|
|
# A function that writes a User Prompt that asks for summaries of websites:

def user_prompt_for(website):
    user_prompt = f"You are looking at a website titled {website.title}"
    user_prompt += "\nThe contents of this website is as follows; \
please provide a short summary of this website in markdown. \
If it includes news or announcements, then summarize these too.\n\n"
    user_prompt += website.text
    return user_prompt
|
|
|
|
|
print(user_prompt_for(ed))
|
|
|
|
|
|
|
|
|
|
|
messages = [
    {"role": "system", "content": "You are a snarky assistant"},
    {"role": "user", "content": "What is 2 + 2?"}
]
|
|
|
|
|
# To give you a preview -- calling the Groq API with system and user messages:

response = client.chat.completions.create(model="deepseek-r1-distill-llama-70b", messages=messages)
print(response.choices[0].message.content)
|
|
|
|
|
|
|
|
|
|
|
# See how this function creates exactly the format above

def messages_for(website):
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt_for(website)}
    ]
|
|
|
|
|
# Try this out, and then try for a few more websites

messages_for(ed)
|
|
|
|
|
|
|
|
|
|
|
# And now: call the Groq API. You will get very familiar with this!

def summarize(url):
    website = Website(url)
    response = client.chat.completions.create(
        model = "deepseek-r1-distill-llama-70b",
        messages = messages_for(website)
    )
    return response.choices[0].message.content
|
|
|
|
|
summarize("https://edwarddonner.com") |
|
|
|
|
|
# A function to display this nicely in the Jupyter output, using markdown

def display_summary(url):
    summary = summarize(url)
    display(Markdown(summary))
|
|
|
|
|
display_summary("https://edwarddonner.com") |
|
|
|
|
|
|
|
|
|
|
|
display_summary("https://cnn.com") |
|
|
|
|
|
display_summary("https://anthropic.com") |
|
|
|
|
|
|
|
|
|
|
|
# Step 1: Create your prompts

system_prompt = "something here"
user_prompt = """
Lots of text
Can be pasted here
"""

# Step 2: Make the messages list

messages = [] # fill this in

# Step 3: Call the Groq API

response =

# Step 4: print the result

print(
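# One possible way to fill in the steps above (a sketch, not an official solution -- the prompts here are
# the Step 1 placeholders, which you'd replace with your own):
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": user_prompt}
]

response = client.chat.completions.create(
    model="deepseek-r1-distill-llama-70b",
    messages=messages
)

print(response.choices[0].message.content)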
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|