54 changed files with 1004 additions and 153 deletions
@ -0,0 +1,625 @@
|
||||
{ |
||||
"cells": [ |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"source": [ |
||||
"# A full business solution\n", |
||||
"\n", |
||||
"## Now we will take our project from Day 1 to the next level\n", |
||||
"\n", |
||||
"### BUSINESS CHALLENGE:\n", |
||||
"\n", |
||||
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n", |
||||
"\n", |
||||
"We will be provided a company name and their primary website.\n", |
||||
"\n", |
||||
"See the end of this notebook for examples of real-world business applications.\n", |
||||
"\n", |
||||
"And remember: I'm always available if you have problems or ideas! Please do reach out." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d5b08506-dc8b-4443-9201-5f1848161363", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# imports\n", |
||||
"# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n", |
||||
"\n", |
||||
"import os\n", |
||||
"import requests\n", |
||||
"import json\n", |
||||
"from typing import List\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"from bs4 import BeautifulSoup\n", |
||||
"from IPython.display import Markdown, display, update_display\n", |
||||
"from openai import OpenAI\n", |
||||
"\n", |
||||
"# from Kamran; to use Llama instead of chatgpt;\n", |
||||
"# imports\n", |
||||
"\n", |
||||
"import ollama" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Initialize and constants\n", |
||||
"\n", |
||||
"# Commented out below lines;\n", |
||||
"# load_dotenv()\n", |
||||
"# api_key = os.getenv('OPENAI_API_KEY')\n", |
||||
"\n", |
||||
"# if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", |
||||
"# print(\"API key looks good so far\")\n", |
||||
"# else:\n", |
||||
"# print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
||||
" \n", |
||||
"# MODEL = 'gpt-4o-mini'\n", |
||||
"# openai = OpenAI()\n", |
||||
"\n", |
||||
"# Added by Kamran.\n", |
||||
"MODEL_LLAMA = 'llama3.2'" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "106dd65e-90af-4ca8-86b6-23a41840645b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# A class to represent a Webpage\n", |
||||
"\n", |
||||
"class Website:\n", |
||||
" \"\"\"\n", |
||||
" A utility class to represent a Website that we have scraped, now with links\n", |
||||
" \"\"\"\n", |
||||
"\n", |
||||
" def __init__(self, url):\n", |
||||
" self.url = url\n", |
||||
" response = requests.get(url)\n", |
||||
" self.body = response.content\n", |
||||
" soup = BeautifulSoup(self.body, 'html.parser')\n", |
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||
" if soup.body:\n", |
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||
" irrelevant.decompose()\n", |
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", |
||||
" else:\n", |
||||
" self.text = \"\"\n", |
||||
" links = [link.get('href') for link in soup.find_all('a')]\n", |
||||
" self.links = [link for link in links if link]\n", |
||||
"\n", |
||||
" def get_contents(self):\n", |
||||
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"ed = Website(\"https://edwarddonner.com\")\n", |
||||
"ed.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "1771af9c-717a-4fca-bbbe-8a95893312c3", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## First step: Have the LLM (here Llama 3.2 via Ollama) figure out which links are relevant\n", |
||||
"\n", |
||||
"### Use a call to the model to read the links on a webpage, and respond in structured JSON. \n", |
||||
"It should decide which links are relevant, and replace relative links such as \"/about\" with \"https://company.com/about\". \n", |
||||
"We will use \"one shot prompting\" in which we provide an example of how it should respond in the prompt.\n", |
||||
"\n", |
||||
"This is an excellent use case for an LLM, because it requires nuanced understanding. Imagine trying to code this without LLMs by parsing and analyzing the webpage - it would be very hard!\n", |
||||
"\n", |
||||
"Sidenote: there is a more advanced technique called \"Structured Outputs\" in which we require the model to respond according to a spec. We cover this technique in Week 8 during our autonomous Agentic AI project." |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6957b079-0d96-45f7-a26a-3487510e9b35", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", |
||||
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", |
||||
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", |
||||
"link_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||
"link_system_prompt += \"\"\"\n", |
||||
"{\n", |
||||
" \"links\": [\n", |
||||
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", |
||||
"        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", |
||||
" ]\n", |
||||
"}\n", |
||||
"\"\"\"" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b97e4068-97ed-4120-beae-c42105e4d59a", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(link_system_prompt)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_links_user_prompt(website):\n", |
||||
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", |
||||
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", |
||||
"Do not include Terms of Service, Privacy, email links.\\n\"\n", |
||||
" user_prompt += \"Links (some might be relative links):\\n\"\n", |
||||
" user_prompt += \"\\n\".join(website.links)\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6bcbfa78-6395-4685-b92c-22d592050fd7", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_links_user_prompt(ed))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "a29aca19-ca13-471c-a4b4-5abbfa813f69", |
||||
"metadata": { |
||||
"editable": true, |
||||
"slideshow": { |
||||
"slide_type": "" |
||||
}, |
||||
"tags": [] |
||||
}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Get Llama 3.2 to answer\n", |
||||
"\n", |
||||
"# def get_links(url):\n", |
||||
"# website = Website(url)\n", |
||||
"# response = openai.chat.completions.create(\n", |
||||
"# model=MODEL,\n", |
||||
"# messages=[\n", |
||||
"# {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
"# {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
"# ],\n", |
||||
"# response_format={\"type\": \"json_object\"}\n", |
||||
"# )\n", |
||||
"# result = response.choices[0].message.content\n", |
||||
"# return json.loads(result)\n", |
||||
"\n", |
||||
"def get_links(url):\n", |
||||
" website = Website(url)\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL_LLAMA,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": link_system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", |
||||
" ]\n", |
||||
" )\n", |
||||
" result = response['message']['content']\n", |
||||
" print(f\"About to parse this into json: {result}\")\n", |
||||
" return json.loads(result)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"anthropic = Website(\"https://anthropic.com\")\n", |
||||
"anthropic.links" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_links(\"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "0d74128e-dfb6-47ec-9549-288b621c838c", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Second step: make the brochure!\n", |
||||
"\n", |
||||
"Assemble all the details into another prompt to the LLM" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_all_details(url):\n", |
||||
" result = \"Landing page:\\n\"\n", |
||||
" result += Website(url).get_contents()\n", |
||||
" links = get_links(url)\n", |
||||
" print(\"Found links:\", links)\n", |
||||
" for link in links[\"links\"]:\n", |
||||
" result += f\"\\n\\n{link['type']}\\n\"\n", |
||||
" result += Website(link[\"url\"]).get_contents()\n", |
||||
" return result" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"print(get_all_details(\"https://anthropic.com\"))" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"Include details of company culture, customers and careers/jobs if you have the information.\"\n", |
||||
"\n", |
||||
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", |
||||
"\n", |
||||
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", |
||||
"# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", |
||||
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def get_brochure_user_prompt(company_name, url):\n", |
||||
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", |
||||
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", |
||||
" user_prompt += get_all_details(url)\n", |
||||
" user_prompt = user_prompt[:20_000] # Truncate if more than 20,000 characters\n", |
||||
" return user_prompt" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "cd909e0b-1312-4ce2-a553-821e795d7572", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"get_brochure_user_prompt(\"Anthropic\", \"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# def create_brochure(company_name, url):\n", |
||||
"# response = openai.chat.completions.create(\n", |
||||
"# model=MODEL,\n", |
||||
"# messages=[\n", |
||||
"# {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
"# {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
"# ],\n", |
||||
"# )\n", |
||||
"# result = response.choices[0].message.content\n", |
||||
"# display(Markdown(result))\n", |
||||
"\n", |
||||
"def create_brochure(company_name, url):\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL_LLAMA,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ]\n", |
||||
" )\n", |
||||
" result = response['message']['content']\n", |
||||
" display(Markdown(result))\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "e093444a-9407-42ae-924a-145730591a39", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"create_brochure(\"Anthropic\", \"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "61eaaab7-0b47-4b29-82d4-75d474ad8d18", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"## Finally - a minor improvement\n", |
||||
"\n", |
||||
"With a small adjustment, we can change this so that the results stream back from the model,\n", |
||||
"with the familiar typewriter animation" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "51db0e49-f261-4137-aabe-92dd601f7725", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# def stream_brochure(company_name, url):\n", |
||||
"# stream = openai.chat.completions.create(\n", |
||||
"# model=MODEL,\n", |
||||
"# messages=[\n", |
||||
"# {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
"# {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
"# ],\n", |
||||
"# stream=True\n", |
||||
"# )\n", |
||||
"\n", |
||||
"# # For just a simple output you can do the following two lines;\n", |
||||
"# # for chunk in stream:\n", |
||||
"# # print(chunk.choices[0].delta.content or '',end='')\n", |
||||
" \n", |
||||
"# response = \"\"\n", |
||||
"# display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
"# for chunk in stream:\n", |
||||
"# response += chunk.choices[0].delta.content or ''\n", |
||||
"# response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||
"# update_display(Markdown(response), display_id=display_handle.display_id)\n", |
||||
"\n", |
||||
"def stream_brochure(company_name, url):\n", |
||||
" stream = ollama.chat(\n", |
||||
" model=MODEL_LLAMA,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ],\n", |
||||
" stream=True\n", |
||||
" )\n", |
||||
"\n", |
||||
" # For just a simple output you can do the following two lines;\n", |
||||
" # for chunk in stream:\n", |
||||
" # print(chunk['message']['content'] or '', end='')\n", |
||||
"\n", |
||||
" response = \"\"\n", |
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||
" for chunk in stream:\n", |
||||
" response += chunk['message']['content'] or ''\n", |
||||
" response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n", |
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"stream_brochure(\"Anthropic\", \"https://anthropic.com\")" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "fdb3f8d8-a3eb-41c8-b1aa-9f60686a653b", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"# Try changing the system prompt to the humorous version when you make the Brochure for Hugging Face:\n", |
||||
"\n", |
||||
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "5567d103-74ee-4a7a-997c-eaf2c3baf7f4", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"def test_llama_response_basic(company_name, url):\n", |
||||
" try:\n", |
||||
" response = ollama.chat(\n", |
||||
" model=MODEL_LLAMA,\n", |
||||
" messages=[\n", |
||||
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", |
||||
" ]\n", |
||||
" )\n", |
||||
"\n", |
||||
" # Print the entire raw response for debugging purposes\n", |
||||
" print(\"Raw response received:\", response)\n", |
||||
"\n", |
||||
" # Check if the response contains 'message' and 'content'\n", |
||||
" if 'message' in response and 'content' in response['message']:\n", |
||||
" response_content = response['message']['content']\n", |
||||
" print(\"Content from response:\", response_content)\n", |
||||
" return response_content\n", |
||||
" else:\n", |
||||
" print(\"Response does not contain expected 'message' or 'content'\")\n", |
||||
" return response\n", |
||||
"\n", |
||||
" except Exception as e:\n", |
||||
" print(f\"An error occurred: {e}\")\n", |
||||
" return {}\n", |
||||
"\n", |
||||
"# Example usage\n", |
||||
"test_llama_response_basic(\"HuggingFace\", \"https://huggingface.co\")\n", |
||||
"\n", |
||||
"\n" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "a27bf9e0-665f-4645-b66b-9725e2a959b5", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||
" <span style=\"color:#181;\">In this exercise we extended the Day 1 code to make multiple LLM calls, and generate a document.\n", |
||||
"\n", |
||||
"This is perhaps the first example of Agentic AI design patterns, as we combined multiple calls to LLMs. This will feature more in Week 2, and then we will return to Agentic AI in a big way in Week 8 when we build a fully autonomous Agent solution.\n", |
||||
"\n", |
||||
"Generating content in this way is one of the very most common Use Cases. As with summarization, this can be applied to any business vertical. Write marketing content, generate a product tutorial from a spec, create personalized email content, and so much more. Explore how you can apply content generation to your business, and try making yourself a proof-of-concept prototype.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "14b2454b-8ef8-4b5c-b928-053a15e0d553", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#900;\">Before you move to Week 2 (which is tons of fun)</h2>\n", |
||||
" <span style=\"color:#900;\">Please see the week1 EXERCISE notebook for your challenge for the end of week 1. This will give you some essential practice working with Frontier APIs, and prepare you well for Week 2.</span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "markdown", |
||||
"id": "17b64f0f-7d33-4493-985a-033d06e8db08", |
||||
"metadata": {}, |
||||
"source": [ |
||||
"<table style=\"margin: 0; text-align: left;\">\n", |
||||
" <tr>\n", |
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||
" </td>\n", |
||||
" <td>\n", |
||||
" <h2 style=\"color:#f71;\">A reminder on 2 useful resources</h2>\n", |
||||
" <span style=\"color:#f71;\">1. The resources for the course are available <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">here.</a><br/>\n", |
||||
" 2. I'm on LinkedIn <a href=\"https://www.linkedin.com/in/eddonner/\">here</a> and I love connecting with people taking the course!\n", |
||||
" </span>\n", |
||||
" </td>\n", |
||||
" </tr>\n", |
||||
"</table>" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "b8fbce9d-51e5-4e8c-a7a9-c88ad02fffdf", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [ |
||||
"import requests\n", |
||||
"from dotenv import load_dotenv\n", |
||||
"\n", |
||||
"load_dotenv()\n", |
||||
"hf_token=os.getenv(\"HF_TOKEN\")\n", |
||||
"print(f\"Using this HF Token: {hf_token}\")\n", |
||||
"\n", |
||||
"API_URL = \"https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-1B\"\n", |
||||
"headers = {\"Authorization\": f\"Bearer {hf_token}\"}\n", |
||||
"\n", |
||||
"def query(payload):\n", |
||||
"\tresponse = requests.post(API_URL, headers=headers, json=payload)\n", |
||||
"\treturn response.json()\n", |
||||
"\t\n", |
||||
"output = query({\n", |
||||
"\t\"inputs\": \"2 + 2 is \",\n", |
||||
"})\n", |
||||
"print(output)" |
||||
] |
||||
}, |
||||
{ |
||||
"cell_type": "code", |
||||
"execution_count": null, |
||||
"id": "ec2b37af-566e-4b0b-ad4a-8b46cc346e46", |
||||
"metadata": {}, |
||||
"outputs": [], |
||||
"source": [] |
||||
} |
||||
], |
||||
"metadata": { |
||||
"kernelspec": { |
||||
"display_name": "Python 3 (ipykernel)", |
||||
"language": "python", |
||||
"name": "python3" |
||||
}, |
||||
"language_info": { |
||||
"codemirror_mode": { |
||||
"name": "ipython", |
||||
"version": 3 |
||||
}, |
||||
"file_extension": ".py", |
||||
"mimetype": "text/x-python", |
||||
"name": "python", |
||||
"nbconvert_exporter": "python", |
||||
"pygments_lexer": "ipython3", |
||||
"version": "3.11.11" |
||||
} |
||||
}, |
||||
"nbformat": 4, |
||||
"nbformat_minor": 5 |
||||
} |
Binary file not shown.
@ -1,51 +1,72 @@
|
||||
#include <iostream> |
||||
#include <random> |
||||
#include <vector> |
||||
#include <chrono> |
||||
#include <limits> |
||||
#include <iomanip> |
||||
|
||||
// Function to generate random numbers using Mersenne Twister
|
||||
std::mt19937 gen(42); |
||||
using namespace std; |
||||
using namespace chrono; |
||||
|
||||
class LCG { |
||||
private: |
||||
uint64_t value; |
||||
static const uint64_t a = 1664525; |
||||
static const uint64_t c = 1013904223; |
||||
static const uint64_t m = 1ULL << 32; |
||||
|
||||
public: |
||||
LCG(uint64_t seed) : value(seed) {} |
||||
|
||||
uint64_t next() { |
||||
value = (a * value + c) % m; |
||||
return value; |
||||
} |
||||
}; |
||||
|
||||
int64_t max_subarray_sum(int n, uint64_t seed, int min_val, int max_val) { |
||||
LCG lcg(seed); |
||||
vector<int64_t> random_numbers(n); |
||||
for (int i = 0; i < n; ++i) { |
||||
random_numbers[i] = lcg.next() % (max_val - min_val + 1) + min_val; |
||||
} |
||||
|
||||
int64_t max_sum = numeric_limits<int64_t>::min(); |
||||
int64_t current_sum = 0; |
||||
int64_t min_sum = 0; |
||||
|
||||
// Function to calculate maximum subarray sum
|
||||
int max_subarray_sum(int n, int min_val, int max_val) { |
||||
std::uniform_int_distribution<> dis(min_val, max_val); |
||||
int max_sum = std::numeric_limits<int>::min(); |
||||
int current_sum = 0; |
||||
for (int i = 0; i < n; ++i) { |
||||
current_sum += dis(gen); |
||||
if (current_sum > max_sum) { |
||||
max_sum = current_sum; |
||||
} |
||||
if (current_sum < 0) { |
||||
current_sum = 0; |
||||
} |
||||
current_sum += random_numbers[i]; |
||||
max_sum = max(max_sum, current_sum - min_sum); |
||||
min_sum = min(min_sum, current_sum); |
||||
} |
||||
|
||||
return max_sum; |
||||
} |
||||
|
||||
// Function to calculate total maximum subarray sum
|
||||
int total_max_subarray_sum(int n, int initial_seed, int min_val, int max_val) { |
||||
gen.seed(initial_seed); |
||||
int total_sum = 0; |
||||
int64_t total_max_subarray_sum(int n, uint64_t initial_seed, int min_val, int max_val) { |
||||
int64_t total_sum = 0; |
||||
LCG lcg(initial_seed); |
||||
for (int i = 0; i < 20; ++i) { |
||||
total_sum += max_subarray_sum(n, min_val, max_val); |
||||
uint64_t seed = lcg.next(); |
||||
total_sum += max_subarray_sum(n, seed, min_val, max_val); |
||||
} |
||||
return total_sum; |
||||
} |
||||
|
||||
int main() { |
||||
int n = 10000; // Number of random numbers
|
||||
int initial_seed = 42; // Initial seed for the Mersenne Twister
|
||||
int min_val = -10; // Minimum value of random numbers
|
||||
int max_val = 10; // Maximum value of random numbers
|
||||
|
||||
// Timing the function
|
||||
auto start_time = std::chrono::high_resolution_clock::now(); |
||||
int result = total_max_subarray_sum(n, initial_seed, min_val, max_val); |
||||
auto end_time = std::chrono::high_resolution_clock::now(); |
||||
|
||||
std::cout << "Total Maximum Subarray Sum (20 runs): " << result << std::endl; |
||||
std::cout << "Execution Time: " << std::setprecision(6) << std::fixed << std::chrono::duration<double>(end_time - start_time).count() << " seconds" << std::endl; |
||||
const int n = 10000; |
||||
const uint64_t initial_seed = 42; |
||||
const int min_val = -10; |
||||
const int max_val = 10; |
||||
|
||||
auto start_time = high_resolution_clock::now(); |
||||
int64_t result = total_max_subarray_sum(n, initial_seed, min_val, max_val); |
||||
auto end_time = high_resolution_clock::now(); |
||||
|
||||
auto duration = duration_cast<microseconds>(end_time - start_time); |
||||
|
||||
cout << "Total Maximum Subarray Sum (20 runs): " << result << endl; |
||||
cout << "Execution Time: " << fixed << setprecision(6) << duration.count() / 1e6 << " seconds" << endl; |
||||
|
||||
return 0; |
||||
} |
Loading…
Reference in new issue