12 changed files with 1269 additions and 175 deletions
@@ -0,0 +1,15 @@
Collecting onnxruntime
  Downloading onnxruntime-1.20.1-cp311-cp311-win_amd64.whl.metadata (4.7 kB)
Requirement already satisfied: coloredlogs in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (15.0.1)
Requirement already satisfied: flatbuffers in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (24.3.25)
Requirement already satisfied: numpy>=1.21.6 in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (1.26.4)
Requirement already satisfied: packaging in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (24.2)
Requirement already satisfied: protobuf in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (4.25.3)
Requirement already satisfied: sympy in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (1.13.3)
Requirement already satisfied: humanfriendly>=9.1 in d:\anaconda\envs\llms\lib\site-packages (from coloredlogs->onnxruntime) (10.0)
Requirement already satisfied: mpmath<1.4,>=1.1.0 in d:\anaconda\envs\llms\lib\site-packages (from sympy->onnxruntime) (1.3.0)
Requirement already satisfied: pyreadline3 in d:\anaconda\envs\llms\lib\site-packages (from humanfriendly>=9.1->coloredlogs->onnxruntime) (0.0.0)
Downloading onnxruntime-1.20.1-cp311-cp311-win_amd64.whl (11.3 MB)
   ---------------------------------------- 11.3/11.3 MB 18.7 MB/s eta 0:00:00
Installing collected packages: onnxruntime
Successfully installed onnxruntime-1.20.1
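The log above only shows that the wheel installed. As a quick post-install sanity check (a minimal sketch; both calls are part of onnxruntime's public API and need no model file), you can confirm the version and list the execution providers the build exposes:

```python
# Post-install check for onnxruntime: no model file required.
import onnxruntime as ort

print(ort.__version__)                # expected: 1.20.1
print(ort.get_available_providers())  # e.g. ['CPUExecutionProvider'] on a CPU-only wheel
```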
@@ -0,0 +1,97 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "8b1b8557-7551-4b34-97f0-1734505078c7",
   "metadata": {},
   "source": [
    "# Hello everyone.\n",
"Here's my solution to the Day 2 Exercise, where I use Ollama (locally hosted) instead of OpenAI gpt-4o-mini to summarize a given website. This code is all in the same block for ease of running (we are all familiar with the process by this point, but guiding comments have been made). Furtnermore, I added a bit of user interactivity by asking the user to provide the website themselves instead of hardcoding a string in memory that the developer changes everytime. Enjoy and have fun, fellow programmers! \n", |
||||||
|
"\\- Batikan Iscan" |
||||||
|
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18cd56af-aacd-4f15-9c8f-e6e141671d10",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from IPython.display import Markdown, display\n",
    "import ollama\n",
    "\n",
    "# Constants\n",
    "OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
    "HEADERS = {\"Content-Type\": \"application/json\"}\n",
    "MODEL = \"llama3.2\"\n",
    "\n",
    "# Code\n",
    "class Website:\n",
    "    def __init__(self, url):\n",
    "        \"\"\"\n",
    "        Create this Website object from the given url using the BeautifulSoup library\n",
    "        \"\"\"\n",
    "        self.url = url\n",
    "        response = requests.get(url)\n",
    "        soup = BeautifulSoup(response.content, 'html.parser')\n",
    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
    "        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
    "            irrelevant.decompose()\n",
    "        self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
    "\n",
    "def user_prompt_for(website):\n",
    "    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||||
|
" please provide a short summary of this website in markdown. \\\n", |
||||||
|
" If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||||
|
" user_prompt += website.text\n", |
||||||
|
" return user_prompt\n", |
||||||
|
"\n", |
||||||
|
"def summarize(url):\n", |
||||||
|
" website = Website(url)\n", |
||||||
|
" messages = [\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||||
|
" ]\n", |
||||||
|
" response = ollama.chat(model=MODEL, messages=messages, stream=False)\n", |
||||||
|
" # display(Markdown(response.choices[0].message.content))\n", |
||||||
|
" display(Markdown(response.message.content))\n", |
||||||
|
"\n", |
||||||
|
"# User interaction\n", |
||||||
|
"user_website = input(\"Enter a website: \")\n", |
||||||
|
"summarize(user_website)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "73991ad8-9c93-4c38-8daa-f93502b56740", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.10" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
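One note on the notebook above: the OLLAMA_API and HEADERS constants are defined but never used, since summarize() goes through the ollama Python package instead. If you'd rather call the local endpoint directly, a minimal sketch using those constants (assuming an Ollama server on its default port with llama3.2 already pulled) could look like this:

```python
# Hypothetical direct POST to the local Ollama chat endpoint, using the
# notebook's otherwise-unused constants instead of the ollama package.
import requests

OLLAMA_API = "http://localhost:11434/api/chat"
HEADERS = {"Content-Type": "application/json"}
MODEL = "llama3.2"

payload = {
    "model": MODEL,
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    "stream": False,  # ask for a single JSON object rather than a chunk stream
}
response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)
print(response.json()["message"]["content"])
```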
@@ -0,0 +1,154 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "1c14de02-8bd2-4f75-bcd8-d4f2e58e2a24",
   "metadata": {},
   "source": [
    "# Hi everyone\n",
"I wanted to be able to use Llama3.2 in streaming mode with all the other paid frontier models, so as a demonstration, here's the Company Brochure Generator with Gradio, enhanched with Llama3.2 (using ollama library)!" |
||||||
|
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e02ac9c-7034-4aa1-9626-a7049168f096",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from typing import List\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "import google.generativeai\n",
    "import anthropic\n",
    "import ollama\n",
    "import gradio as gr\n",
    "\n",
    "load_dotenv()\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
    "\n",
    "openai = OpenAI()\n",
    "claude = anthropic.Anthropic()\n",
    "\n",
    "system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
    "and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"\n",
    "\n",
    "class Website:\n",
    "    url: str\n",
    "    title: str\n",
    "    text: str\n",
    "\n",
    "    def __init__(self, url):\n",
    "        self.url = url\n",
    "        response = requests.get(url)\n",
    "        self.body = response.content\n",
    "        soup = BeautifulSoup(self.body, 'html.parser')\n",
    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
    "        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
    "            irrelevant.decompose()\n",
    "        self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
    "\n",
    "    def get_contents(self):\n",
    "        return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n",
    "\n",
    "\n",
    "def stream_gpt(prompt):\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": system_message},\n",
    "        {\"role\": \"user\", \"content\": prompt}\n",
    "    ]\n",
    "    stream = openai.chat.completions.create(\n",
    "        model='gpt-4o-mini',\n",
    "        messages=messages,\n",
    "        stream=True\n",
    "    )\n",
    "    result = \"\"\n",
    "    for chunk in stream:\n",
    "        result += chunk.choices[0].delta.content or \"\"\n",
    "        yield result\n",
    "\n",
    "def stream_claude(prompt):\n",
    "    result = claude.messages.stream(\n",
    "        model=\"claude-3-haiku-20240307\",\n",
    "        max_tokens=1000,\n",
    "        temperature=0.7,\n",
    "        system=system_message,\n",
    "        messages=[\n",
    "            {\"role\": \"user\", \"content\": prompt},\n",
    "        ],\n",
    "    )\n",
    "    response = \"\"\n",
    "    with result as stream:\n",
    "        for text in stream.text_stream:\n",
    "            response += text or \"\"\n",
    "            yield response\n",
    "\n",
    "def stream_llama(prompt):\n",
    "    messages = [\n",
    "        {\"role\": \"user\", \"content\": prompt}\n",
    "    ]\n",
    "    response = \"\"\n",
    "    for chunk in ollama.chat(\n",
    "        model=\"llama3.2\",\n",
    "        messages=messages,\n",
    "        stream=True\n",
    "    ):\n",
    "        # Check if the chunk contains text\n",
    "        if chunk.get('message', {}).get('content'):\n",
    "            # Append the new text to the response\n",
    "            response += chunk['message']['content']\n",
    "            # Yield the incrementally built response\n",
    "            yield response\n",
    "\n",
    "def stream_brochure(company_name, url, model):\n",
    "    prompt = f\"Please generate a company brochure for {company_name}. Here is their landing page:\\n\"\n",
    "    prompt += Website(url).get_contents()\n",
    "    if model == \"GPT\":\n",
    "        result = stream_gpt(prompt)\n",
    "    elif model == \"Claude\":\n",
    "        result = stream_claude(prompt)\n",
    "    elif model == \"Llama\":\n",
    "        result = stream_llama(prompt)\n",
    "    else:\n",
    "        raise ValueError(\"Unknown model\")\n",
    "    yield from result\n",
    "\n",
    "view = gr.Interface(\n",
    "    fn=stream_brochure,\n",
    "    inputs=[\n",
    "        gr.Textbox(label=\"Company name:\"),\n",
    "        gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
    "        gr.Dropdown([\"GPT\", \"Claude\", \"Llama\"], label=\"Select model\")],\n",
    "    outputs=[gr.Markdown(label=\"Brochure:\")],\n",
    "    flagging_mode=\"never\"\n",
    ")\n",
    "view.launch()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
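Since every stream_* function above is a generator and stream_brochure delegates with yield from, the pipeline can also be exercised without launching the Gradio UI, which is handy for debugging. A minimal sketch (the company name and URL are arbitrary placeholder inputs, and it assumes the cell above has already been run):

```python
# Drive the generator directly instead of through gr.Interface.
# "Hugging Face" and the URL are illustrative inputs, not from the notebook.
final = ""
for partial in stream_brochure("Hugging Face", "https://huggingface.co", "Llama"):
    final = partial  # each yield is the full text accumulated so far
print(final)
```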