rebase and cleanup

pull/125/head
266367 committed 3 months ago
parent commit 26e2b74727
Files changed:
1. week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb (250)
2. week1/community-contributions/wk1-day2-ollama-exer.ipynb (118)

week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb (250)

@@ -1,131 +1,125 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "a767b6bc-65fe-42b2-988f-efd54125114f",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, clear_output\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
-"# Deep seek API payload\n",
-"# api_key = os.getenv('DEEPSEEK_API_KEY')\n",
-"# base_url=os.getenv('DEEPSEEK_BASE_URL')\n",
-"# MODEL = \"deepseek-chat\"\n",
-"\n",
-"# Day 2 Exercise with Ollama API\n",
-"api_key = os.getenv('OLLAMA_API_KEY')\n",
-"base_url = os.getenv('OLLAMA_BASE_URL')\n",
-"MODEL = \"llama3.2\"\n",
+"api_key = os.getenv('DEEPSEEK_API_KEY')\n",
+"base_url=os.getenv('DEEPSEEK_BASE_URL')\n",
+"MODEL = \"deepseek-chat\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]\n",
" \n",
"# Check the key\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; Looks like you are using DeepSeek (R1) model.\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n",
" \n",
"openai = OpenAI(api_key=api_key, base_url=base_url)\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" \n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
" \n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=messages_for(website),\n",
" stream=True\n",
" )\n",
" print(\"Streaming response:\")\n",
" accumulated_content = \"\" # Accumulate the content here\n",
" for chunk in response:\n",
" if chunk.choices[0].delta.content: # Check if there's content in the chunk\n",
" accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
" clear_output(wait=True) # Clear the previous output\n",
" display(Markdown(accumulated_content)) # Display the updated content\n",
"\n",
"def display_summary():\n",
" url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",
" summarize(url)\n",
"\n",
"display_summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01c9e5e7-7510-43ef-bb9c-aa44b15d39a7",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
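Note: this hunk restores the DeepSeek configuration that an earlier commit had overwritten with the Day 2 Ollama settings. For anyone reproducing the setup outside the notebook, here is a minimal standalone sketch of the restored configuration. The .env values below are placeholders, and the https://api.deepseek.com base URL is an assumption based on DeepSeek's OpenAI-compatible endpoint rather than anything pinned down in this diff.

import os
from dotenv import load_dotenv
from openai import OpenAI

# Expected .env entries (placeholder values, not real credentials):
#   DEEPSEEK_API_KEY=sk-...
#   DEEPSEEK_BASE_URL=https://api.deepseek.com   <- assumed endpoint
load_dotenv(override=True)
api_key = os.getenv('DEEPSEEK_API_KEY')
base_url = os.getenv('DEEPSEEK_BASE_URL')

# DeepSeek serves an OpenAI-compatible API, so the stock OpenAI client
# works once it is pointed at the DeepSeek base URL.
client = OpenAI(api_key=api_key, base_url=base_url)
response = client.chat.completions.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
)
print(response.choices[0].message.content)

One caveat the diff leaves in place: the notebook's sk-proj- prefix check is an OpenAI key convention, so a valid DeepSeek key will always hit the "Looks like you are using DeepSeek (R1) model" branch. That message is informational here, not an error.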

week1/community-contributions/wk1-day2-ollama-exer.ipynb (118)

@@ -0,0 +1,118 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, clear_output\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"# Day 2 Exercise with Ollama API\n",
"api_key = os.getenv('OLLAMA_API_KEY')\n",
"base_url = os.getenv('OLLAMA_BASE_URL')\n",
"MODEL = \"llama3.2\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]\n",
" \n",
"# Check the key\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; Looks like you are using DeepSeek (R1) model.\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n",
" \n",
"openai = OpenAI(api_key=api_key, base_url=base_url)\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" \n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
" \n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=messages_for(website),\n",
" stream=True\n",
" )\n",
" print(\"Streaming response:\")\n",
" accumulated_content = \"\" # Accumulate the content here\n",
" for chunk in response:\n",
" if chunk.choices[0].delta.content: # Check if there's content in the chunk\n",
" accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
" clear_output(wait=True) # Clear the previous output\n",
" display(Markdown(accumulated_content)) # Display the updated content\n",
" \n",
"def display_summary():\n",
" url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",
" summarize(url)\n",
"\n",
"display_summary()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
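Note: the API-key check in this new notebook was carried over verbatim from the DeepSeek version, so with an Ollama setup it will always land in the sk-proj- branch and print the DeepSeek warning; harmless, but misleading in this context. Ollama exposes an OpenAI-compatible endpoint where the client requires a non-empty key string that the local server then ignores. A minimal sketch of that wiring, assuming a local Ollama server with llama3.2 already pulled (the fallback values below are assumptions; the notebook itself reads both from .env):

import os
from openai import OpenAI

# Assumed local defaults: Ollama's OpenAI-compatible API lives under /v1,
# and the key can be any non-empty string (the server ignores it).
base_url = os.getenv('OLLAMA_BASE_URL', 'http://localhost:11434/v1')
api_key = os.getenv('OLLAMA_API_KEY', 'ollama')

client = OpenAI(api_key=api_key, base_url=base_url)

# Stream a trivial completion to confirm the endpoint is reachable,
# mirroring the notebook's streaming loop.
stream = client.chat.completions.create(
    model="llama3.2",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    stream=True,
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
print()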