From 3f7b0004c4652a93833251fbdc753c47551f2ea0 Mon Sep 17 00:00:00 2001
From: 266367 <266367@nttdata.com>
Date: Mon, 27 Jan 2025 14:46:40 -0500
Subject: [PATCH 1/4] Wk1 Day 1 - Summarize website using deepseek-chat and
 stream the response in real time

---
 .../wk1-day1-deepseek-stream-summarize.ipynb | 119 ++++++++++++++++++
 1 file changed, 119 insertions(+)
 create mode 100644 week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb

diff --git a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
new file mode 100644
index 0000000..95ee6ca
--- /dev/null
+++ b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
@@ -0,0 +1,119 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a767b6bc-65fe-42b2-988f-efd54125114f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import requests\n",
+    "from dotenv import load_dotenv\n",
+    "from bs4 import BeautifulSoup\n",
+    "from IPython.display import Markdown, display\n",
+    "from openai import OpenAI\n",
+    "import time\n",
+    "\n",
+    "load_dotenv(override=True)\n",
+    "api_key = os.getenv('DEEPSEEK_API_KEY')\n",
+    "base_url=os.getenv('DEEPSEEK_BASE_URL')\n",
+    "start_time = time.time()\n",
+    "\n",
+    "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
+    "and provides a short summary, ignoring text that might be navigation related. \\\n",
+    "Respond in markdown.\"\n",
+    "\n",
+    "messages = [\n",
+    "    {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
+    "    {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
+    "]\n",
+    "    \n",
+    "# Check the key\n",
+    "if not api_key:\n",
+    "    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
+    "elif not api_key.startswith(\"sk-proj-\"):\n",
+    "    print(\"An API key was found, but it doesn't start with sk-proj-; looks like you are using the DeepSeek (R1) model.\")\n",
+    "elif api_key.strip() != api_key:\n",
+    "    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
+    "else:\n",
+    "    print(\"API key found and looks good so far!\")\n",
+    "    \n",
+    "openai = OpenAI(api_key=api_key, base_url=base_url)\n",
+    "\n",
+    "headers = {\n",
+    " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
+    "}\n",
+    "\n",
+    "class Website:\n",
+    "\n",
+    "    def __init__(self, url):\n",
+    "        \"\"\"\n",
+    "        Create this Website object from the given url using the BeautifulSoup library\n",
+    "        \"\"\"\n",
+    "        self.url = url\n",
+    "        response = requests.get(url, headers=headers)\n",
+    "        soup = BeautifulSoup(response.content, 'html.parser')\n",
+    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
+    "        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
+    "            irrelevant.decompose()\n",
+    "        self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
+    "    \n",
+    "def user_prompt_for(website):\n",
+    "    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
+    "    user_prompt += \"\\nThe contents of this website are as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n",
+    "    user_prompt += website.text\n",
+    "    return user_prompt\n",
+    "\n",
+    "def messages_for(website):\n",
+    "    return [\n",
+    "        {\"role\": \"system\", \"content\": system_prompt},\n",
+    "        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
+    "    ]\n",
+    "    \n",
+    "def summarize(url):\n",
+    "    website = Website(url)\n",
+    "    response = openai.chat.completions.create(\n",
+    "        model=\"deepseek-chat\",\n",
+    "        messages=messages_for(website),\n",
+    "        stream=True\n",
+    "    )\n",
+    "    print(\"Streaming response:\")\n",
+    "    accumulated_content = \"\" # Accumulate the content here\n",
+    "    for chunk in response:\n",
+    "        if chunk.choices[0].delta.content: # Check if there's content in the chunk\n",
+    "            accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
+    "    \n",
+    "    # Display the accumulated content as a single Markdown block\n",
+    "    display(Markdown(accumulated_content))\n",
+    "\n",
+    "def display_summary():\n",
+    "    url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",
+    "    summarize(url)\n",
+    "\n",
+    "display_summary()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llms",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

From daf9b36e28ed0923d3f3eecbdef9bf2f1137d55a Mon Sep 17 00:00:00 2001
From: 266367 <266367@nttdata.com>
Date: Mon, 27 Jan 2025 15:03:45 -0500
Subject: [PATCH 2/4] add markdown

---
 .../wk1-day1-deepseek-stream-summarize.ipynb | 49 +++++++++++++++++--
 1 file changed, 44 insertions(+), 5 deletions(-)

diff --git a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
index 95ee6ca..1c641f5 100644
--- a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
+++ b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
@@ -2,16 +2,53 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "id": "a767b6bc-65fe-42b2-988f-efd54125114f",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/markdown": [
+       "```markdown\n",
+       "# Summary of \"DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning\"\n",
+       "\n",
+       "## Overview\n",
+       "The paper introduces **DeepSeek-R1**, a first-generation reasoning model developed by DeepSeek-AI. The model is designed to enhance reasoning capabilities in large language models (LLMs) using reinforcement learning (RL). Two versions are presented:\n",
+       "- **DeepSeek-R1-Zero**: A model trained via large-scale RL without supervised fine-tuning (SFT), showcasing strong reasoning abilities but facing challenges like poor readability and language mixing.\n",
+       "- **DeepSeek-R1**: An improved version incorporating multi-stage training and cold-start data before RL, achieving performance comparable to OpenAI's models on reasoning tasks.\n",
+       "\n",
+       "## Key Contributions\n",
+       "- Open-sourcing of **DeepSeek-R1-Zero**, **DeepSeek-R1**, and six dense models (1.5B, 7B, 8B, 14B, 32B, 70B) distilled from DeepSeek-R1 based on Qwen and Llama architectures.\n",
+       "- The models are made available to support the research community.\n",
+       "\n",
+       "## Community Engagement\n",
+       "- The paper has been widely discussed and recommended, with 216 upvotes and 45 models citing it.\n",
+       "- Additional resources, including a video review and articles, are available through external links provided by the community.\n",
+       "\n",
+       "## Related Research\n",
+       "The paper is part of a broader trend in enhancing LLMs' reasoning abilities, with related works such as:\n",
+       "- **Improving Multi-Step Reasoning Abilities of Large Language Models with Direct Advantage Policy Optimization (2024)**\n",
+       "- **Offline Reinforcement Learning for LLM Multi-Step Reasoning (2024)**\n",
+       "- **Reasoning Language Models: A Blueprint (2025)**\n",
+       "\n",
+       "## Availability\n",
+       "- The paper and models are accessible on [GitHub](https://github.com/deepseek-ai/DeepSeek-R1) and the [arXiv page](https://arxiv.org/abs/2501.12948).\n",
+       "```"
+      ],
+      "text/plain": [
+       "<IPython.core.display.Markdown object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
     "import os\n",
     "import requests\n",
     "from dotenv import load_dotenv\n",
     "from bs4 import BeautifulSoup\n",
-    "from IPython.display import Markdown, display\n",
+    "from IPython.display import Markdown, display, clear_output\n",
     "from openai import OpenAI\n",
     "import time\n",
     "\n",
@@ -83,9 +120,11 @@
     "    for chunk in response:\n",
     "        if chunk.choices[0].delta.content: # Check if there's content in the chunk\n",
     "            accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
+    "            clear_output(wait=True) # Clear the previous output\n",
+    "            display(Markdown(accumulated_content)) # Display the updated content\n",
     "    \n",
-    "    # Display the accumulated content as a single Markdown block\n",
-    "    display(Markdown(accumulated_content))\n",
+    "    # # Final display (optional, as the loop already displays the content)\n",
+    "    # display(Markdown(accumulated_content))\n",
     "\n",
     "def display_summary():\n",
     "    url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",

From ebf36008875dc0f85521c5cef3267263bf583de1 Mon Sep 17 00:00:00 2001
From: 266367 <266367@nttdata.com>
Date: Mon, 27 Jan 2025 15:06:31 -0500
Subject: [PATCH 3/4] last commit

---
 .../wk1-day1-deepseek-stream-summarize.ipynb | 51 ++++---------------
 1 file changed, 11 insertions(+), 40 deletions(-)

diff --git a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
index 1c641f5..6904a66 100644
--- a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
+++ b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
@@ -2,47 +2,10 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "id": "a767b6bc-65fe-42b2-988f-efd54125114f",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "```markdown\n",
-       "# Summary of \"DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning\"\n",
-       "\n",
-       "## Overview\n",
-       "The paper introduces **DeepSeek-R1**, a first-generation reasoning model developed by DeepSeek-AI. The model is designed to enhance reasoning capabilities in large language models (LLMs) using reinforcement learning (RL). Two versions are presented:\n",
-       "- **DeepSeek-R1-Zero**: A model trained via large-scale RL without supervised fine-tuning (SFT), showcasing strong reasoning abilities but facing challenges like poor readability and language mixing.\n",
-       "- **DeepSeek-R1**: An improved version incorporating multi-stage training and cold-start data before RL, achieving performance comparable to OpenAI's models on reasoning tasks.\n",
-       "\n",
-       "## Key Contributions\n",
-       "- Open-sourcing of **DeepSeek-R1-Zero**, **DeepSeek-R1**, and six dense models (1.5B, 7B, 8B, 14B, 32B, 70B) distilled from DeepSeek-R1 based on Qwen and Llama architectures.\n",
-       "- The models are made available to support the research community.\n",
-       "\n",
-       "## Community Engagement\n",
-       "- The paper has been widely discussed and recommended, with 216 upvotes and 45 models citing it.\n",
-       "- Additional resources, including a video review and articles, are available through external links provided by the community.\n",
-       "\n",
-       "## Related Research\n",
-       "The paper is part of a broader trend in enhancing LLMs' reasoning abilities, with related works such as:\n",
-       "- **Improving Multi-Step Reasoning Abilities of Large Language Models with Direct Advantage Policy Optimization (2024)**\n",
-       "- **Offline Reinforcement Learning for LLM Multi-Step Reasoning (2024)**\n",
-       "- **Reasoning Language Models: A Blueprint (2025)**\n",
-       "\n",
-       "## Availability\n",
-       "- The paper and models are accessible on [GitHub](https://github.com/deepseek-ai/DeepSeek-R1) and the [arXiv page](https://arxiv.org/abs/2501.12948).\n",
-       "```"
-      ],
-      "text/plain": [
-       "<IPython.core.display.Markdown object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "import os\n",
     "import requests\n",
@@ -132,11 +95,19 @@
     "\n",
     "display_summary()"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "01c9e5e7-7510-43ef-bb9c-aa44b15d39a7",
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "llms",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },

From b9723acee1bfb3f68c9be6fbf168f8ecb30de34d Mon Sep 17 00:00:00 2001
From: 266367 <266367@nttdata.com>
Date: Mon, 27 Jan 2025 20:01:32 -0500
Subject: [PATCH 4/4] remove unwanted comments

---
 .../wk1-day1-deepseek-stream-summarize.ipynb | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
index 6904a66..2e615ed 100644
--- a/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
+++ b/week1/community-contributions/wk1-day1-deepseek-stream-summarize.ipynb
@@ -13,12 +13,11 @@
     "from bs4 import BeautifulSoup\n",
     "from IPython.display import Markdown, display, clear_output\n",
     "from openai import OpenAI\n",
-    "import time\n",
     "\n",
     "load_dotenv(override=True)\n",
     "api_key = os.getenv('DEEPSEEK_API_KEY')\n",
     "base_url=os.getenv('DEEPSEEK_BASE_URL')\n",
-    "start_time = time.time()\n",
+    "MODEL = \"deepseek-chat\"\n",
     "\n",
     "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
     "and provides a short summary, ignoring text that might be navigation related. \\\n",
     "Respond in markdown.\"\n",
@@ -74,7 +73,7 @@
     "def summarize(url):\n",
     "    website = Website(url)\n",
     "    response = openai.chat.completions.create(\n",
-    "        model=\"deepseek-chat\",\n",
+    "        model=MODEL,\n",
     "        messages=messages_for(website),\n",
    "        stream=True\n",
     "    )\n",
@@ -85,9 +84,6 @@
     "            accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
     "            clear_output(wait=True) # Clear the previous output\n",
     "            display(Markdown(accumulated_content)) # Display the updated content\n",
-    "    \n",
-    "    # # Final display (optional, as the loop already displays the content)\n",
-    "    # display(Markdown(accumulated_content))\n",
     "\n",
     "def display_summary():\n",
     "    url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",