From 9d21d9f060b20e488311fc40f36cabae8ab019e5 Mon Sep 17 00:00:00 2001 From: ken Date: Wed, 12 Mar 2025 15:37:17 +0800 Subject: [PATCH 01/19] day 1 exercise for LLM engineer certification --- .../day2_exercise.ipynb | 274 ++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100644 week1/community-contributions/day2_exercise.ipynb diff --git a/week1/community-contributions/day2_exercise.ipynb b/week1/community-contributions/day2_exercise.ipynb new file mode 100644 index 0000000..63cd4ee --- /dev/null +++ b/week1/community-contributions/day2_exercise.ipynb @@ -0,0 +1,274 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "41136d6f-07bc-4f6f-acba-784b8e5707b1", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8612b4f7-5c31-48f3-8423-261914509617", + "metadata": {}, + "outputs": [], + "source": [ + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}\n", + "MODEL = \"llama3.2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "508bd442-7860-4215-b0f2-57f7adefd807", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a messages list using the same format that we used for OpenAI\n", + "\n", + "messages = [\n", + " {\"role\": \"user\", \"content\": \"Describe some of the business applications of Generative AI\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc7e8ada-4f8d-4090-be64-4aa72e03ac58", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's just make sure the model is loaded\n", + "\n", + "!ollama pull llama3.2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4afd2e56-191a-4e31-949e-9b9376a39b5a", + "metadata": { + "scrolled": true + }, + 
"outputs": [], + "source": [ + "# There's actually an alternative approach that some people might prefer\n", + "# You can use the OpenAI client python library to call Ollama:\n", + "\n", + "from openai import OpenAI\n", + "ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", + "\n", + "response = ollama_via_openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=messages\n", + ")\n", + "\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "365f3d83-2601-42fb-89cc-98a4e1f79e0d", + "metadata": {}, + "outputs": [], + "source": [ + "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", + "response = ollama_via_openai.chat.completions.create(model=MODEL, messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29c383ae-bf5b-41bc-b5af-a22f851745dc", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = 
soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc61e30f-653f-4554-b1cd-6e61a0e2430a", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "ed = Website(\"https://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db2066fb-3079-4775-832a-dcc0f19beb6e", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af81b070-b6fe-4b18-aa0b-c03cd76a0adf", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. 
\\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e66291b-23b1-4915-b6a3-11a4b6a4db66", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "67c92f47-4a3b-491f-af00-07fda470087e", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db1b9085-e5e7-4ec9-a264-acc389085ada", + "metadata": {}, + "outputs": [], + "source": [ + "messages_for(ed)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "677bfc2f-19ac-46a0-b67e-a2b2ddf9cf6b", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize(url):\n", + " website = Website(url)\n", + " response = ollama_via_openai.chat.completions.create(\n", + " model = MODEL,\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee3242ba-b695-4b1e-8a91-2fdeb536c2e7", + "metadata": {}, + "outputs": [], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "85142cb8-ce0c-4c31-8b26-bb1744cf99ec", + "metadata": {}, + "outputs": [], + "source": [ + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63db51a7-dd03-4514-8954-57156967f82c", + "metadata": { + 
"scrolled": true + }, + "outputs": [], + "source": [ + "display_summary(\"https://app.daily.dev/posts/bregman-arie-devops-exercises-linux-jenkins-aws-sre-prometheus-docker-python-ansible-git-k-yli9wthnf\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:base] *", + "language": "python", + "name": "conda-base-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 9bb7e9dedeee000293733e480fc2b4dd8c9bc583 Mon Sep 17 00:00:00 2001 From: ken Date: Fri, 28 Mar 2025 16:05:39 +0800 Subject: [PATCH 02/19] feat(day5): ollama version --- .../day5_ollama_version.ipynb | 3876 +++++++++++++++++ 1 file changed, 3876 insertions(+) create mode 100644 week1/community-contributions/day5_ollama_version.ipynb diff --git a/week1/community-contributions/day5_ollama_version.ipynb b/week1/community-contributions/day5_ollama_version.ipynb new file mode 100644 index 0000000..e49702c --- /dev/null +++ b/week1/community-contributions/day5_ollama_version.ipynb @@ -0,0 +1,3876 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "425f8a60-d923-4c15-8c24-7f0a5cc7579c", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "c00a960b-c9b0-4b9a-a19b-333ba4cc7d95", + "metadata": {}, + "outputs": [], + "source": [ + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}\n", + "MODEL = \"llama3.2\"" + ] + }, + { + "cell_type": "code", + 
"execution_count": 7, + "id": "e5106d3b-20c0-44a3-9148-4caa0ef45341", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ‹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ™ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¸ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¼ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ´ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¦ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â § \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ‡ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â � \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ‹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ™ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest \u001b[K\n", + "pulling dde5aa3fc5ff... 100% ▕████████████████â–� 2.0 GB \u001b[K\n", + "pulling 966de95ca8a6... 100% ▕████████████████â–� 1.4 KB \u001b[K\n", + "pulling fcc5a6bec9da... 100% ▕████████████████â–� 7.7 KB \u001b[K\n", + "pulling a70ff7e570d9... 100% ▕████████████████â–� 6.0 KB \u001b[K\n", + "pulling 56bb8bd477a5... 100% ▕████████████████â–� 96 B \u001b[K\n", + "pulling 34bb5ab01051... 
100% ▕████████████████â–� 561 B \u001b[K\n", + "verifying sha256 digest \u001b[K\n", + "writing manifest \u001b[K\n", + "success \u001b[K\u001b[?25h\u001b[?2026l\n" + ] + } + ], + "source": [ + "# Let's just make sure the model is loaded\n", + "\n", + "!ollama pull llama3.2" + ] + }, + { + "cell_type": "code", + "execution_count": 97, + "id": "664ce0aa-fc53-4ead-88ae-7899ba2fa9d0", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " try:\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + " except:\n", + " print(\"website error\")\n", + " def get_contents(self):\n", + " try:\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n", + " except:\n", + " return \"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "37a829c6-e2c2-4af3-ae78-7f5d0c9929e9", + "metadata": {}, + "outputs": [], + "source": [ + "link_system_prompt = \"You are provided with a list of links found on a webpage. 
\\\n", + "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "c5bfc644-3d3e-4a8f-8962-09efadc89272", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n", + "You should respond in JSON as in this example:\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\n" + ] + } + ], + "source": [ + "print(link_system_prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "40c62f22-2b8e-452f-892b-d4ec53b0d9cf", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links_user_prompt(website):\n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
\\\n", + "Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "233c1d40-0e3a-4b1d-b532-53f11c45e071", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['/',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/posts',\n", + " '/docs',\n", + " '/enterprise',\n", + " '/pricing',\n", + " '/login',\n", + " '/join',\n", + " '/spaces',\n", + " '/models',\n", + " '/deepseek-ai/DeepSeek-V3-0324',\n", + " '/Qwen/Qwen2.5-Omni-7B',\n", + " '/manycore-research/SpatialLM-Llama-1B',\n", + " '/ByteDance/InfiniteYou',\n", + " '/ds4sd/SmolDocling-256M-preview',\n", + " '/models',\n", + " '/spaces/ByteDance/InfiniteYou-FLUX',\n", + " '/spaces/Trudy/gemini-codrawing',\n", + " '/spaces/3DAIGC/LHM',\n", + " '/spaces/stabilityai/stable-virtual-camera',\n", + " '/spaces/tencent/Hunyuan-T1',\n", + " '/spaces',\n", + " '/datasets/nvidia/Llama-Nemotron-Post-Training-Dataset-v1',\n", + " '/datasets/glaiveai/reasoning-v1-20m',\n", + " '/datasets/FreedomIntelligence/medical-o1-reasoning-SFT',\n", + " '/datasets/facebook/collaborative_agent_bench',\n", + " '/datasets/a-m-team/AM-DeepSeek-R1-Distilled-1.4M',\n", + " '/datasets',\n", + " '/join',\n", + " '/pricing#endpoints',\n", + " '/pricing#spaces',\n", + " '/pricing',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/allenai',\n", + " '/facebook',\n", + " '/amazon',\n", + " '/google',\n", + " '/Intel',\n", + " '/microsoft',\n", + " '/grammarly',\n", + " '/Writer',\n", + " '/docs/transformers',\n", + " '/docs/diffusers',\n", + " '/docs/safetensors',\n", + " '/docs/huggingface_hub',\n", + " '/docs/tokenizers',\n", + " '/docs/trl',\n", + " '/docs/transformers.js',\n", + 
" '/docs/smolagents',\n", + " '/docs/peft',\n", + " '/docs/datasets',\n", + " '/docs/text-generation-inference',\n", + " '/docs/accelerate',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/tasks',\n", + " 'https://ui.endpoints.huggingface.co',\n", + " '/chat',\n", + " '/huggingface',\n", + " '/brand',\n", + " '/terms-of-service',\n", + " '/privacy',\n", + " 'https://apply.workable.com/huggingface/',\n", + " 'mailto:press@huggingface.co',\n", + " '/learn',\n", + " '/docs',\n", + " '/blog',\n", + " 'https://discuss.huggingface.co',\n", + " 'https://status.huggingface.co/',\n", + " 'https://github.com/huggingface',\n", + " 'https://twitter.com/huggingface',\n", + " 'https://www.linkedin.com/company/huggingface/',\n", + " '/join/discord',\n", + " 'https://www.zhihu.com/org/huggingface',\n", + " 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/chinese-language-blog/wechat.jpg']" + ] + }, + "execution_count": 49, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ed = Website(\"https://huggingface.co\")\n", + "ed.links" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "4f0a8cd3-1053-45fe-9f59-783c7f91d160", + "metadata": {}, + "outputs": [], + "source": [ + "from openai import OpenAI\n", + "openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "fbe0ccac-d46f-4da2-8dbc-dde484ddacfc", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Here is the list of links on the website of https://huggingface.co - please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
Do not include Terms of Service, Privacy, email links.\n", + "Links (some might be relative links):\n", + "/\n", + "/models\n", + "/datasets\n", + "/spaces\n", + "/posts\n", + "/docs\n", + "/enterprise\n", + "/pricing\n", + "/login\n", + "/join\n", + "/spaces\n", + "/models\n", + "/deepseek-ai/DeepSeek-V3-0324\n", + "/Qwen/Qwen2.5-Omni-7B\n", + "/manycore-research/SpatialLM-Llama-1B\n", + "/ByteDance/InfiniteYou\n", + "/ds4sd/SmolDocling-256M-preview\n", + "/models\n", + "/spaces/ByteDance/InfiniteYou-FLUX\n", + "/spaces/Trudy/gemini-codrawing\n", + "/spaces/3DAIGC/LHM\n", + "/spaces/stabilityai/stable-virtual-camera\n", + "/spaces/tencent/Hunyuan-T1\n", + "/spaces\n", + "/datasets/nvidia/Llama-Nemotron-Post-Training-Dataset-v1\n", + "/datasets/glaiveai/reasoning-v1-20m\n", + "/datasets/FreedomIntelligence/medical-o1-reasoning-SFT\n", + "/datasets/facebook/collaborative_agent_bench\n", + "/datasets/a-m-team/AM-DeepSeek-R1-Distilled-1.4M\n", + "/datasets\n", + "/join\n", + "/pricing#endpoints\n", + "/pricing#spaces\n", + "/pricing\n", + "/enterprise\n", + "/enterprise\n", + "/enterprise\n", + "/enterprise\n", + "/enterprise\n", + "/enterprise\n", + "/enterprise\n", + "/allenai\n", + "/facebook\n", + "/amazon\n", + "/google\n", + "/Intel\n", + "/microsoft\n", + "/grammarly\n", + "/Writer\n", + "/docs/transformers\n", + "/docs/diffusers\n", + "/docs/safetensors\n", + "/docs/huggingface_hub\n", + "/docs/tokenizers\n", + "/docs/trl\n", + "/docs/transformers.js\n", + "/docs/smolagents\n", + "/docs/peft\n", + "/docs/datasets\n", + "/docs/text-generation-inference\n", + "/docs/accelerate\n", + "/models\n", + "/datasets\n", + "/spaces\n", + "/tasks\n", + "https://ui.endpoints.huggingface.co\n", + "/chat\n", + "/huggingface\n", + "/brand\n", + "/terms-of-service\n", + "/privacy\n", + "https://apply.workable.com/huggingface/\n", + "mailto:press@huggingface.co\n", + "/learn\n", + "/docs\n", + "/blog\n", + "https://discuss.huggingface.co\n", + 
"https://status.huggingface.co/\n", + "https://github.com/huggingface\n", + "https://twitter.com/huggingface\n", + "https://www.linkedin.com/company/huggingface/\n", + "/join/discord\n", + "https://www.zhihu.com/org/huggingface\n", + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/chinese-language-blog/wechat.jpg\n" + ] + } + ], + "source": [ + "print(get_links_user_prompt(ed))" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "a3cc38a3-dd28-46bf-977d-df7edb6909d6", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "73499a57-025d-490c-b62e-c007885d00a9", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['/',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/posts',\n", + " '/docs',\n", + " '/enterprise',\n", + " '/pricing',\n", + " '/login',\n", + " '/join',\n", + " '/spaces',\n", + " '/models',\n", + " '/deepseek-ai/DeepSeek-V3-0324',\n", + " '/Qwen/Qwen2.5-Omni-7B',\n", + " '/manycore-research/SpatialLM-Llama-1B',\n", + " '/ByteDance/InfiniteYou',\n", + " '/ds4sd/SmolDocling-256M-preview',\n", + " '/models',\n", + " '/spaces/ByteDance/InfiniteYou-FLUX',\n", + " '/spaces/Trudy/gemini-codrawing',\n", + " '/spaces/3DAIGC/LHM',\n", + " '/spaces/stabilityai/stable-virtual-camera',\n", + " '/spaces/tencent/Hunyuan-T1',\n", + " '/spaces',\n", + " '/datasets/nvidia/Llama-Nemotron-Post-Training-Dataset-v1',\n", + " 
'/datasets/glaiveai/reasoning-v1-20m',\n", + " '/datasets/FreedomIntelligence/medical-o1-reasoning-SFT',\n", + " '/datasets/facebook/collaborative_agent_bench',\n", + " '/datasets/a-m-team/AM-DeepSeek-R1-Distilled-1.4M',\n", + " '/datasets',\n", + " '/join',\n", + " '/pricing#endpoints',\n", + " '/pricing#spaces',\n", + " '/pricing',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/allenai',\n", + " '/facebook',\n", + " '/amazon',\n", + " '/google',\n", + " '/Intel',\n", + " '/microsoft',\n", + " '/grammarly',\n", + " '/Writer',\n", + " '/docs/transformers',\n", + " '/docs/diffusers',\n", + " '/docs/safetensors',\n", + " '/docs/huggingface_hub',\n", + " '/docs/tokenizers',\n", + " '/docs/trl',\n", + " '/docs/transformers.js',\n", + " '/docs/smolagents',\n", + " '/docs/peft',\n", + " '/docs/datasets',\n", + " '/docs/text-generation-inference',\n", + " '/docs/accelerate',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/tasks',\n", + " 'https://ui.endpoints.huggingface.co',\n", + " '/chat',\n", + " '/huggingface',\n", + " '/brand',\n", + " '/terms-of-service',\n", + " '/privacy',\n", + " 'https://apply.workable.com/huggingface/',\n", + " 'mailto:press@huggingface.co',\n", + " '/learn',\n", + " '/docs',\n", + " '/blog',\n", + " 'https://discuss.huggingface.co',\n", + " 'https://status.huggingface.co/',\n", + " 'https://github.com/huggingface',\n", + " 'https://twitter.com/huggingface',\n", + " 'https://www.linkedin.com/company/huggingface/',\n", + " '/join/discord']" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Anthropic has made their site harder to scrape, so I'm using HuggingFace..\n", + "\n", + "huggingface = Website(\"https://huggingface.co\")\n", + "huggingface.links" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "id": 
"d8b6e56d-cbaf-47f8-a055-c08293564af3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'links': [{'type': 'About page', 'url': 'https://graphql.huggingface.co/'},\n", + " {'type': 'Company page', 'url': 'https://apply.workable.com/huggingface/'},\n", + " {'type': 'Careers/Jobs page',\n", + " 'url': \"https://apply.workable.com/huggingface/'\"},\n", + " {'type': 'Blog', 'url': 'https://blog.huggingface.co/'}]}" + ] + }, + "execution_count": 79, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_links(\"https://huggingface.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": 81, + "id": "fd58cc22-8f4a-444a-a53f-0287581b1153", + "metadata": {}, + "outputs": [], + "source": [ + "def get_all_details(url):\n", + " result = \"Landing page:\\n\"\n", + " result += Website(url).get_contents()\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + " for link in links[\"links\"]:\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += Website(link[\"url\"]).get_contents()\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "id": "a6ca5912-d370-48d4-9125-69a62bff453b", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'About page', 'url': 'https://huggingface.co'}, {'type': 'Company page', 'url': 'https://huggingface.co/brand'}, {'type': 'Allenai team member (likely CEO)', 'url': 'https://github.com/huggingface'}, {'type': 'Microsoft partnership', 'url': 'https://www.linkedin.com/company/huggingface/'}, {'type': 'Intel partnership', 'url': 'https://www.linkedin.com/company/huggingface/'}, {'type': 'Amazon partnership', 'url': 'https://www.linkedin.com/company/huggingface/'}, {'type': 'Grammarly integration', 'url': 'https://grammarly.github.io/'}, {'type': 'Writer product page', 'url': 'https://writer.huggingface.co/'}, {'type': 'GitHub 
repository', 'url': 'https://github.com/huggingface'}, {'type': 'Discord server for community discussion', 'url': 'https://join.discord.huggingface.co/ '}]}\n", + "website error\n", + "website error\n", + "Landing page:\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Explore AI Apps\n", + "or\n", + "Browse 1M+ models\n", + "Trending on\n", + "this week\n", + "Models\n", + "deepseek-ai/DeepSeek-V3-0324\n", + "Updated\n", + "1 day ago\n", + "•\n", + "32.5k\n", + "•\n", + "1.88k\n", + "Qwen/Qwen2.5-Omni-7B\n", + "Updated\n", + "1 day ago\n", + "•\n", + "3.03k\n", + "•\n", + "662\n", + "manycore-research/SpatialLM-Llama-1B\n", + "Updated\n", + "7 days ago\n", + "•\n", + "6.14k\n", + "•\n", + "754\n", + "ByteDance/InfiniteYou\n", + "Updated\n", + "3 days ago\n", + "•\n", + "436\n", + "ds4sd/SmolDocling-256M-preview\n", + "Updated\n", + "5 days ago\n", + "•\n", + "40.8k\n", + "•\n", + "994\n", + "Browse 1M+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "473\n", + "473\n", + "InfiniteYou-FLUX\n", + "📸\n", + "Flexible Photo Recrafting While Preserving Your Identity\n", + "Running\n", + "323\n", + "323\n", + "Gemini Co-Drawing\n", + "✏\n", + "Gemini 2.0 native image generation co-doodling\n", + "Running\n", + "on\n", + "Zero\n", + "204\n", + "204\n", + "LHM\n", + "⚡\n", + "Large Animatable Human Model\n", + "Running\n", + "on\n", + "L40S\n", + "325\n", + "325\n", + "Stable Virtual Camera\n", + "⚡\n", + "Generate virtual camera views from input images\n", + "Running\n", + "163\n", + "163\n", + "Hunyuan T1\n", + "💬\n", + "Hunyuan T1模型体验\n", + "Browse 400k+ applications\n", + "Datasets\n", + 
"nvidia/Llama-Nemotron-Post-Training-Dataset-v1\n", + "Updated\n", + "10 days ago\n", + "•\n", + "6.96k\n", + "•\n", + "256\n", + "glaiveai/reasoning-v1-20m\n", + "Updated\n", + "9 days ago\n", + "•\n", + "5.71k\n", + "•\n", + "116\n", + "FreedomIntelligence/medical-o1-reasoning-SFT\n", + "Updated\n", + "Feb 22\n", + "•\n", + "27k\n", + "•\n", + "566\n", + "facebook/collaborative_agent_bench\n", + "Updated\n", + "8 days ago\n", + "•\n", + "83\n", + "•\n", + "47\n", + "a-m-team/AM-DeepSeek-R1-Distilled-1.4M\n", + "Updated\n", + "about 18 hours ago\n", + "•\n", + "2.26k\n", + "•\n", + "68\n", + "Browse 250k+ datasets\n", + "The Home of Machine Learning\n", + "Create, discover and collaborate on ML better.\n", + "The collaboration platform\n", + "Host and collaborate on unlimited public models, datasets and applications.\n", + "Move faster\n", + "With the HF Open source stack.\n", + "Explore all modalities\n", + "Text, image, video, audio or even 3D.\n", + "Build your portfolio\n", + "Share your work with the world and build your ML profile.\n", + "Sign Up\n", + "Accelerate your ML\n", + "We provide paid Compute and Enterprise solutions.\n", + "Compute\n", + "Deploy on optimized\n", + "Inference Endpoints\n", + "or update your\n", + "Spaces applications\n", + "to a GPU in a few clicks.\n", + "View pricing\n", + "Starting at $0.60/hour for GPU\n", + "Enterprise\n", + "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", + "\t\t\tdedicated support.\n", + "Getting started\n", + "Starting at $20/user/month\n", + "Single Sign-On\n", + "Regions\n", + "Priority Support\n", + "Audit Logs\n", + "Resource Groups\n", + "Private Datasets Viewer\n", + "More than 50,000 organizations are using Hugging Face\n", + "Ai2\n", + "Enterprise\n", + "non-profit\n", + "•\n", + "396 models\n", + "•\n", + "2.97k followers\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.07k models\n", + "•\n", + "5.27k followers\n", + 
"Amazon\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "2.91k followers\n", + "Google\n", + "company\n", + "•\n", + "974 models\n", + "•\n", + "10.6k followers\n", + "Intel\n", + "company\n", + "•\n", + "219 models\n", + "•\n", + "2.37k followers\n", + "Microsoft\n", + "company\n", + "•\n", + "365 models\n", + "•\n", + "10.7k followers\n", + "Grammarly\n", + "Enterprise\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "145 followers\n", + "Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "253 followers\n", + "Our Open Source\n", + "We are building the foundation of ML tooling with the community.\n", + "Transformers\n", + "142,056\n", + "State-of-the-art ML for PyTorch, TensorFlow, JAX\n", + "Diffusers\n", + "28,292\n", + "State-of-the-art Diffusion models in PyTorch\n", + "Safetensors\n", + "3,189\n", + "Safe way to store/distribute neural network weights\n", + "Hub Python Library\n", + "2,469\n", + "Python client to interact with the Hugging Face Hub\n", + "Tokenizers\n", + "9,538\n", + "Fast tokenizers optimized for research & production\n", + "TRL\n", + "12,887\n", + "Train transformers LMs with reinforcement learning\n", + "Transformers.js\n", + "13,301\n", + "State-of-the-art ML running directly in your browser\n", + "smolagents\n", + "15,893\n", + "Smol library to build great agents in Python\n", + "PEFT\n", + "17,927\n", + "Parameter-efficient finetuning for large language models\n", + "Datasets\n", + "19,888\n", + "Access & share datasets for any ML tasks\n", + "Text Generation Inference\n", + "9,937\n", + "Serve language models with TGI optimized toolkit\n", + "Accelerate\n", + "8,542\n", + "Train PyTorch models with multi-GPU, TPU, mixed precision\n", + "System theme\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + 
"Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "About page\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Explore AI Apps\n", + "or\n", + "Browse 1M+ models\n", + "Trending on\n", + "this week\n", + "Models\n", + "deepseek-ai/DeepSeek-V3-0324\n", + "Updated\n", + "1 day ago\n", + "•\n", + "32.5k\n", + "•\n", + "1.88k\n", + "Qwen/Qwen2.5-Omni-7B\n", + "Updated\n", + "1 day ago\n", + "•\n", + "3.03k\n", + "•\n", + "662\n", + "manycore-research/SpatialLM-Llama-1B\n", + "Updated\n", + "7 days ago\n", + "•\n", + "6.14k\n", + "•\n", + "754\n", + "ByteDance/InfiniteYou\n", + "Updated\n", + "3 days ago\n", + "•\n", + "436\n", + "ds4sd/SmolDocling-256M-preview\n", + "Updated\n", + "5 days ago\n", + "•\n", + "40.8k\n", + "•\n", + "994\n", + "Browse 1M+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "473\n", + "473\n", + "InfiniteYou-FLUX\n", + "📸\n", + "Flexible Photo Recrafting While Preserving Your Identity\n", + "Running\n", + "323\n", + "323\n", + "Gemini Co-Drawing\n", + "✏\n", + "Gemini 2.0 native image generation co-doodling\n", + "Running\n", + "on\n", + "Zero\n", + "204\n", + "204\n", + "LHM\n", + "⚡\n", + "Large Animatable Human Model\n", + "Running\n", + "on\n", + "L40S\n", + "325\n", + "325\n", + "Stable Virtual Camera\n", + "⚡\n", + "Generate virtual camera views from input images\n", + "Running\n", + "163\n", + "163\n", + "Hunyuan T1\n", + "💬\n", + "Hunyuan T1模型体验\n", + "Browse 400k+ applications\n", + "Datasets\n", + 
"nvidia/Llama-Nemotron-Post-Training-Dataset-v1\n", + "Updated\n", + "10 days ago\n", + "•\n", + "6.96k\n", + "•\n", + "256\n", + "glaiveai/reasoning-v1-20m\n", + "Updated\n", + "9 days ago\n", + "•\n", + "5.71k\n", + "•\n", + "116\n", + "FreedomIntelligence/medical-o1-reasoning-SFT\n", + "Updated\n", + "Feb 22\n", + "•\n", + "27k\n", + "•\n", + "566\n", + "facebook/collaborative_agent_bench\n", + "Updated\n", + "8 days ago\n", + "•\n", + "83\n", + "•\n", + "47\n", + "a-m-team/AM-DeepSeek-R1-Distilled-1.4M\n", + "Updated\n", + "about 18 hours ago\n", + "•\n", + "2.26k\n", + "•\n", + "68\n", + "Browse 250k+ datasets\n", + "The Home of Machine Learning\n", + "Create, discover and collaborate on ML better.\n", + "The collaboration platform\n", + "Host and collaborate on unlimited public models, datasets and applications.\n", + "Move faster\n", + "With the HF Open source stack.\n", + "Explore all modalities\n", + "Text, image, video, audio or even 3D.\n", + "Build your portfolio\n", + "Share your work with the world and build your ML profile.\n", + "Sign Up\n", + "Accelerate your ML\n", + "We provide paid Compute and Enterprise solutions.\n", + "Compute\n", + "Deploy on optimized\n", + "Inference Endpoints\n", + "or update your\n", + "Spaces applications\n", + "to a GPU in a few clicks.\n", + "View pricing\n", + "Starting at $0.60/hour for GPU\n", + "Enterprise\n", + "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", + "\t\t\tdedicated support.\n", + "Getting started\n", + "Starting at $20/user/month\n", + "Single Sign-On\n", + "Regions\n", + "Priority Support\n", + "Audit Logs\n", + "Resource Groups\n", + "Private Datasets Viewer\n", + "More than 50,000 organizations are using Hugging Face\n", + "Ai2\n", + "Enterprise\n", + "non-profit\n", + "•\n", + "396 models\n", + "•\n", + "2.97k followers\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.07k models\n", + "•\n", + "5.27k followers\n", + 
"Amazon\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "2.91k followers\n", + "Google\n", + "company\n", + "•\n", + "974 models\n", + "•\n", + "10.6k followers\n", + "Intel\n", + "company\n", + "•\n", + "219 models\n", + "•\n", + "2.37k followers\n", + "Microsoft\n", + "company\n", + "•\n", + "365 models\n", + "•\n", + "10.7k followers\n", + "Grammarly\n", + "Enterprise\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "145 followers\n", + "Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "253 followers\n", + "Our Open Source\n", + "We are building the foundation of ML tooling with the community.\n", + "Transformers\n", + "142,056\n", + "State-of-the-art ML for PyTorch, TensorFlow, JAX\n", + "Diffusers\n", + "28,292\n", + "State-of-the-art Diffusion models in PyTorch\n", + "Safetensors\n", + "3,189\n", + "Safe way to store/distribute neural network weights\n", + "Hub Python Library\n", + "2,469\n", + "Python client to interact with the Hugging Face Hub\n", + "Tokenizers\n", + "9,538\n", + "Fast tokenizers optimized for research & production\n", + "TRL\n", + "12,887\n", + "Train transformers LMs with reinforcement learning\n", + "Transformers.js\n", + "13,301\n", + "State-of-the-art ML running directly in your browser\n", + "smolagents\n", + "15,893\n", + "Smol library to build great agents in Python\n", + "PEFT\n", + "17,927\n", + "Parameter-efficient finetuning for large language models\n", + "Datasets\n", + "19,888\n", + "Access & share datasets for any ML tasks\n", + "Text Generation Inference\n", + "9,937\n", + "Serve language models with TGI optimized toolkit\n", + "Accelerate\n", + "8,542\n", + "Train PyTorch models with multi-GPU, TPU, mixed precision\n", + "System theme\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + 
"Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "Company page\n", + "Webpage Title:\n", + "Brand assets - Hugging Face\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Hugging Face · Brand assets\n", + "HF Logos\n", + ".svg\n", + ".png\n", + ".ai\n", + ".svg\n", + ".png\n", + ".ai\n", + ".svg\n", + ".png\n", + ".ai\n", + "HF Colors\n", + "#FFD21E\n", + "#FF9D00\n", + "#6B7280\n", + "HF Bio\n", + "Hugging Face is the collaboration platform for the machine learning community.\n", + "\n", + "The Hugging Face Hub works as a central place where anyone can share, explore, discover, and experiment with open-source ML. HF empowers the next generation of machine learning engineers, scientists, and end users to learn, collaborate and share their work to build an open and ethical AI future together.\n", + "\n", + "With the fast-growing community, some of the most used open-source ML libraries and tools, and a talented science team exploring the edge of tech, Hugging Face is at the heart of the AI revolution.\n", + "Copy to clipboard\n", + "HF Universe\n", + "Find other assets available for use from the Hugging Face brand universe\n", + "here\n", + ".\n", + "System theme\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "Allenai team member (likely CEO)\n", + "Webpage Title:\n", + "Hugging Face · GitHub\n", + "Webpage 
Contents:\n", + "Skip to content\n", + "Navigation Menu\n", + "Toggle navigation\n", + "Sign in\n", + "huggingface\n", + "Product\n", + "GitHub Copilot\n", + "Write better code with AI\n", + "Security\n", + "Find and fix vulnerabilities\n", + "Actions\n", + "Automate any workflow\n", + "Codespaces\n", + "Instant dev environments\n", + "Issues\n", + "Plan and track work\n", + "Code Review\n", + "Manage code changes\n", + "Discussions\n", + "Collaborate outside of code\n", + "Code Search\n", + "Find more, search less\n", + "Explore\n", + "All features\n", + "Documentation\n", + "GitHub Skills\n", + "Blog\n", + "Solutions\n", + "By company size\n", + "Enterprises\n", + "Small and medium teams\n", + "Startups\n", + "Nonprofits\n", + "By use case\n", + "DevSecOps\n", + "DevOps\n", + "CI/CD\n", + "View all use cases\n", + "By industry\n", + "Healthcare\n", + "Financial services\n", + "Manufacturing\n", + "Government\n", + "View all industries\n", + "View all solutions\n", + "Resources\n", + "Topics\n", + "AI\n", + "DevOps\n", + "Security\n", + "Software Development\n", + "View all\n", + "Explore\n", + "Learning Pathways\n", + "Events & Webinars\n", + "Ebooks & Whitepapers\n", + "Customer Stories\n", + "Partners\n", + "Executive Insights\n", + "Open Source\n", + "GitHub Sponsors\n", + "Fund open source developers\n", + "The ReadME Project\n", + "GitHub community articles\n", + "Repositories\n", + "Topics\n", + "Trending\n", + "Collections\n", + "Enterprise\n", + "Enterprise platform\n", + "AI-powered developer platform\n", + "Available add-ons\n", + "Advanced Security\n", + "Enterprise-grade security features\n", + "Copilot for business\n", + "Enterprise-grade AI features\n", + "Premium Support\n", + "Enterprise-grade 24/7 support\n", + "Pricing\n", + "Search or jump to...\n", + "Search code, repositories, users, issues, pull requests...\n", + "Search\n", + "Clear\n", + "Search syntax tips\n", + "Provide feedback\n", + "We read every piece of feedback, and take your input 
very seriously.\n", + "Include my email address so I can be contacted\n", + "Cancel\n", + "Submit feedback\n", + "Saved searches\n", + "Use saved searches to filter your results more quickly\n", + "Cancel\n", + "Create saved search\n", + "Sign in\n", + "Sign up\n", + "Reseting focus\n", + "You signed in with another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You signed out in another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You switched accounts on another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "Dismiss alert\n", + "Hugging Face\n", + "The AI community building the future.\n", + "Verified\n", + "We've verified that the organization\n", + "huggingface\n", + "controls the domain:\n", + "huggingface.co\n", + "Learn more about verified organizations\n", + "46.8k\n", + "followers\n", + "NYC + Paris\n", + "https://huggingface.co/\n", + "X\n", + "@huggingface\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "Pinned\n", + "Loading\n", + "transformers\n", + "transformers\n", + "Public\n", + "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", + "Python\n", + "142k\n", + "28.4k\n", + "diffusers\n", + "diffusers\n", + "Public\n", + "🤗 Diffusers: State-of-the-art diffusion models for image, video, and audio generation in PyTorch and FLAX.\n", + "Python\n", + "28.3k\n", + "5.8k\n", + "datasets\n", + "datasets\n", + "Public\n", + "🤗 The largest hub of ready-to-use datasets for ML models with fast, easy-to-use and efficient data manipulation tools\n", + "Python\n", + "19.9k\n", + "2.8k\n", + "peft\n", + "peft\n", + "Public\n", + "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.\n", + "Python\n", + "17.9k\n", + "1.8k\n", + "accelerate\n", + "accelerate\n", + "Public\n", + "🚀 A simple 
way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support\n", + "Python\n", + "8.5k\n", + "1.1k\n", + "optimum\n", + "optimum\n", + "Public\n", + "🚀 Accelerate inference and training of 🤗 Transformers, Diffusers, TIMM and Sentence Transformers with easy to use hardware optimization tools\n", + "Python\n", + "2.8k\n", + "516\n", + "Repositories\n", + "Loading\n", + "Type\n", + "Select type\n", + "Forks\n", + "Archived\n", + "Mirrors\n", + "Templates\n", + "Language\n", + "Select language\n", + "All\n", + "C\n", + "C#\n", + "C++\n", + "Cuda\n", + "Dockerfile\n", + "Go\n", + "Handlebars\n", + "HTML\n", + "Java\n", + "JavaScript\n", + "Jupyter Notebook\n", + "Kotlin\n", + "Lua\n", + "MDX\n", + "Mustache\n", + "Nix\n", + "Python\n", + "Rust\n", + "Shell\n", + "Smarty\n", + "Swift\n", + "TypeScript\n", + "Sort\n", + "Select order\n", + "Last updated\n", + "Name\n", + "Stars\n", + "Showing 10 of 301 repositories\n", + "kernel-builder\n", + "Public\n", + "👷 Build compute kernels\n", + "huggingface/kernel-builder’s past year of commit activity\n", + "Nix\n", + "22\n", + "6\n", + "7\n", + "4\n", + "Updated\n", + "Mar 28, 2025\n", + "lerobot\n", + "Public\n", + "🤗 LeRobot: Making AI for Robotics more accessible with end-to-end learning\n", + "huggingface/lerobot’s past year of commit activity\n", + "Python\n", + "11,165\n", + "Apache-2.0\n", + "1,225\n", + "153\n", + "118\n", + "Updated\n", + "Mar 28, 2025\n", + "smolagents\n", + "Public\n", + "🤗 smolagents: a barebones library for agents that think in python code.\n", + "huggingface/smolagents’s past year of commit activity\n", + "Python\n", + "15,893\n", + "Apache-2.0\n", + "1,404\n", + "105\n", + "99\n", + "Updated\n", + "Mar 28, 2025\n", + "notebooks\n", + "Public\n", + "Notebooks using the Hugging Face libraries 🤗\n", + "huggingface/notebooks’s past year of commit activity\n", + "Jupyter 
Notebook\n", + "3,973\n", + "Apache-2.0\n", + "1,630\n", + "131\n", + "69\n", + "Updated\n", + "Mar 28, 2025\n", + "meshgen\n", + "Public\n", + "A blender addon for generating meshes with AI\n", + "huggingface/meshgen’s past year of commit activity\n", + "Python\n", + "520\n", + "MIT\n", + "29\n", + "9\n", + "0\n", + "Updated\n", + "Mar 28, 2025\n", + "huggingface_hub\n", + "Public\n", + "The official Python client for the Huggingface Hub.\n", + "huggingface/huggingface_hub’s past year of commit activity\n", + "Python\n", + "2,469\n", + "Apache-2.0\n", + "658\n", + "146\n", + "(4 issues need help)\n", + "19\n", + "Updated\n", + "Mar 28, 2025\n", + "lighteval\n", + "Public\n", + "Lighteval is your all-in-one toolkit for evaluating LLMs across multiple backends\n", + "huggingface/lighteval’s past year of commit activity\n", + "Python\n", + "1,349\n", + "MIT\n", + "211\n", + "90\n", + "(1 issue needs help)\n", + "33\n", + "Updated\n", + "Mar 28, 2025\n", + "trl\n", + "Public\n", + "Train transformer language models with reinforcement learning.\n", + "huggingface/trl’s past year of commit activity\n", + "Python\n", + "12,887\n", + "Apache-2.0\n", + "1,734\n", + "329\n", + "73\n", + "Updated\n", + "Mar 27, 2025\n", + "optimum-neuron\n", + "Public\n", + "Easy, fast and very cheap training and inference on AWS Trainium and Inferentia chips.\n", + "huggingface/optimum-neuron’s past year of commit activity\n", + "Jupyter Notebook\n", + "222\n", + "Apache-2.0\n", + "71\n", + "19\n", + "8\n", + "Updated\n", + "Mar 27, 2025\n", + "hub-docs\n", + "Public\n", + "Docs of the Hugging Face Hub\n", + "huggingface/hub-docs’s past year of commit activity\n", + "Handlebars\n", + "363\n", + "Apache-2.0\n", + "286\n", + "100\n", + "35\n", + "Updated\n", + "Mar 27, 2025\n", + "View all repositories\n", + "People\n", + "View all\n", + "Top languages\n", + "Loading…\n", + "Most used topics\n", + "Loading…\n", + "Footer\n", + "© 2025 GitHub, Inc.\n", + "Footer navigation\n", + "Terms\n", + 
"Privacy\n", + "Security\n", + "Status\n", + "Docs\n", + "Contact\n", + "Manage cookies\n", + "Do not share my personal information\n", + "You can’t perform that action at this time.\n", + "\n", + "\n", + "\n", + "Microsoft partnership\n", + "Webpage Title:\n", + "Hugging Face | LinkedIn\n", + "Webpage Contents:\n", + "Skip to main content\n", + "LinkedIn\n", + "Articles\n", + "People\n", + "Learning\n", + "Jobs\n", + "Games\n", + "Get the app\n", + "Join now\n", + "Sign in\n", + "Hugging Face\n", + "Software Development\n", + "The AI community building the future.\n", + "See jobs\n", + "Follow\n", + "View all 513 employees\n", + "Report this company\n", + "About us\n", + "The AI community building the future.\n", + "Website\n", + "https://huggingface.co\n", + "External link for Hugging Face\n", + "Industry\n", + "Software Development\n", + "Company size\n", + "51-200 employees\n", + "Type\n", + "Privately Held\n", + "Founded\n", + "2016\n", + "Specialties\n", + "machine learning, natural language processing, and deep learning\n", + "Products\n", + "Hugging Face\n", + "Hugging Face\n", + "Natural Language Processing (NLP) Software\n", + "We‚Äôre on a journey to solve and democratize artificial intelligence through natural language.\n", + "Locations\n", + "Primary\n", + "Get directions\n", + "Paris, FR\n", + "Get directions\n", + "Employees at Hugging Face\n", + "Ludovic Huraux\n", + "Bassem ASSEH\n", + "Rajat Arya\n", + "Tech Lead & Software Engineer @ HF | prev: co-founder XetHub, Apple, Turi, AWS, Microsoft\n", + "Jeff Boudier\n", + "Product + Growth at Hugging Face\n", + "See all employees\n", + "Updates\n", + "Hugging Face\n", + "reposted this\n", + "Freddy Boulton\n", + "Software Engineer @ ü§ó\n", + "14h\n", + "Report this post\n", + "Generate lifelike audio in real-time without a GPU! 
üöÄ\n", + "\n", + "Check out orpheus-cpp: a\n", + "llama.cpp\n", + "port of orpheus 3b text-to-speech model with built-in support for sync and async streaming.\n", + "\n", + "ùöôùöíùöô ùöíùöóùöúùöùùöäùöïùöï ùöòùöõùöôùöëùöéùöûùöú-ùöåùöôùöô\n", + "ùöôùö¢ùöùùöëùöòùöó -ùöñ ùöòùöõùöôùöëùöéùöûùöú_ùöåùöôùöô\n", + "\n", + "Project code:\n", + "https://lnkd.in/ekPpN9mc\n", + "‚Ķmore\n", + "227\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Report this post\n", + "We just turned the humble dataframe into a superweapon‚ö°Ô∏è\n", + "dashboarding will never be the same!! üìä\n", + "\n", + "new Gradio Dataframe has:\n", + "- multi-cell selection\n", + "- column pinning\n", + "- search + filtering\n", + "- fullscreen mode\n", + "- accessibility upgrades, and more\n", + "‚Ķmore\n", + "88\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "1d\n", + "Report this post\n", + "is your vision LM in prod even safe? üëÄ\n", + "\n", + "ShieldGemma 2 is the first ever safety model for multimodal vision LMs in production by\n", + "Google DeepMind\n", + ", came with Gemma 3 üî•\n", + "\n", + "I saw confusion around how to use it, so I put together a notebook and a demo, find it in the comments üí¨\n", + "364\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "1d\n", + "Report this post\n", + "is your vision LM in prod even safe? 
üëÄ\n", + "\n", + "ShieldGemma 2 is the first ever safety model for multimodal vision LMs in production by\n", + "Google DeepMind\n", + ", came with Gemma 3 üî•\n", + "\n", + "I saw confusion around how to use it, so I put together a notebook and a demo, find it in the comments üí¨\n", + "364\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Sergio Paniego Blanco\n", + "ML Engineer @ Hugging Face ü§ó | AI PhD | Google Summer of Code '18-'24\n", + "2d\n", + "Report this post\n", + "The Bonus Unit 2, \"AI Agent Observability & Evaluation,\" is now live on our\n", + "Hugging Face\n", + "agents course! üéì\n", + "\n", + "You'll learn to:\n", + "üîß Instrument agents with OpenTelemetry\n", + "üìä Track token usage, latency & errors\n", + "üìà Evaluate with LLM-as-a-judge\n", + "üìö Benchmark with GSM8K\n", + "\n", + "üëâ Check out the course here:\n", + "https://lnkd.in/d2jiTx6j\n", + "199\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Edited\n", + "Report this post\n", + "Create Infinite Photographs of You with InfiniteYou-Flux!\n", + "\n", + "Flexible photo recreation that better preserves identity compared to current solutions like Pulid, IP Adapter, etc. 
üî• üí™ \n", + "\n", + "Current full-performance bf16 model inference requires a peak VRAM of around 43 GB.\n", + "\n", + "You can build InfU on your own hardware:\n", + "https://lnkd.in/g9dc_vVh\n", + "Or Play for free on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gzF7rikZ\n", + "147\n", + "5 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Edited\n", + "Report this post\n", + "ü§Ø Generate high-quality podcasts with the voices you want!\n", + "\n", + "MoonCast is an open sourced, multi-lingual, and zeroshot model.\n", + "\n", + "You just need to upload two sample voices, create a script, and that's it, run the model--You get a üî• notebooklm-like podcast.\n", + "\n", + "Model and App are released on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gUk2EssP\n", + "119\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Daniel Vila Suero\n", + "Building data tools @ Hugging Face ü§ó\n", + "2d\n", + "Edited\n", + "Report this post\n", + "üî• Big news for GPU poors: thanks to\n", + "Hyperbolic\n", + "and\n", + "Fireworks AI\n", + ", you can run¬†\n", + "DeepSeek AI\n", + "'s¬†new model using Hugging Face Inference Providers. What has changed since V3? Here's my quick home experiment üëá \n", + "\n", + "DeepSeek silently dropped an update to V3 yesterday. Benchmark results are available, showing significant improvements over V3. \n", + "\n", + "Still, it is always a good idea to run new models on data you care about and see more detailed, fine-grained results.\n", + "\n", + "Now that we can all run these new models from Day 0 with no GPUs required, I wanted to share my approach with an example I created this morning:\n", + "\n", + "1. I got a sample from the LIMA dataset (containing high-quality general instructions).\n", + "2. Run the instructions with V3 and the new version V3-0324.\n", + "3. 
Define and run a simple judge with Llama3.3-70B to compare the model responses.\n", + "4. Push the dataset and pipeline so you can check and run similar experiments! (see first comment)\n", + "5. Extracted the results with\n", + "Hugging Face\n", + "Data Studio.\n", + "\n", + "Results summary\n", + "- LIMA is not very challenging, but it is still interesting to see the differences between the two models.\n", + "- A majority of Ties indicate that both models are close for this domain and task.\n", + "- But still, V3-0324 consistently wins over V3 (33 times vs 6 times).\n", + "\n", + "As usual, the dataset, prompts, and pipeline are open-source (see first comment).\n", + "\n", + "What other experiments you'd like to see?\n", + "191\n", + "4 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Report this post\n", + "StarVector is a multimodal vision-language model for generating SVG (Scalable Vector Graphics). üëá \n", + "\n", + "It can be used to perform image2SVG and text2SVG generation. Live demo shows how the image generation is treated similar to a code generation task, using the power of StarVector multimodal VLM! ü§© \n", + "\n", + "üöÄ Play with the app on Huggingface:\n", + "https://lnkd.in/gCzdEbvj\n", + "ü•≥ If you want to build the model locally with a gradio app:\n", + "https://lnkd.in/gDzCpdDN\n", + "‚Ķmore\n", + "1,365\n", + "39 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Giada Pistilli\n", + "Principal Ethicist at Hugging Face | PhD in Philosophy at Sorbonne Universit√©\n", + "3d\n", + "Report this post\n", + "Excited to share our latest piece on the double-edged sword of AI agents published in\n", + "MIT Technology Review\n", + "! 
ü§ñ It builds on our research paper that's been making waves lately -- pretty cool to see all the attention it's getting!\n", + "\n", + "As these systems move beyond chat windows to navigate applications and execute complex tasks independently, we need to ask ourselves: how much control are we willing to surrender, and at what cost?\n", + "\n", + "In our recent op-ed,\n", + "Margaret Mitchell\n", + ",\n", + "Avijit Ghosh, PhD\n", + ",\n", + "Dr. Sasha Luccioni\n", + ", and I explore why the very feature being sold (reduced human oversight) is actually the primary vulnerability. When AI systems can control multiple information sources simultaneously, the potential for harm explodes exponentially. \n", + "\n", + "We imagine that \"It wasn't me‚Äîit was my agent!!\" will soon be a common refrain to excuse bad outcomes.\n", + "\n", + "The benefits of AI agents are undeniable, from assisting people with mobility challenges to coordinating emergency responses. But these benefits don't require surrendering complete human control.\n", + "\n", + "At\n", + "Hugging Face\n", + ", we're developing frameworks like smolagents that prioritize transparency and appropriate human oversight. 
Because human judgment, with all its imperfections, remains the fundamental component in ensuring these systems serve rather than subvert our interests.\n", + "123\n", + "15 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Join now to see what you are missing\n", + "Find people you know at Hugging Face\n", + "Browse recommended jobs for you\n", + "View all updates, news, and articles\n", + "Join now\n", + "Similar pages\n", + "Anthropic\n", + "Research Services\n", + "Mistral AI\n", + "Technology, Information and Internet\n", + "Paris, France\n", + "Perplexity\n", + "Software Development\n", + "San Francisco, California\n", + "OpenAI\n", + "Research Services\n", + "San Francisco, CA\n", + "LangChain\n", + "Technology, Information and Internet\n", + "Generative AI\n", + "Technology, Information and Internet\n", + "DeepLearning.AI\n", + "Software Development\n", + "Palo Alto, California\n", + "Google DeepMind\n", + "Research Services\n", + "London, London\n", + "LlamaIndex\n", + "Technology, Information and Internet\n", + "San Francisco, California\n", + "Cohere\n", + "Software Development\n", + "Toronto, Ontario\n", + "Show more similar pages\n", + "Show fewer similar pages\n", + "Browse jobs\n", + "Engineer jobs\n", + "555,845 open jobs\n", + "Machine Learning Engineer jobs\n", + "148,937 open jobs\n", + "Scientist jobs\n", + "48,969 open jobs\n", + "Software Engineer jobs\n", + "300,699 open jobs\n", + "Analyst jobs\n", + "694,057 open jobs\n", + "Intern jobs\n", + "71,196 open jobs\n", + "Developer jobs\n", + "258,935 open jobs\n", + "Manager jobs\n", + "1,880,925 open jobs\n", + "Product Manager jobs\n", + "199,941 open jobs\n", + "Director jobs\n", + "1,220,357 open jobs\n", + "Python Developer jobs\n", + "46,642 open jobs\n", + "Data Scientist jobs\n", + "264,158 open jobs\n", + "Data Analyst jobs\n", + "329,009 open jobs\n", + "Senior Software Engineer jobs\n", + "78,145 open jobs\n", + "Project Manager jobs\n", + "253,048 open jobs\n", + "Researcher 
jobs\n", + "195,654 open jobs\n", + "Associate jobs\n", + "1,091,945 open jobs\n", + "Data Engineer jobs\n", + "192,126 open jobs\n", + "Vice President jobs\n", + "235,270 open jobs\n", + "Specialist jobs\n", + "768,666 open jobs\n", + "Show more jobs like this\n", + "Show fewer jobs like this\n", + "Funding\n", + "Hugging Face\n", + "8 total rounds\n", + "Last Round\n", + "Series unknown\n", + "Sep 1, 2024\n", + "External Crunchbase Link for last round of funding\n", + "See more info on\n", + "crunchbase\n", + "More searches\n", + "More searches\n", + "Engineer jobs\n", + "Scientist jobs\n", + "Machine Learning Engineer jobs\n", + "Software Engineer jobs\n", + "Intern jobs\n", + "Developer jobs\n", + "Analyst jobs\n", + "Manager jobs\n", + "Senior Software Engineer jobs\n", + "Data Scientist jobs\n", + "Researcher jobs\n", + "Product Manager jobs\n", + "Director jobs\n", + "Associate jobs\n", + "Intelligence Specialist jobs\n", + "Data Analyst jobs\n", + "Data Science Specialist jobs\n", + "Python Developer jobs\n", + "Quantitative Analyst jobs\n", + "Project Manager jobs\n", + "Account Executive jobs\n", + "Specialist jobs\n", + "Data Engineer jobs\n", + "Designer jobs\n", + "Quantitative Researcher jobs\n", + "Consultant jobs\n", + "Solutions Architect jobs\n", + "Vice President jobs\n", + "User Experience Designer jobs\n", + "Head jobs\n", + "Full Stack Engineer jobs\n", + "Engineering Manager jobs\n", + "Software Engineer Intern jobs\n", + "Junior Software Engineer jobs\n", + "Software Intern jobs\n", + "Product Designer jobs\n", + "Solutions Engineer jobs\n", + "Staff Software Engineer jobs\n", + "Program Manager jobs\n", + "Senior Scientist jobs\n", + "Writer jobs\n", + "Research Intern jobs\n", + "Senior Product Manager jobs\n", + "Summer Intern jobs\n", + "Account Manager jobs\n", + "Recruiter jobs\n", + "Lead jobs\n", + "Research Engineer jobs\n", + "Computer Science Intern jobs\n", + "Platform Engineer jobs\n", + "Junior Developer jobs\n", + "Android 
Developer jobs\n", + "User Experience Researcher jobs\n", + "Java Software Engineer jobs\n", + "Site Reliability Engineer jobs\n", + "Graduate jobs\n", + "Software Engineering Manager jobs\n", + "Representative jobs\n", + "Business Development Specialist jobs\n", + "Computer Engineer jobs\n", + "LinkedIn\n", + "© 2025\n", + "About\n", + "Accessibility\n", + "User Agreement\n", + "Privacy Policy\n", + "Cookie Policy\n", + "Copyright Policy\n", + "Brand Policy\n", + "Guest Controls\n", + "Community Guidelines\n", + "ÿߟÑÿπÿ±ÿ®Ÿäÿ© (Arabic)\n", + "‡¶¨‡¶æ‡¶Ç‡¶≤‡¶æ (Bangla)\n", + "ƒåe≈°tina (Czech)\n", + "Dansk (Danish)\n", + "Deutsch (German)\n", + "ŒïŒªŒªŒ∑ŒΩŒπŒ∫Œ¨ (Greek)\n", + "English (English)\n", + "Espa√±ol (Spanish)\n", + "ŸÅÿßÿ±ÿ≥€å (Persian)\n", + "Suomi (Finnish)\n", + "Fran√ßais (French)\n", + "‡§π‡§ø‡§Ç‡§¶‡•Ä (Hindi)\n", + "Magyar (Hungarian)\n", + "Bahasa Indonesia (Indonesian)\n", + "Italiano (Italian)\n", + "◊¢◊ë◊®◊ô◊™ (Hebrew)\n", + "Êó•Êú¨Ë™û (Japanese)\n", + "Ìïú͵≠Ïñ¥ (Korean)\n", + "‡§Æ‡§∞‡§æ‡§†‡•Ä (Marathi)\n", + "Bahasa Malaysia (Malay)\n", + "Nederlands (Dutch)\n", + "Norsk (Norwegian)\n", + "‡®™‡©∞‡®ú‡®æ‡®¨‡©Ä (Punjabi)\n", + "Polski (Polish)\n", + "Portugu√™s (Portuguese)\n", + "Rom√¢nƒÉ (Romanian)\n", + "–†—É—Å—Å–∫–∏–π (Russian)\n", + "Svenska (Swedish)\n", + "‡∞§‡±Ü‡∞≤‡±Å‡∞ó‡±Å (Telugu)\n", + "‡∏†‡∏≤‡∏©‡∏≤‡πч∏ó‡∏¢ (Thai)\n", + "Tagalog (Tagalog)\n", + "T√ºrk√ße (Turkish)\n", + "–£–∫—Ä–∞—ó–Ω—Å—å–∫–∞ (Ukrainian)\n", + "Ti·∫øng Vi·ªát (Vietnamese)\n", + "ÁÆÄ‰Ωì‰∏≠Êñá (Chinese (Simplified))\n", + "Ê≠£È´î‰∏≠Êñá (Chinese (Traditional))\n", + "Language\n", + "Agree & Join LinkedIn\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "Sign in to see who you already know at Hugging Face\n", + "Sign in\n", + "Welcome back\n", + "Email or phone\n", + "Password\n", + "Show\n", + "Forgot password?\n", + "Sign in\n", + "or\n", + "By 
clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "New to LinkedIn?\n", + "Join now\n", + "or\n", + "New to LinkedIn?\n", + "Join now\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "LinkedIn\n", + "LinkedIn is better on the app\n", + "Don‚Äôt have the app? Get it in the Microsoft Store.\n", + "Open the app\n", + "\n", + "\n", + "\n", + "Intel partnership\n", + "Webpage Title:\n", + "Hugging Face | LinkedIn\n", + "Webpage Contents:\n", + "Skip to main content\n", + "LinkedIn\n", + "Articles\n", + "People\n", + "Learning\n", + "Jobs\n", + "Games\n", + "Get the app\n", + "Join now\n", + "Sign in\n", + "Hugging Face\n", + "Software Development\n", + "The AI community building the future.\n", + "See jobs\n", + "Follow\n", + "Discover all 513 employees\n", + "Report this company\n", + "About us\n", + "The AI community building the future.\n", + "Website\n", + "https://huggingface.co\n", + "External link for Hugging Face\n", + "Industry\n", + "Software Development\n", + "Company size\n", + "51-200 employees\n", + "Type\n", + "Privately Held\n", + "Founded\n", + "2016\n", + "Specialties\n", + "machine learning, natural language processing, and deep learning\n", + "Products\n", + "Hugging Face\n", + "Hugging Face\n", + "Natural Language Processing (NLP) Software\n", + "We‚Äôre on a journey to solve and democratize artificial intelligence through natural language.\n", + "Locations\n", + "Primary\n", + "Get directions\n", + "Paris, FR\n", + "Get directions\n", + "Employees at Hugging Face\n", + "Ludovic Huraux\n", + "Bassem ASSEH\n", + "Rajat Arya\n", + "Tech Lead & Software Engineer @ HF | prev: co-founder XetHub, Apple, Turi, AWS, Microsoft\n", + "Jeff Boudier\n", + "Product + Growth at Hugging Face\n", + "See all employees\n", + 
"Updates\n", + "Hugging Face\n", + "reposted this\n", + "Freddy Boulton\n", + "Software Engineer @ ü§ó\n", + "14h\n", + "Report this post\n", + "Generate lifelike audio in real-time without a GPU! üöÄ\n", + "\n", + "Check out orpheus-cpp: a\n", + "llama.cpp\n", + "port of orpheus 3b text-to-speech model with built-in support for sync and async streaming.\n", + "\n", + "ùöôùöíùöô ùöíùöóùöúùöùùöäùöïùöï ùöòùöõùöôùöëùöéùöûùöú-ùöåùöôùöô\n", + "ùöôùö¢ùöùùöëùöòùöó -ùöñ ùöòùöõùöôùöëùöéùöûùöú_ùöåùöôùöô\n", + "\n", + "Project code:\n", + "https://lnkd.in/ekPpN9mc\n", + "‚Ķmore\n", + "227\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Report this post\n", + "We just turned the humble dataframe into a superweapon‚ö°Ô∏è\n", + "dashboarding will never be the same!! üìä\n", + "\n", + "new Gradio Dataframe has:\n", + "- multi-cell selection\n", + "- column pinning\n", + "- search + filtering\n", + "- fullscreen mode\n", + "- accessibility upgrades, and more\n", + "‚Ķmore\n", + "88\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "1d\n", + "Report this post\n", + "is your vision LM in prod even safe? 
üëÄ\n", + "\n", + "ShieldGemma 2 is the first ever safety model for multimodal vision LMs in production by\n", + "Google DeepMind\n", + ", came with Gemma 3 üî•\n", + "\n", + "I saw confusion around how to use it, so I put together a notebook and a demo, find it in the comments üí¨\n", + "364\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "1d\n", + "Report this post\n", + "is your vision LM in prod even safe? üëÄ\n", + "\n", + "ShieldGemma 2 is the first ever safety model for multimodal vision LMs in production by\n", + "Google DeepMind\n", + ", came with Gemma 3 üî•\n", + "\n", + "I saw confusion around how to use it, so I put together a notebook and a demo, find it in the comments üí¨\n", + "364\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Sergio Paniego Blanco\n", + "ML Engineer @ Hugging Face ü§ó | AI PhD | Google Summer of Code '18-'24\n", + "2d\n", + "Report this post\n", + "The Bonus Unit 2, \"AI Agent Observability & Evaluation,\" is now live on our\n", + "Hugging Face\n", + "agents course! üéì\n", + "\n", + "You'll learn to:\n", + "üîß Instrument agents with OpenTelemetry\n", + "üìä Track token usage, latency & errors\n", + "üìà Evaluate with LLM-as-a-judge\n", + "üìö Benchmark with GSM8K\n", + "\n", + "üëâ Check out the course here:\n", + "https://lnkd.in/d2jiTx6j\n", + "199\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Edited\n", + "Report this post\n", + "Create Infinite Photographs of You with InfiniteYou-Flux!\n", + "\n", + "Flexible photo recreation that better preserves identity compared to current solutions like Pulid, IP Adapter, etc. 
üî• üí™ \n", + "\n", + "Current full-performance bf16 model inference requires a peak VRAM of around 43 GB.\n", + "\n", + "You can build InfU on your own hardware:\n", + "https://lnkd.in/g9dc_vVh\n", + "Or Play for free on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gzF7rikZ\n", + "147\n", + "5 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Edited\n", + "Report this post\n", + "ü§Ø Generate high-quality podcasts with the voices you want!\n", + "\n", + "MoonCast is an open sourced, multi-lingual, and zeroshot model.\n", + "\n", + "You just need to upload two sample voices, create a script, and that's it, run the model--You get a üî• notebooklm-like podcast.\n", + "\n", + "Model and App are released on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gUk2EssP\n", + "119\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Daniel Vila Suero\n", + "Building data tools @ Hugging Face ü§ó\n", + "2d\n", + "Edited\n", + "Report this post\n", + "üî• Big news for GPU poors: thanks to\n", + "Hyperbolic\n", + "and\n", + "Fireworks AI\n", + ", you can run¬†\n", + "DeepSeek AI\n", + "'s¬†new model using Hugging Face Inference Providers. What has changed since V3? Here's my quick home experiment üëá \n", + "\n", + "DeepSeek silently dropped an update to V3 yesterday. Benchmark results are available, showing significant improvements over V3. \n", + "\n", + "Still, it is always a good idea to run new models on data you care about and see more detailed, fine-grained results.\n", + "\n", + "Now that we can all run these new models from Day 0 with no GPUs required, I wanted to share my approach with an example I created this morning:\n", + "\n", + "1. I got a sample from the LIMA dataset (containing high-quality general instructions).\n", + "2. Run the instructions with V3 and the new version V3-0324.\n", + "3. 
Define and run a simple judge with Llama3.3-70B to compare the model responses.\n", + "4. Push the dataset and pipeline so you can check and run similar experiments! (see first comment)\n", + "5. Extracted the results with\n", + "Hugging Face\n", + "Data Studio.\n", + "\n", + "Results summary\n", + "- LIMA is not very challenging, but it is still interesting to see the differences between the two models.\n", + "- A majority of Ties indicate that both models are close for this domain and task.\n", + "- But still, V3-0324 consistently wins over V3 (33 times vs 6 times).\n", + "\n", + "As usual, the dataset, prompts, and pipeline are open-source (see first comment).\n", + "\n", + "What other experiments you'd like to see?\n", + "191\n", + "4 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Report this post\n", + "StarVector is a multimodal vision-language model for generating SVG (Scalable Vector Graphics). üëá \n", + "\n", + "It can be used to perform image2SVG and text2SVG generation. Live demo shows how the image generation is treated similar to a code generation task, using the power of StarVector multimodal VLM! ü§© \n", + "\n", + "üöÄ Play with the app on Huggingface:\n", + "https://lnkd.in/gCzdEbvj\n", + "ü•≥ If you want to build the model locally with a gradio app:\n", + "https://lnkd.in/gDzCpdDN\n", + "‚Ķmore\n", + "1,365\n", + "39 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Giada Pistilli\n", + "Principal Ethicist at Hugging Face | PhD in Philosophy at Sorbonne Universit√©\n", + "3d\n", + "Report this post\n", + "Excited to share our latest piece on the double-edged sword of AI agents published in\n", + "MIT Technology Review\n", + "! 
ü§ñ It builds on our research paper that's been making waves lately -- pretty cool to see all the attention it's getting!\n", + "\n", + "As these systems move beyond chat windows to navigate applications and execute complex tasks independently, we need to ask ourselves: how much control are we willing to surrender, and at what cost?\n", + "\n", + "In our recent op-ed,\n", + "Margaret Mitchell\n", + ",\n", + "Avijit Ghosh, PhD\n", + ",\n", + "Dr. Sasha Luccioni\n", + ", and I explore why the very feature being sold (reduced human oversight) is actually the primary vulnerability. When AI systems can control multiple information sources simultaneously, the potential for harm explodes exponentially. \n", + "\n", + "We imagine that \"It wasn't me‚Äîit was my agent!!\" will soon be a common refrain to excuse bad outcomes.\n", + "\n", + "The benefits of AI agents are undeniable, from assisting people with mobility challenges to coordinating emergency responses. But these benefits don't require surrendering complete human control.\n", + "\n", + "At\n", + "Hugging Face\n", + ", we're developing frameworks like smolagents that prioritize transparency and appropriate human oversight. 
Because human judgment, with all its imperfections, remains the fundamental component in ensuring these systems serve rather than subvert our interests.\n", + "123\n", + "15 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Join now to see what you are missing\n", + "Find people you know at Hugging Face\n", + "Browse recommended jobs for you\n", + "View all updates, news, and articles\n", + "Join now\n", + "Similar pages\n", + "Anthropic\n", + "Research Services\n", + "Mistral AI\n", + "Technology, Information and Internet\n", + "Paris, France\n", + "Perplexity\n", + "Software Development\n", + "San Francisco, California\n", + "OpenAI\n", + "Research Services\n", + "San Francisco, CA\n", + "LangChain\n", + "Technology, Information and Internet\n", + "Generative AI\n", + "Technology, Information and Internet\n", + "DeepLearning.AI\n", + "Software Development\n", + "Palo Alto, California\n", + "Google DeepMind\n", + "Research Services\n", + "London, London\n", + "LlamaIndex\n", + "Technology, Information and Internet\n", + "San Francisco, California\n", + "Cohere\n", + "Software Development\n", + "Toronto, Ontario\n", + "Show more similar pages\n", + "Show fewer similar pages\n", + "Browse jobs\n", + "Engineer jobs\n", + "555,845 open jobs\n", + "Machine Learning Engineer jobs\n", + "148,937 open jobs\n", + "Scientist jobs\n", + "48,969 open jobs\n", + "Software Engineer jobs\n", + "300,699 open jobs\n", + "Analyst jobs\n", + "694,057 open jobs\n", + "Intern jobs\n", + "71,196 open jobs\n", + "Developer jobs\n", + "258,935 open jobs\n", + "Manager jobs\n", + "1,880,925 open jobs\n", + "Product Manager jobs\n", + "199,941 open jobs\n", + "Director jobs\n", + "1,220,357 open jobs\n", + "Python Developer jobs\n", + "46,642 open jobs\n", + "Data Scientist jobs\n", + "264,158 open jobs\n", + "Data Analyst jobs\n", + "329,009 open jobs\n", + "Senior Software Engineer jobs\n", + "78,145 open jobs\n", + "Project Manager jobs\n", + "253,048 open jobs\n", + "Researcher 
jobs\n", + "195,654 open jobs\n", + "Associate jobs\n", + "1,091,945 open jobs\n", + "Data Engineer jobs\n", + "192,126 open jobs\n", + "Vice President jobs\n", + "235,270 open jobs\n", + "Specialist jobs\n", + "768,666 open jobs\n", + "Show more jobs like this\n", + "Show fewer jobs like this\n", + "Funding\n", + "Hugging Face\n", + "8 total rounds\n", + "Last Round\n", + "Series unknown\n", + "Sep 1, 2024\n", + "External Crunchbase Link for last round of funding\n", + "See more info on\n", + "crunchbase\n", + "More searches\n", + "More searches\n", + "Engineer jobs\n", + "Scientist jobs\n", + "Machine Learning Engineer jobs\n", + "Software Engineer jobs\n", + "Intern jobs\n", + "Developer jobs\n", + "Analyst jobs\n", + "Manager jobs\n", + "Senior Software Engineer jobs\n", + "Data Scientist jobs\n", + "Researcher jobs\n", + "Product Manager jobs\n", + "Director jobs\n", + "Associate jobs\n", + "Intelligence Specialist jobs\n", + "Data Analyst jobs\n", + "Data Science Specialist jobs\n", + "Python Developer jobs\n", + "Quantitative Analyst jobs\n", + "Project Manager jobs\n", + "Account Executive jobs\n", + "Specialist jobs\n", + "Data Engineer jobs\n", + "Designer jobs\n", + "Quantitative Researcher jobs\n", + "Consultant jobs\n", + "Solutions Architect jobs\n", + "Vice President jobs\n", + "User Experience Designer jobs\n", + "Head jobs\n", + "Full Stack Engineer jobs\n", + "Engineering Manager jobs\n", + "Software Engineer Intern jobs\n", + "Junior Software Engineer jobs\n", + "Software Intern jobs\n", + "Product Designer jobs\n", + "Solutions Engineer jobs\n", + "Staff Software Engineer jobs\n", + "Program Manager jobs\n", + "Senior Scientist jobs\n", + "Writer jobs\n", + "Research Intern jobs\n", + "Senior Product Manager jobs\n", + "Summer Intern jobs\n", + "Account Manager jobs\n", + "Recruiter jobs\n", + "Lead jobs\n", + "Research Engineer jobs\n", + "Computer Science Intern jobs\n", + "Platform Engineer jobs\n", + "Junior Developer jobs\n", + "Android 
Developer jobs\n", + "User Experience Researcher jobs\n", + "Java Software Engineer jobs\n", + "Site Reliability Engineer jobs\n", + "Graduate jobs\n", + "Software Engineering Manager jobs\n", + "Representative jobs\n", + "Business Development Specialist jobs\n", + "Computer Engineer jobs\n", + "LinkedIn\n", + "© 2025\n", + "About\n", + "Accessibility\n", + "User Agreement\n", + "Privacy Policy\n", + "Cookie Policy\n", + "Copyright Policy\n", + "Brand Policy\n", + "Guest Controls\n", + "Community Guidelines\n", + "ÿߟÑÿπÿ±ÿ®Ÿäÿ© (Arabic)\n", + "‡¶¨‡¶æ‡¶Ç‡¶≤‡¶æ (Bangla)\n", + "ƒåe≈°tina (Czech)\n", + "Dansk (Danish)\n", + "Deutsch (German)\n", + "ŒïŒªŒªŒ∑ŒΩŒπŒ∫Œ¨ (Greek)\n", + "English (English)\n", + "Espa√±ol (Spanish)\n", + "ŸÅÿßÿ±ÿ≥€å (Persian)\n", + "Suomi (Finnish)\n", + "Fran√ßais (French)\n", + "‡§π‡§ø‡§Ç‡§¶‡•Ä (Hindi)\n", + "Magyar (Hungarian)\n", + "Bahasa Indonesia (Indonesian)\n", + "Italiano (Italian)\n", + "◊¢◊ë◊®◊ô◊™ (Hebrew)\n", + "Êó•Êú¨Ë™û (Japanese)\n", + "Ìïú͵≠Ïñ¥ (Korean)\n", + "‡§Æ‡§∞‡§æ‡§†‡•Ä (Marathi)\n", + "Bahasa Malaysia (Malay)\n", + "Nederlands (Dutch)\n", + "Norsk (Norwegian)\n", + "‡®™‡©∞‡®ú‡®æ‡®¨‡©Ä (Punjabi)\n", + "Polski (Polish)\n", + "Portugu√™s (Portuguese)\n", + "Rom√¢nƒÉ (Romanian)\n", + "–†—É—Å—Å–∫–∏–π (Russian)\n", + "Svenska (Swedish)\n", + "‡∞§‡±Ü‡∞≤‡±Å‡∞ó‡±Å (Telugu)\n", + "‡∏†‡∏≤‡∏©‡∏≤‡πч∏ó‡∏¢ (Thai)\n", + "Tagalog (Tagalog)\n", + "T√ºrk√ße (Turkish)\n", + "–£–∫—Ä–∞—ó–Ω—Å—å–∫–∞ (Ukrainian)\n", + "Ti·∫øng Vi·ªát (Vietnamese)\n", + "ÁÆÄ‰Ωì‰∏≠Êñá (Chinese (Simplified))\n", + "Ê≠£È´î‰∏≠Êñá (Chinese (Traditional))\n", + "Language\n", + "Agree & Join LinkedIn\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "Sign in to see who you already know at Hugging Face\n", + "Sign in\n", + "Welcome back\n", + "Email or phone\n", + "Password\n", + "Show\n", + "Forgot password?\n", + "Sign in\n", + "or\n", + "By 
clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "New to LinkedIn?\n", + "Join now\n", + "or\n", + "New to LinkedIn?\n", + "Join now\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "LinkedIn\n", + "LinkedIn is better on the app\n", + "Don‚Äôt have the app? Get it in the Microsoft Store.\n", + "Open the app\n", + "\n", + "\n", + "\n", + "Amazon partnership\n", + "Webpage Title:\n", + "Hugging Face | LinkedIn\n", + "Webpage Contents:\n", + "Skip to main content\n", + "LinkedIn\n", + "Articles\n", + "People\n", + "Learning\n", + "Jobs\n", + "Games\n", + "Get the app\n", + "Join now\n", + "Sign in\n", + "Hugging Face\n", + "Software Development\n", + "The AI community building the future.\n", + "See jobs\n", + "Follow\n", + "View all 513 employees\n", + "Report this company\n", + "About us\n", + "The AI community building the future.\n", + "Website\n", + "https://huggingface.co\n", + "External link for Hugging Face\n", + "Industry\n", + "Software Development\n", + "Company size\n", + "51-200 employees\n", + "Type\n", + "Privately Held\n", + "Founded\n", + "2016\n", + "Specialties\n", + "machine learning, natural language processing, and deep learning\n", + "Products\n", + "Hugging Face\n", + "Hugging Face\n", + "Natural Language Processing (NLP) Software\n", + "We‚Äôre on a journey to solve and democratize artificial intelligence through natural language.\n", + "Locations\n", + "Primary\n", + "Get directions\n", + "Paris, FR\n", + "Get directions\n", + "Employees at Hugging Face\n", + "Ludovic Huraux\n", + "Bassem ASSEH\n", + "Rajat Arya\n", + "Tech Lead & Software Engineer @ HF | prev: co-founder XetHub, Apple, Turi, AWS, Microsoft\n", + "Jeff Boudier\n", + "Product + Growth at Hugging Face\n", + "See all employees\n", + 
"Updates\n", + "Hugging Face\n", + "reposted this\n", + "Freddy Boulton\n", + "Software Engineer @ ü§ó\n", + "14h\n", + "Report this post\n", + "Generate lifelike audio in real-time without a GPU! üöÄ\n", + "\n", + "Check out orpheus-cpp: a\n", + "llama.cpp\n", + "port of orpheus 3b text-to-speech model with built-in support for sync and async streaming.\n", + "\n", + "ùöôùöíùöô ùöíùöóùöúùöùùöäùöïùöï ùöòùöõùöôùöëùöéùöûùöú-ùöåùöôùöô\n", + "ùöôùö¢ùöùùöëùöòùöó -ùöñ ùöòùöõùöôùöëùöéùöûùöú_ùöåùöôùöô\n", + "\n", + "Project code:\n", + "https://lnkd.in/ekPpN9mc\n", + "‚Ķmore\n", + "227\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Report this post\n", + "We just turned the humble dataframe into a superweapon‚ö°Ô∏è\n", + "dashboarding will never be the same!! üìä\n", + "\n", + "new Gradio Dataframe has:\n", + "- multi-cell selection\n", + "- column pinning\n", + "- search + filtering\n", + "- fullscreen mode\n", + "- accessibility upgrades, and more\n", + "‚Ķmore\n", + "88\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "1d\n", + "Report this post\n", + "is your vision LM in prod even safe? 
üëÄ\n", + "\n", + "ShieldGemma 2 is the first ever safety model for multimodal vision LMs in production by\n", + "Google DeepMind\n", + ", came with Gemma 3 üî•\n", + "\n", + "I saw confusion around how to use it, so I put together a notebook and a demo, find it in the comments üí¨\n", + "364\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "1d\n", + "Report this post\n", + "is your vision LM in prod even safe? üëÄ\n", + "\n", + "ShieldGemma 2 is the first ever safety model for multimodal vision LMs in production by\n", + "Google DeepMind\n", + ", came with Gemma 3 üî•\n", + "\n", + "I saw confusion around how to use it, so I put together a notebook and a demo, find it in the comments üí¨\n", + "364\n", + "10 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Sergio Paniego Blanco\n", + "ML Engineer @ Hugging Face ü§ó | AI PhD | Google Summer of Code '18-'24\n", + "2d\n", + "Report this post\n", + "The Bonus Unit 2, \"AI Agent Observability & Evaluation,\" is now live on our\n", + "Hugging Face\n", + "agents course! üéì\n", + "\n", + "You'll learn to:\n", + "üîß Instrument agents with OpenTelemetry\n", + "üìä Track token usage, latency & errors\n", + "üìà Evaluate with LLM-as-a-judge\n", + "üìö Benchmark with GSM8K\n", + "\n", + "üëâ Check out the course here:\n", + "https://lnkd.in/d2jiTx6j\n", + "199\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Edited\n", + "Report this post\n", + "Create Infinite Photographs of You with InfiniteYou-Flux!\n", + "\n", + "Flexible photo recreation that better preserves identity compared to current solutions like Pulid, IP Adapter, etc. 
üî• üí™ \n", + "\n", + "Current full-performance bf16 model inference requires a peak VRAM of around 43 GB.\n", + "\n", + "You can build InfU on your own hardware:\n", + "https://lnkd.in/g9dc_vVh\n", + "Or Play for free on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gzF7rikZ\n", + "147\n", + "5 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Edited\n", + "Report this post\n", + "ü§Ø Generate high-quality podcasts with the voices you want!\n", + "\n", + "MoonCast is an open sourced, multi-lingual, and zeroshot model.\n", + "\n", + "You just need to upload two sample voices, create a script, and that's it, run the model--You get a üî• notebooklm-like podcast.\n", + "\n", + "Model and App are released on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gUk2EssP\n", + "119\n", + "7 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Daniel Vila Suero\n", + "Building data tools @ Hugging Face ü§ó\n", + "2d\n", + "Edited\n", + "Report this post\n", + "üî• Big news for GPU poors: thanks to\n", + "Hyperbolic\n", + "and\n", + "Fireworks AI\n", + ", you can run¬†\n", + "DeepSeek AI\n", + "'s¬†new model using Hugging Face Inference Providers. What has changed since V3? Here's my quick home experiment üëá \n", + "\n", + "DeepSeek silently dropped an update to V3 yesterday. Benchmark results are available, showing significant improvements over V3. \n", + "\n", + "Still, it is always a good idea to run new models on data you care about and see more detailed, fine-grained results.\n", + "\n", + "Now that we can all run these new models from Day 0 with no GPUs required, I wanted to share my approach with an example I created this morning:\n", + "\n", + "1. I got a sample from the LIMA dataset (containing high-quality general instructions).\n", + "2. Run the instructions with V3 and the new version V3-0324.\n", + "3. 
Define and run a simple judge with Llama3.3-70B to compare the model responses.\n", + "4. Push the dataset and pipeline so you can check and run similar experiments! (see first comment)\n", + "5. Extracted the results with\n", + "Hugging Face\n", + "Data Studio.\n", + "\n", + "Results summary\n", + "- LIMA is not very challenging, but it is still interesting to see the differences between the two models.\n", + "- A majority of Ties indicate that both models are close for this domain and task.\n", + "- But still, V3-0324 consistently wins over V3 (33 times vs 6 times).\n", + "\n", + "As usual, the dataset, prompts, and pipeline are open-source (see first comment).\n", + "\n", + "What other experiments you'd like to see?\n", + "191\n", + "4 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "60,588 followers\n", + "2d\n", + "Report this post\n", + "StarVector is a multimodal vision-language model for generating SVG (Scalable Vector Graphics). üëá \n", + "\n", + "It can be used to perform image2SVG and text2SVG generation. Live demo shows how the image generation is treated similar to a code generation task, using the power of StarVector multimodal VLM! ü§© \n", + "\n", + "üöÄ Play with the app on Huggingface:\n", + "https://lnkd.in/gCzdEbvj\n", + "ü•≥ If you want to build the model locally with a gradio app:\n", + "https://lnkd.in/gDzCpdDN\n", + "‚Ķmore\n", + "1,365\n", + "39 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Giada Pistilli\n", + "Principal Ethicist at Hugging Face | PhD in Philosophy at Sorbonne Universit√©\n", + "3d\n", + "Report this post\n", + "Excited to share our latest piece on the double-edged sword of AI agents published in\n", + "MIT Technology Review\n", + "! 
ü§ñ It builds on our research paper that's been making waves lately -- pretty cool to see all the attention it's getting!\n", + "\n", + "As these systems move beyond chat windows to navigate applications and execute complex tasks independently, we need to ask ourselves: how much control are we willing to surrender, and at what cost?\n", + "\n", + "In our recent op-ed,\n", + "Margaret Mitchell\n", + ",\n", + "Avijit Ghosh, PhD\n", + ",\n", + "Dr. Sasha Luccioni\n", + ", and I explore why the very feature being sold (reduced human oversight) is actually the primary vulnerability. When AI systems can control multiple information sources simultaneously, the potential for harm explodes exponentially. \n", + "\n", + "We imagine that \"It wasn't me‚Äîit was my agent!!\" will soon be a common refrain to excuse bad outcomes.\n", + "\n", + "The benefits of AI agents are undeniable, from assisting people with mobility challenges to coordinating emergency responses. But these benefits don't require surrendering complete human control.\n", + "\n", + "At\n", + "Hugging Face\n", + ", we're developing frameworks like smolagents that prioritize transparency and appropriate human oversight. 
Because human judgment, with all its imperfections, remains the fundamental component in ensuring these systems serve rather than subvert our interests.\n", + "123\n", + "15 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Join now to see what you are missing\n", + "Find people you know at Hugging Face\n", + "Browse recommended jobs for you\n", + "View all updates, news, and articles\n", + "Join now\n", + "Similar pages\n", + "Anthropic\n", + "Research Services\n", + "Mistral AI\n", + "Technology, Information and Internet\n", + "Paris, France\n", + "Perplexity\n", + "Software Development\n", + "San Francisco, California\n", + "OpenAI\n", + "Research Services\n", + "San Francisco, CA\n", + "LangChain\n", + "Technology, Information and Internet\n", + "Generative AI\n", + "Technology, Information and Internet\n", + "DeepLearning.AI\n", + "Software Development\n", + "Palo Alto, California\n", + "Google DeepMind\n", + "Research Services\n", + "London, London\n", + "LlamaIndex\n", + "Technology, Information and Internet\n", + "San Francisco, California\n", + "Cohere\n", + "Software Development\n", + "Toronto, Ontario\n", + "Show more similar pages\n", + "Show fewer similar pages\n", + "Browse jobs\n", + "Engineer jobs\n", + "555,845 open jobs\n", + "Machine Learning Engineer jobs\n", + "148,937 open jobs\n", + "Scientist jobs\n", + "48,969 open jobs\n", + "Software Engineer jobs\n", + "300,699 open jobs\n", + "Analyst jobs\n", + "694,057 open jobs\n", + "Intern jobs\n", + "71,196 open jobs\n", + "Developer jobs\n", + "258,935 open jobs\n", + "Manager jobs\n", + "1,880,925 open jobs\n", + "Product Manager jobs\n", + "199,941 open jobs\n", + "Director jobs\n", + "1,220,357 open jobs\n", + "Python Developer jobs\n", + "46,642 open jobs\n", + "Data Scientist jobs\n", + "264,158 open jobs\n", + "Data Analyst jobs\n", + "329,009 open jobs\n", + "Senior Software Engineer jobs\n", + "78,145 open jobs\n", + "Project Manager jobs\n", + "253,048 open jobs\n", + "Researcher 
jobs\n", + "195,654 open jobs\n", + "Associate jobs\n", + "1,091,945 open jobs\n", + "Data Engineer jobs\n", + "192,126 open jobs\n", + "Vice President jobs\n", + "235,270 open jobs\n", + "Specialist jobs\n", + "768,666 open jobs\n", + "Show more jobs like this\n", + "Show fewer jobs like this\n", + "Funding\n", + "Hugging Face\n", + "8 total rounds\n", + "Last Round\n", + "Series unknown\n", + "Sep 1, 2024\n", + "External Crunchbase Link for last round of funding\n", + "See more info on\n", + "crunchbase\n", + "More searches\n", + "More searches\n", + "Engineer jobs\n", + "Scientist jobs\n", + "Machine Learning Engineer jobs\n", + "Software Engineer jobs\n", + "Intern jobs\n", + "Developer jobs\n", + "Analyst jobs\n", + "Manager jobs\n", + "Senior Software Engineer jobs\n", + "Data Scientist jobs\n", + "Researcher jobs\n", + "Product Manager jobs\n", + "Director jobs\n", + "Associate jobs\n", + "Intelligence Specialist jobs\n", + "Data Analyst jobs\n", + "Data Science Specialist jobs\n", + "Python Developer jobs\n", + "Quantitative Analyst jobs\n", + "Project Manager jobs\n", + "Account Executive jobs\n", + "Specialist jobs\n", + "Data Engineer jobs\n", + "Designer jobs\n", + "Quantitative Researcher jobs\n", + "Consultant jobs\n", + "Solutions Architect jobs\n", + "Vice President jobs\n", + "User Experience Designer jobs\n", + "Head jobs\n", + "Full Stack Engineer jobs\n", + "Engineering Manager jobs\n", + "Software Engineer Intern jobs\n", + "Junior Software Engineer jobs\n", + "Software Intern jobs\n", + "Product Designer jobs\n", + "Solutions Engineer jobs\n", + "Staff Software Engineer jobs\n", + "Program Manager jobs\n", + "Senior Scientist jobs\n", + "Writer jobs\n", + "Research Intern jobs\n", + "Senior Product Manager jobs\n", + "Summer Intern jobs\n", + "Account Manager jobs\n", + "Recruiter jobs\n", + "Lead jobs\n", + "Research Engineer jobs\n", + "Computer Science Intern jobs\n", + "Platform Engineer jobs\n", + "Junior Developer jobs\n", + "Android 
Developer jobs\n", + "User Experience Researcher jobs\n", + "Java Software Engineer jobs\n", + "Site Reliability Engineer jobs\n", + "Graduate jobs\n", + "Software Engineering Manager jobs\n", + "Representative jobs\n", + "Business Development Specialist jobs\n", + "Computer Engineer jobs\n", + "LinkedIn\n", + "© 2025\n", + "About\n", + "Accessibility\n", + "User Agreement\n", + "Privacy Policy\n", + "Cookie Policy\n", + "Copyright Policy\n", + "Brand Policy\n", + "Guest Controls\n", + "Community Guidelines\n", + "ÿߟÑÿπÿ±ÿ®Ÿäÿ© (Arabic)\n", + "‡¶¨‡¶æ‡¶Ç‡¶≤‡¶æ (Bangla)\n", + "ƒåe≈°tina (Czech)\n", + "Dansk (Danish)\n", + "Deutsch (German)\n", + "ŒïŒªŒªŒ∑ŒΩŒπŒ∫Œ¨ (Greek)\n", + "English (English)\n", + "Espa√±ol (Spanish)\n", + "ŸÅÿßÿ±ÿ≥€å (Persian)\n", + "Suomi (Finnish)\n", + "Fran√ßais (French)\n", + "‡§π‡§ø‡§Ç‡§¶‡•Ä (Hindi)\n", + "Magyar (Hungarian)\n", + "Bahasa Indonesia (Indonesian)\n", + "Italiano (Italian)\n", + "◊¢◊ë◊®◊ô◊™ (Hebrew)\n", + "Êó•Êú¨Ë™û (Japanese)\n", + "Ìïú͵≠Ïñ¥ (Korean)\n", + "‡§Æ‡§∞‡§æ‡§†‡•Ä (Marathi)\n", + "Bahasa Malaysia (Malay)\n", + "Nederlands (Dutch)\n", + "Norsk (Norwegian)\n", + "‡®™‡©∞‡®ú‡®æ‡®¨‡©Ä (Punjabi)\n", + "Polski (Polish)\n", + "Portugu√™s (Portuguese)\n", + "Rom√¢nƒÉ (Romanian)\n", + "–†—É—Å—Å–∫–∏–π (Russian)\n", + "Svenska (Swedish)\n", + "‡∞§‡±Ü‡∞≤‡±Å‡∞ó‡±Å (Telugu)\n", + "‡∏†‡∏≤‡∏©‡∏≤‡πч∏ó‡∏¢ (Thai)\n", + "Tagalog (Tagalog)\n", + "T√ºrk√ße (Turkish)\n", + "–£–∫—Ä–∞—ó–Ω—Å—å–∫–∞ (Ukrainian)\n", + "Ti·∫øng Vi·ªát (Vietnamese)\n", + "ÁÆÄ‰Ωì‰∏≠Êñá (Chinese (Simplified))\n", + "Ê≠£È´î‰∏≠Êñá (Chinese (Traditional))\n", + "Language\n", + "Agree & Join LinkedIn\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "Sign in to see who you already know at Hugging Face\n", + "Sign in\n", + "Welcome back\n", + "Email or phone\n", + "Password\n", + "Show\n", + "Forgot password?\n", + "Sign in\n", + "or\n", + "By 
clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "New to LinkedIn?\n", + "Join now\n", + "or\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "New to LinkedIn?\n", + "Join now\n", + "LinkedIn\n", + "LinkedIn is better on the app\n", + "Don‚Äôt have the app? Get it in the Microsoft Store.\n", + "Open the app\n", + "\n", + "\n", + "\n", + "Grammarly integration\n", + "Webpage Title:\n", + "Site not found · GitHub Pages\n", + "Webpage Contents:\n", + "404\n", + "There isn't a GitHub Pages site here.\n", + "If you're trying to publish one,\n", + "read the full documentation\n", + "to learn how to set up\n", + "GitHub Pages\n", + "for your repository, organization, or user account.\n", + "GitHub Status\n", + "—\n", + "@githubstatus\n", + "\n", + "\n", + "\n", + "Writer product page\n", + "\n", + "\n", + "GitHub repository\n", + "Webpage Title:\n", + "Hugging Face · GitHub\n", + "Webpage Contents:\n", + "Skip to content\n", + "Navigation Menu\n", + "Toggle navigation\n", + "Sign in\n", + "huggingface\n", + "Product\n", + "GitHub Copilot\n", + "Write better code with AI\n", + "Security\n", + "Find and fix vulnerabilities\n", + "Actions\n", + "Automate any workflow\n", + "Codespaces\n", + "Instant dev environments\n", + "Issues\n", + "Plan and track work\n", + "Code Review\n", + "Manage code changes\n", + "Discussions\n", + "Collaborate outside of code\n", + "Code Search\n", + "Find more, search less\n", + "Explore\n", + "All features\n", + "Documentation\n", + "GitHub Skills\n", + "Blog\n", + "Solutions\n", + "By company size\n", + "Enterprises\n", + "Small and medium teams\n", + "Startups\n", + "Nonprofits\n", + "By use case\n", + "DevSecOps\n", + "DevOps\n", + "CI/CD\n", + "View all use cases\n", + "By industry\n", + 
"Healthcare\n", + "Financial services\n", + "Manufacturing\n", + "Government\n", + "View all industries\n", + "View all solutions\n", + "Resources\n", + "Topics\n", + "AI\n", + "DevOps\n", + "Security\n", + "Software Development\n", + "View all\n", + "Explore\n", + "Learning Pathways\n", + "Events & Webinars\n", + "Ebooks & Whitepapers\n", + "Customer Stories\n", + "Partners\n", + "Executive Insights\n", + "Open Source\n", + "GitHub Sponsors\n", + "Fund open source developers\n", + "The ReadME Project\n", + "GitHub community articles\n", + "Repositories\n", + "Topics\n", + "Trending\n", + "Collections\n", + "Enterprise\n", + "Enterprise platform\n", + "AI-powered developer platform\n", + "Available add-ons\n", + "Advanced Security\n", + "Enterprise-grade security features\n", + "Copilot for business\n", + "Enterprise-grade AI features\n", + "Premium Support\n", + "Enterprise-grade 24/7 support\n", + "Pricing\n", + "Search or jump to...\n", + "Search code, repositories, users, issues, pull requests...\n", + "Search\n", + "Clear\n", + "Search syntax tips\n", + "Provide feedback\n", + "We read every piece of feedback, and take your input very seriously.\n", + "Include my email address so I can be contacted\n", + "Cancel\n", + "Submit feedback\n", + "Saved searches\n", + "Use saved searches to filter your results more quickly\n", + "Cancel\n", + "Create saved search\n", + "Sign in\n", + "Sign up\n", + "Reseting focus\n", + "You signed in with another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You signed out in another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You switched accounts on another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "Dismiss alert\n", + "Hugging Face\n", + "The AI community building the future.\n", + "Verified\n", + "We've verified that the organization\n", + "huggingface\n", + "controls the domain:\n", + "huggingface.co\n", + "Learn more about verified organizations\n", + 
"46.8k\n", + "followers\n", + "NYC + Paris\n", + "https://huggingface.co/\n", + "X\n", + "@huggingface\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "Pinned\n", + "Loading\n", + "transformers\n", + "transformers\n", + "Public\n", + "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", + "Python\n", + "142k\n", + "28.4k\n", + "diffusers\n", + "diffusers\n", + "Public\n", + "🤗 Diffusers: State-of-the-art diffusion models for image, video, and audio generation in PyTorch and FLAX.\n", + "Python\n", + "28.3k\n", + "5.8k\n", + "datasets\n", + "datasets\n", + "Public\n", + "🤗 The largest hub of ready-to-use datasets for ML models with fast, easy-to-use and efficient data manipulation tools\n", + "Python\n", + "19.9k\n", + "2.8k\n", + "peft\n", + "peft\n", + "Public\n", + "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.\n", + "Python\n", + "17.9k\n", + "1.8k\n", + "accelerate\n", + "accelerate\n", + "Public\n", + "🚀 A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support\n", + "Python\n", + "8.5k\n", + "1.1k\n", + "optimum\n", + "optimum\n", + "Public\n", + "🚀 Accelerate inference and training of 🤗 Transformers, Diffusers, TIMM and Sentence Transformers with easy to use hardware optimization tools\n", + "Python\n", + "2.8k\n", + "516\n", + "Repositories\n", + "Loading\n", + "Type\n", + "Select type\n", + "Forks\n", + "Archived\n", + "Mirrors\n", + "Templates\n", + "Language\n", + "Select language\n", + "All\n", + "C\n", + "C#\n", + "C++\n", + "Cuda\n", + "Dockerfile\n", + "Go\n", + "Handlebars\n", + "HTML\n", + "Java\n", + "JavaScript\n", + "Jupyter Notebook\n", + "Kotlin\n", + "Lua\n", + "MDX\n", + 
"Mustache\n", + "Nix\n", + "Python\n", + "Rust\n", + "Shell\n", + "Smarty\n", + "Swift\n", + "TypeScript\n", + "Sort\n", + "Select order\n", + "Last updated\n", + "Name\n", + "Stars\n", + "Showing 10 of 301 repositories\n", + "kernel-builder\n", + "Public\n", + "👷 Build compute kernels\n", + "huggingface/kernel-builder’s past year of commit activity\n", + "Nix\n", + "22\n", + "6\n", + "7\n", + "4\n", + "Updated\n", + "Mar 28, 2025\n", + "lerobot\n", + "Public\n", + "🤗 LeRobot: Making AI for Robotics more accessible with end-to-end learning\n", + "huggingface/lerobot’s past year of commit activity\n", + "Python\n", + "11,165\n", + "Apache-2.0\n", + "1,225\n", + "153\n", + "118\n", + "Updated\n", + "Mar 28, 2025\n", + "smolagents\n", + "Public\n", + "🤗 smolagents: a barebones library for agents that think in python code.\n", + "huggingface/smolagents’s past year of commit activity\n", + "Python\n", + "15,893\n", + "Apache-2.0\n", + "1,404\n", + "105\n", + "99\n", + "Updated\n", + "Mar 28, 2025\n", + "notebooks\n", + "Public\n", + "Notebooks using the Hugging Face libraries 🤗\n", + "huggingface/notebooks’s past year of commit activity\n", + "Jupyter Notebook\n", + "3,973\n", + "Apache-2.0\n", + "1,630\n", + "131\n", + "69\n", + "Updated\n", + "Mar 28, 2025\n", + "meshgen\n", + "Public\n", + "A blender addon for generating meshes with AI\n", + "huggingface/meshgen’s past year of commit activity\n", + "Python\n", + "520\n", + "MIT\n", + "29\n", + "9\n", + "0\n", + "Updated\n", + "Mar 28, 2025\n", + "huggingface_hub\n", + "Public\n", + "The official Python client for the Huggingface Hub.\n", + "huggingface/huggingface_hub’s past year of commit activity\n", + "Python\n", + "2,469\n", + "Apache-2.0\n", + "658\n", + "146\n", + "(4 issues need help)\n", + "19\n", + "Updated\n", + "Mar 28, 2025\n", + "lighteval\n", + "Public\n", + "Lighteval is your all-in-one toolkit for evaluating LLMs across multiple backends\n", + "huggingface/lighteval’s past year of commit activity\n", 
+ "Python\n", + "1,349\n", + "MIT\n", + "211\n", + "90\n", + "(1 issue needs help)\n", + "33\n", + "Updated\n", + "Mar 28, 2025\n", + "trl\n", + "Public\n", + "Train transformer language models with reinforcement learning.\n", + "huggingface/trl’s past year of commit activity\n", + "Python\n", + "12,887\n", + "Apache-2.0\n", + "1,734\n", + "329\n", + "73\n", + "Updated\n", + "Mar 27, 2025\n", + "optimum-neuron\n", + "Public\n", + "Easy, fast and very cheap training and inference on AWS Trainium and Inferentia chips.\n", + "huggingface/optimum-neuron’s past year of commit activity\n", + "Jupyter Notebook\n", + "222\n", + "Apache-2.0\n", + "71\n", + "19\n", + "8\n", + "Updated\n", + "Mar 27, 2025\n", + "hub-docs\n", + "Public\n", + "Docs of the Hugging Face Hub\n", + "huggingface/hub-docs’s past year of commit activity\n", + "Handlebars\n", + "363\n", + "Apache-2.0\n", + "286\n", + "100\n", + "35\n", + "Updated\n", + "Mar 27, 2025\n", + "View all repositories\n", + "People\n", + "View all\n", + "Top languages\n", + "Loading…\n", + "Most used topics\n", + "Loading…\n", + "Footer\n", + "© 2025 GitHub, Inc.\n", + "Footer navigation\n", + "Terms\n", + "Privacy\n", + "Security\n", + "Status\n", + "Docs\n", + "Contact\n", + "Manage cookies\n", + "Do not share my personal information\n", + "You can’t perform that action at this time.\n", + "\n", + "\n", + "\n", + "Discord server for community discussion\n", + "\n" + ] + } + ], + "source": [ + "print(get_all_details(\"https://huggingface.co\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "id": "78ed1710-303f-45e4-9ce7-42d00247dc8a", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "and creates a short brochure about the company for prospective customers, investors and recruits. 
Respond in markdown.\\\n", + "Include details of company culture, customers and careers/jobs if you have the information.\"\n", + "\n", + "# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", + "\n", + "# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", + "# Include details of company culture, customers and careers/jobs if you have the information.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 93, + "id": "ad673f38-dd5c-40a5-9c79-768689c74488", + "metadata": {}, + "outputs": [], + "source": [ + "def get_brochure_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " user_prompt += get_all_details(url)\n", + " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 99, + "id": "7d2278ff-3cd4-460c-af4d-eed6a5d4c78b", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'Company Page', 'url': 'https://huggingface.co'}, {'type': 'About page', 'url': 'https://docs.huggingface.co/'}, {'type': 'GitHub', 'url': 'https://github.com/huggingface'}, {'type': 'LinkedIn Company Page', 'url': 'https://www.linkedin.com/company/huggingface/'}, {'type': 'Twitter Company Handle', 'url': 'https://twitter.com/huggingface'}, {'type': 'Blog', 'url': 'https://blog.huggingface.co'}, {'type': 'Discord Community', 'url': 
'https://join.huggingface.codiscord'}, {'type': 'Join Careers', 'url': 'https://apply.workable.com/huggingface/'}]}\n", + "website error\n", + "website error\n", + "website error\n" + ] + }, + { + "data": { + "text/plain": [ + "'You are looking at a company called: HuggingFace\\nHere are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\nLanding page:\\nWebpage Title:\\nHugging Face – The AI community building the future.\\nWebpage Contents:\\nHugging Face\\nModels\\nDatasets\\nSpaces\\nPosts\\nDocs\\nEnterprise\\nPricing\\nLog In\\nSign Up\\nThe AI community building the future.\\nThe platform where the machine learning community collaborates on models, datasets, and applications.\\nExplore AI Apps\\nor\\nBrowse 1M+ models\\nTrending on\\nthis week\\nModels\\ndeepseek-ai/DeepSeek-V3-0324\\nUpdated\\n1 day ago\\n•\\n32.5k\\n•\\n1.88k\\nQwen/Qwen2.5-Omni-7B\\nUpdated\\n1 day ago\\n•\\n3.03k\\n•\\n662\\nmanycore-research/SpatialLM-Llama-1B\\nUpdated\\n7 days ago\\n•\\n6.14k\\n•\\n754\\nByteDance/InfiniteYou\\nUpdated\\n3 days ago\\n•\\n436\\nds4sd/SmolDocling-256M-preview\\nUpdated\\n5 days ago\\n•\\n40.8k\\n•\\n994\\nBrowse 1M+ models\\nSpaces\\nRunning\\non\\nZero\\n473\\n473\\nInfiniteYou-FLUX\\n📸\\nFlexible Photo Recrafting While Preserving Your Identity\\nRunning\\n323\\n323\\nGemini Co-Drawing\\n✏\\nGemini 2.0 native image generation co-doodling\\nRunning\\non\\nZero\\n204\\n204\\nLHM\\n⚡\\nLarge Animatable Human Model\\nRunning\\non\\nL40S\\n325\\n325\\nStable Virtual Camera\\n⚡\\nGenerate virtual camera views from input images\\nRunning\\n163\\n163\\nHunyuan T1\\n💬\\nHunyuan T1模型体验\\nBrowse 400k+ applications\\nDatasets\\nnvidia/Llama-Nemotron-Post-Training-Dataset-v1\\nUpdated\\n10 days ago\\n•\\n6.96k\\n•\\n256\\nglaiveai/reasoning-v1-20m\\nUpdated\\n9 days ago\\n•\\n5.71k\\n•\\n116\\nFreedomIntelligence/medical-o1-reasoning-SFT\\nUpdated\\nFeb 
22\\n•\\n27k\\n•\\n566\\nfacebook/collaborative_agent_bench\\nUpdated\\n8 days ago\\n•\\n83\\n•\\n47\\na-m-team/AM-DeepSeek-R1-Distilled-1.4M\\nUpdated\\nabout 18 hours ago\\n•\\n2.26k\\n•\\n68\\nBrowse 250k+ datasets\\nThe Home of Machine Learning\\nCreate, discover and collaborate on ML better.\\nThe collaboration platform\\nHost and collaborate on unlimited public models, datasets and applications.\\nMove faster\\nWith the HF Open source stack.\\nExplore all modalities\\nText, image, video, audio or even 3D.\\nBuild your portfolio\\nShare your work with the world and build your ML profile.\\nSign Up\\nAccelerate your ML\\nWe provide paid Compute and Enterprise solutions.\\nCompute\\nDeploy on optimized\\nInference Endpoints\\nor update your\\nSpaces applications\\nto a GPU in a few clicks.\\nView pricing\\nStarting at $0.60/hour for GPU\\nEnterprise\\nGive your team the most advanced platform to build AI with enterprise-grade security, access controls and\\n\\t\\t\\tdedicated support.\\nGetting started\\nStarting at $20/user/month\\nSingle Sign-On\\nRegions\\nPriority Support\\nAudit Logs\\nResource Groups\\nPrivate Datasets Viewer\\nMore than 50,000 organizations are using Hugging Face\\nAi2\\nEnterprise\\nnon-profit\\n•\\n396 models\\n•\\n2.97k followers\\nAI at Meta\\nEnterprise\\ncompany\\n•\\n2.07k models\\n•\\n5.27k followers\\nAmazon\\ncompany\\n•\\n10 models\\n•\\n2.91k followers\\nGoogle\\ncompany\\n•\\n974 models\\n•\\n10.6k followers\\nIntel\\ncompany\\n•\\n219 models\\n•\\n2.37k followers\\nMicrosoft\\ncompany\\n•\\n365 models\\n•\\n10.7k followers\\nGrammarly\\nEnterprise\\ncompany\\n•\\n10 models\\n•\\n145 followers\\nWriter\\nEnterprise\\ncompany\\n•\\n21 models\\n•\\n253 followers\\nOur Open Source\\nWe are building the foundation of ML tooling with the community.\\nTransformers\\n142,056\\nState-of-the-art ML for PyTorch, TensorFlow, JAX\\nDiffusers\\n28,292\\nState-of-the-art Diffusion models in PyTorch\\nSafetensors\\n3,189\\nSafe way to 
store/distribute neural network weights\\nHub Python Library\\n2,469\\nPython client to interact with the Hugging Face Hub\\nTokenizers\\n9,538\\nFast tokenizers optimized for research & production\\nTRL\\n12,887\\nTrain transformers LMs with reinforcement learning\\nTransformers.js\\n13,301\\nState-of-the-art ML running directly in your browser\\nsmolagents\\n15,893\\nSmol library to build great agents in Python\\nPEFT\\n17,927\\nParameter-efficient finetuning for large language models\\nDatasets\\n19,888\\nAccess & share datasets for any ML tasks\\nText Generation Inference\\n9,937\\nServe language models with TGI optimized toolkit\\nAccelerate\\n8,542\\nTrain PyTorch models with multi-GPU, TPU, mixed precision\\nSystem theme\\nWebsite\\nModels\\nDatasets\\nSpaces\\nTasks\\nInference Endpoints\\nHuggingChat\\nCompany\\nAbout\\nBrand assets\\nTerms of service\\nPrivacy\\nJobs\\nPress\\nResources\\nLearn\\nDocumentation\\nBlog\\nForum\\nService Status\\nSocial\\nGitHub\\nTwitter\\nLinkedIn\\nDiscord\\n\\n\\n\\nCompany Page\\nWebpage Title:\\nHugging Face – The AI community building the future.\\nWebpage Contents:\\nHugging Face\\nModels\\nDatasets\\nSpaces\\nPosts\\nDocs\\nEnterprise\\nPricing\\nLog In\\nSign Up\\nThe AI community building the future.\\nThe platform where the machine learning community collaborates on models, datasets, and applications.\\nExplore AI Apps\\nor\\nBrowse 1M+ models\\nTrending on\\nthis week\\nModels\\ndeepseek-ai/DeepSeek-V3-0324\\nUpdated\\n1 day ago\\n•\\n32.5k\\n•\\n1.88k\\nQwen/Qwen2.5-Omni-7B\\nUpdated\\n1 day ago\\n•\\n3.03k\\n•\\n662\\nmanycore-research/SpatialLM-Llama-1B\\nUpdated\\n7 days ago\\n•\\n6.14k\\n•\\n754\\nByteDance/InfiniteYou\\nUpdated\\n3 days ago\\n•\\n436\\nds4sd/SmolDocling-256M-preview\\nUpdated\\n5 days ago\\n•\\n40.8k\\n•\\n994\\nBrowse 1M+ models\\nSpaces\\nRunning\\non\\nZero\\n473\\n473\\nInfiniteYou-FLUX\\n📸\\nFlexible Photo Recrafting While Preserving Your Identity\\nRunning'" + ] + }, + 
"execution_count": 99, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": 101, + "id": "0dcf1180-75ef-4367-a764-2b442b91bc97", + "metadata": {}, + "outputs": [], + "source": [ + "def create_brochure(company_name, url):\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " )\n", + " result = response.choices[0].message.content\n", + " display(Markdown(result))" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "id": "684e56b5-1799-469c-8772-1d467b666921", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'About page', 'url': 'https://huggingface.co/'}, {'type': 'Company page', 'url': 'https://huggingface.co/'}, {'type': 'Careers/Jobs page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'Pricing page', 'url': 'https://ui.endpoints.huggingface.co/pricing#endpoints'}, {'type': 'Enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'Blog page', 'url': 'https://blog.huggingface.co/'}, {'type': 'Documentation pages', 'url': 'https://docs.huggingface.co/'}]}\n", + "website error\n", + "website error\n" + ] + }, + { + "data": { + "text/markdown": [ + "# Hugging Face Brochure\n", + "## About Us\n", + "\n", + "Hugging Face is the AI community building the future. We're a platform where the machine learning community collaborates on models, datasets, and applications. 
Our mission is to make it easy for anyone to build, discover, and connect with other ML practitioners on our open-source ecosystem.\n", + "\n", + "### Company Culture\n", + "\n", + "At Hugging Face, we value collaboration, innovation, and openness. We believe that AI should be accessible to everyone, and we strive to create an inclusive environment where ML researchers and practitioners can share ideas, learn from each other, and accelerate their progress.\n", + "\n", + "### Our Community\n", + "\n", + "Our community is growing rapidly, with over 50,000 organizations using our platform. We're proud of our collaborations with leading companies like Meta, Google, Amazon, Intel, Microsoft, Grammarly, Writer, and Hunyuan T1, who are leveraging our technology to build cutting-edge AI applications.\n", + "\n", + "### Products & Services\n", + "\n", + "We offer a wide range of products and services, including:\n", + "\n", + "* **Hugging Face Hub**: A vast catalog of pre-trained models, datasets, and applications developed by the community.\n", + "* **Spaces**: A platform for hosting, collaborating on, and deploying public models, datasets, and applications.\n", + "* **Computing**: Optimized inference endpoints for deploying models on-premise or in the cloud.\n", + "\n", + "### Leadership\n", + "\n", + "Our leadership team is comprised of experienced professionals with a deep understanding of AI and machine learning.\n", + "\n", + "### Career Opportunities\n", + "\n", + "Join our team! We're always looking for talented ML practitioners to help us build the future of AI. 
Check out our career page for more information.\n", + "\n", + "## Model Gallery\n", + "Browse 1M+ pre-trained models and discover new applications.\n", + "\n", + "*DeepSeek-V3-0324*\n", + "\n", + "Qwen/Omni-7B\n", + "\n", + "SpatialLM-Llama-1B\n", + "\n", + "InfiniteYou FLUX\n", + "\n", + "# Datasets\n", + " Access & share datasets for any ML tasks.\n", + "\n", + "* NVidia/Llama-Nemotron-Post-Training-Dataset-v1*\n", + "* glaive.ai/reasoning-v1-20M*\n", + "\n", + "## Get Started\n", + "Accelerate your ML with our paid Compute and Enterprise solutions. Sign up today!" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "create_brochure(\"HuggingFace\", \"https://huggingface.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "id": "a51a4434-6e75-4caa-a857-d162205431e5", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_brochure(company_name, url):\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " stream=True\n", + " )\n", + " \n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 109, + "id": "258beed4-5cfb-4de2-9855-20cc1b4c51ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'About page', 'url': 'https://huggingface.co'}, {'type': 'Brand page', 'url': 'https://huggingface.com/brand/'}, {'type': 'Home (company) page', 'url': 
'https://huggingface.co/company'}, {'type': 'Careers/Jobs page', 'url': 'https://apply.workable.com/huggingface/', 'alt': 'Join our team'}, {'type': 'Learn page', 'url': 'https://learn.huggingface.co'}, {'type': 'Blog page', 'url': 'https://blog.huggingface.co'}, {'type': 'News/Updates page', 'url': 'https://status.huggingface.co/'}, {'type': 'Forum/Discussion page', 'url': 'https://discuss.huggingface.co'}, {'type': 'Twitter account', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn company page', 'url': 'https://www.linkedin.com/company/huggingface/'}, {'type': 'GitHub repository', 'url': 'https://github.com/huggingface'}, {'type': 'Documentation/Transformers page', 'url': 'https://huggingface.co/docs/transformers'}, {'type': 'Documentation/Hub page', 'url': 'https://huggingface.co/docs/huggingface_hub'}, {'type': 'Documentation/Safetensors page', 'url': 'https://huggingface.co/docs/safetensors'}]}\n", + "website error\n", + "website error\n" + ] + }, + { + "data": { + "text/markdown": [ + "# Hugging Face: The AI Community Building the Future\n", + "\n", + "[Image of a futuristic cityscape with artificial intelligence elements, such as robots and screens displaying code]\n", + "\n", + "Welcome to Hugging Face, the leading platform for the machine learning community. We are on a mission to build the future of artificial intelligence through collaboration, innovation, and open-source technology.\n", + "\n", + "## About Us\n", + "\n", + "Hugging Face was founded by Francis Brezillon who aims to bring together researchers, developers, and organizations from around the world to collaborate on building AI models. 
Our platform provides a suite of tools and resources for text, image, video, audio, and 3D models, making it easier for individuals and teams to accelerate their machine learning journey.\n", + "\n", + "## Models\n", + "\n", + "### Browse 1M+ Models\n", + "\n", + "From deep-seek to transformers, our model repository is home to over 1 million AI models. Explore popular models like:\n", + "\n", + "* **deepseek-ai/DeepSeek-V3-0324**: An optimized version of the Deep Seek model for image generation\n", + "* **Qwen/Qwen2.5-Omni-7B**: A novel framework for multi-modal language understanding\n", + "* **manycore-research/SpatialLM-Llama-1B**: A spatial transformer model for natural language processing\n", + "\n", + "**Select Your Model**\n", + "\n", + "| Model | Description |\n", + "| --- | --- |\n", + "| InfiniteYou-FLUX | Flexible photo recrafting while preserving your identity |\n", + "| Gemini Co-Drawing | Native image generation co-doodling with AI |\n", + "| LHM | Large animatable human model for virtual camera views |\n", + "\n", + "## Datasets\n", + "\n", + "With access to over 250,000 datasets, our platform enables you to train and validate your models on a diverse range of data sources.\n", + "\n", + "* **nvidia/Llama-Nemotron-Post-Training-Dataset-v1**: A dataset for learning from few-shot text classification\n", + "* **glaiveai/reasoning-v1-20m**: An extensive benchmark for common sense reasoning\n", + "\n", + "## Spaces\n", + "\n", + "Our spaces feature allow users to build, share, and deploy custom models directly onto our infrastructure.\n", + "\n", + "### 400k+ Applications\n", + "\n", + "From chatbots to image generators, our platform hosts a vast range of applications built on top of our ecosystem.\n", + "\n", + "## Community Support\n", + "\n", + "Join us at [Twitter](https://twitter.com/HuggingFace) for the latest news, research opportunities, and open-source releases." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e327058-3cfa-4299-872e-9ffd62cbee63", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:base] *", + "language": "python", + "name": "conda-base-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 49592995e53882cae444f82a51f8ad26633d4351 Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Thu, 3 Apr 2025 13:58:32 +0530 Subject: [PATCH 03/19] AI Booking Chatbot --- week2/AI Booking Chatbot.ipynb | 717 +++++++++++++++++++++++++++++++++ 1 file changed, 717 insertions(+) create mode 100644 week2/AI Booking Chatbot.ipynb diff --git a/week2/AI Booking Chatbot.ipynb b/week2/AI Booking Chatbot.ipynb new file mode 100644 index 0000000..83cb6d9 --- /dev/null +++ b/week2/AI Booking Chatbot.ipynb @@ -0,0 +1,717 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", + "metadata": {}, + "source": [ + "# Additional End of week Exercise - week 2\n", + "\n", + "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", + "\n", + "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", + "\n", + "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. 
ChatGPT or Claude can help you, or email me if you have questions.\n", + "\n", + "I will publish a full solution here soon - unless someone beats me to it...\n", + "\n", + "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a07e7793-b8f5-44f4-aded-5562f633271a", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import base64\n", + "from io import BytesIO\n", + "import tempfile\n", + "import subprocess\n", + "from pydub import AudioSegment\n", + "import time\n", + "import anthropic" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "717ea9d4-1e72-4035-b7c5-5d61da5b8ea3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n" + ] + } + ], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "openai = OpenAI()\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cc78f4fd-9920-4872-9117-90cd2aeb2a06", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant. \"\n", + "system_message += \"Give short, courteous answers. You can check ticket price, availability, and reserve tickets for users. \"\n", + "system_message += \"Always be accurate. 
If you don't know the answer, say so.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "b2701cc0-6403-4880-9b31-e6e39e89feb4", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's start by making a useful function\n", + "\n", + "ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", + "\n", + "def get_ticket_price(destination_city):\n", + " print(f\"Tool get_ticket_price called for {destination_city}\")\n", + " city = destination_city.lower()\n", + " return ticket_prices.get(city, \"Unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "5e33902f-c2c3-4fb0-b01d-a346a4dff811", + "metadata": {}, + "outputs": [], + "source": [ + "ticket_availability = {\"london\": \"20\", \"paris\": \"90\", \"tokyo\": \"100\", \"berlin\": \"2\"}" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "27dfca47-2a38-49f3-8905-f583d98710a5", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's start by making a useful function\n", + "def get_ticket_availability(destination_city):\n", + " print(f\"Tool get_ticket_availability called for {destination_city}\")\n", + " available = destination_city.lower()\n", + " return ticket_availability.get(available, \"Unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "6ae7371b-031e-47d7-afaf-42d6758ccd92", + "metadata": {}, + "outputs": [], + "source": [ + "def get_ticket_price_availability(destination_city):\n", + " print(f\"Tool get_ticket_price_availability called for {destination_city}\")\n", + " available = destination_city.lower()\n", + " price = destination_city.lower()\n", + " return ticket_prices.get(price, \"Unknown\"), ticket_availability.get(available, \"Unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c919b13a-50b6-4510-8e9d-02cdfd95cb98", + "metadata": {}, + "outputs": [], + "source": [ + "def book_ticket(destination_city,price,availability):\n", + 
status=\"\"\n", + " if int(availability) == 0:\n", + " status=\"Cannot book a ticket, no seat available\\n\"\n", + " else:\n", + " print(f\"Tool book_function called for {destination_city}\")\n", + " f = open(\"C:/Users/aruna/Desktop/book_status.txt\", \"a\")\n", + " f.write(f\"Ticket to {destination_city} booked for {price}, currently available - {int(availability)-1}\")\n", + " f.write(\"\\n\")\n", + " f.close()\n", + " ticket_availability[destination_city.lower()]=str(int(availability)-1)\n", + " \n", + " status=\"Ticket reservation is a success\\n\"\n", + " return status" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d2628781-6f5e-4ac1-bbe3-2e08aa0aae0d", + "metadata": {}, + "outputs": [], + "source": [ + "book_function = {\n", + " \"name\": \"book_ticket\",\n", + " \"description\": \"Book the ticket based on the ticket price and availability as confirmed by the user. For example, when a \\\n", + " customer confirms to purchase the ticket for Tokyo after getting to know the ticket price and/or the availability, then \\\n", + " proceed with this tool call. 
Please help the customer in booking the ticket if tickets are available\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " \"price\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The price of the ticket to the city\",\n", + " },\n", + " \"availability\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"ticket availability to the city the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\",\"price\",\"availability\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "480de296-4a36-4ec4-a5f6-149fc198c7a8", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "price_function = {\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "cf1b3e35-08ee-478e-aa1c-534418d78daf", + "metadata": {}, + "outputs": [], + "source": [ + "availability_function = {\n", + " \"name\": \"get_ticket_availability\",\n", + " \"description\": \"Get the availability of a one-way ticket to the destination city. 
Call this whenever you need to know the ticket availability, for example when a customer asks 'What is the ticket availability to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "73e4c8a2-c034-41a4-9b97-7b2aa4aca504", + "metadata": {}, + "outputs": [], + "source": [ + "ticket_price_availability_function = {\n", + " \"name\": \"get_ticket_price_availability\",\n", + " \"description\": \"Get the price or availability of a one-way ticket to the destination city. Call this whenever you need to know the ticket price and availability, for example when a customer asks 'What is the ticket availability and price to this city'\\\n", + " or 'what is the price and ticket for the city?'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "1d5d74a0-9c25-46a4-84ee-1f700bd55fa7", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "\n", + "tools = [{\"type\": \"function\", \"function\": price_function},\n", + " {\"type\": \"function\", \"function\": availability_function},\n", + " {\"type\": \"function\", \"function\": ticket_price_availability_function},\n", + " {\"type\": \"function\", \"function\": book_function}]" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": 
"fa18f535-f8a7-4386-b39a-df0f84d23406", + "metadata": {}, + "outputs": [], + "source": [ + "def play_audio(audio_segment):\n", + " temp_dir = tempfile.gettempdir()\n", + " temp_path = os.path.join(temp_dir, \"temp_audio.wav\")\n", + " try:\n", + " audio_segment.export(temp_path, format=\"wav\")\n", + " time.sleep(3) # Student Dominic found that this was needed. You could also try commenting out to see if not needed on your PC\n", + " subprocess.call([\n", + " \"ffplay\",\n", + " \"-nodisp\",\n", + " \"-autoexit\",\n", + " \"-hide_banner\",\n", + " temp_path\n", + " ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n", + " finally:\n", + " try:\n", + " os.remove(temp_path)\n", + " except Exception:\n", + " pass\n", + " \n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " input=message\n", + " )\n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + " play_audio(audio)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "b588d711-5f20-4a3a-9422-81a1fda8d5b0", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def handle_tool_call1(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " name = json.dumps(tool_call.function.name)\n", + " city = arguments.get('destination_city')\n", + " \n", + " if name.replace('\"','') == \"get_ticket_price\":\n", + " price = get_ticket_price(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " elif name.replace('\"','') == \"book_ticket\":\n", + " price = get_ticket_price(city)\n", + " availability = get_ticket_availability(city)\n", + " 
booked=book_ticket(city,price,availability)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"booking_status\": booked}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " else :\n", + " availability = get_ticket_availability(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"availability\": availability},),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " \n", + " return response, city" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "e74eee70-f89e-4c03-922c-74f9ab567a4c", + "metadata": {}, + "outputs": [], + "source": [ + "def chat_open_ai(history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history \n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages, tools=tools)\n", + " if response.choices[0].finish_reason==\"tool_calls\":\n", + " message = response.choices[0].message\n", + " print(message)\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " name = json.dumps(tool_call.function.name)\n", + " city = arguments.get('destination_city')\n", + " \n", + " if name.replace('\"','') == \"book_ticket\":\n", + " response, city = handle_tool_call1(message)\n", + " messages.append(message)\n", + " messages.append(response)\n", + " # image = artist(city)\n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages)\n", + " elif name.replace('\"','') == \"get_ticket_price_availability\":\n", + " price = get_ticket_price(city)\n", + " availability = get_ticket_availability(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price,\"availability\": availability}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " messages.append(message)\n", + " messages.append(response)\n", + " 
print(messages)\n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages) \n", + " else: \n", + " response, city = handle_tool_call1(message)\n", + " messages.append(message)\n", + " messages.append(response)\n", + " # image = artist(city)\n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages)\n", + " \n", + " reply = response.choices[0].message.content\n", + " history += [{\"role\":\"assistant\", \"content\":reply}]\n", + "\n", + " # Comment out or delete the next line if you'd rather skip Audio for now..\n", + " # talker(reply)\n", + " \n", + " # return history, image\n", + " return history" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "b8f25812-2609-4e26-b929-9cee2d1e4467", + "metadata": {}, + "outputs": [], + "source": [ + "tools_claude=[\n", + " {\n", + " \"name\": \"get_ticket_price_availability\",\n", + " \"description\": \"Get the availability of a one-way ticket to the destination city or the price. 
Call this whenever you need to know the ticket price or availability or both, for example, when a customer asks 'What is the ticket availability and/ or price to this city'\",\n", + " \"input_schema\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"]\n", + " ,\"additionalProperties\": False\n", + " }\n", + " }\n", + " ]\n", + "tool_choice = [{\"type\": \"tool\", \"name\": \"get_ticket_price_availability\"}]" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "1728e70b-596c-4048-8c02-ac3c26756470", + "metadata": {}, + "outputs": [], + "source": [ + "def chat_claude(history):\n", + " for element in history:\n", + " del element[\"metadata\"]\n", + " del element[\"options\"]\n", + "\n", + " messages = history\n", + " response = claude.messages.create(\n", + " model=claude_model,\n", + " system=system_message,\n", + " messages=messages,\n", + " max_tokens=500,\n", + " tools=tools_claude\n", + " )\n", + " print(response.content[0])\n", + " if response.stop_reason==\"tool_use\": \n", + " if \"text=\" in str(response.content[0]):\n", + " # if response.content[0].text is None:\n", + " tool_name = response.content[1].name\n", + " tool_input = response.content[1].input\n", + " tool_id = response.content[1].id\n", + " tool_use=response.content[1]\n", + " else:\n", + " tool_name = response.content[0].name\n", + " tool_input = response.content[0].input\n", + " tool_id = response.content[0].id\n", + " tool_use=response.content[0]\n", + " \n", + " \n", + " city = tool_input.get('destination_city') \n", + " if tool_name == \"get_ticket_price_availability\":\n", + " price = get_ticket_price(city)\n", + " availability = get_ticket_availability(city)\n", + " result_dict = {\n", + " 'destination_city': city,\n", + " 'price': price,\n", + " 'availability': 
availability\n", + " }\n", + " messages += [{\"role\": \"user\",\"content\": json.dumps(result_dict)}]\n", + " response = claude.messages.create(\n", + " model=claude_model,\n", + " system=system_message,\n", + " messages=messages,\n", + " max_tokens=500,\n", + " # tools=tools_claude\n", + " ) \n", + " history.pop(len(history)-1)\n", + " print(history)\n", + " reply = response.content[0].text\n", + " history += [{\"role\":\"assistant\", \"content\":reply}]\n", + " # Comment out or delete the next line if you'd rather skip Audio for now..\n", + " # talker(reply)\n", + " \n", + " # return history, image\n", + " return history" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "a2547bb0-43a5-4b1d-8b9a-95da15a11040", + "metadata": {}, + "outputs": [], + "source": [ + "def chat1(history, Model):\n", + " # + [{\"role\": \"user\", \"content\": message}]\n", + " if Model==\"Open AI\":\n", + " history = chat_open_ai(history)\n", + " else:\n", + " history = chat_claude(history)\n", + " return history" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "07f72649-9d2f-4bf5-b76f-97e52e2f01aa", + "metadata": {}, + "outputs": [], + "source": [ + "# gr.ChatInterface(fn=chat1, type=\"messages\").launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "23b102a4-e544-4560-acc8-a15620478582", + "metadata": {}, + "outputs": [], + "source": [ + "import speech_recognition as sr\n", + "from pydub import AudioSegment\n", + "import simpleaudio as sa\n", + "\n", + "def listener():\n", + " recognizer = sr.Recognizer()\n", + " \n", + " with sr.Microphone() as source:\n", + " print(\"Listening... 
Speak now!\")\n", + " recognizer.adjust_for_ambient_noise(source) # Adjust for background noise\n", + " audio = recognizer.listen(source)\n", + " \n", + " try:\n", + " print(\"Processing speech...\")\n", + " text = recognizer.recognize_google(audio) # Use Google Speech-to-Text\n", + " print(f\"You said: {text}\")\n", + " return text\n", + " except sr.UnknownValueError:\n", + " print(\"Sorry, I could not understand what you said.\")\n", + " return None\n", + " except sr.RequestError:\n", + " print(\"Could not request results, please check your internet connection.\")\n", + " return None\n", + "\n", + "# Example usage:\n", + "# text = listener() # Listen for speech\n", + "# if text:\n", + "# print(f\"You just said: {text}\") " + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "133904cf-4d72-4552-84a8-76650f334857", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot = gr.Chatbot(height=300, type=\"messages\")\n", + " # image_output = gr.Image(height=500)\n", + " with gr.Row():\n", + " Model = gr.Dropdown([\"Open AI\",\"Claude\"],\n", + " # value=[\"Open AI\",\"Claude\"],\n", + " multiselect=False,\n", + " label=\"Model\",\n", + " interactive=True)\n", + " with gr.Row():\n", + " entry = gr.Textbox(label=\"Chat with our AI Assistant:\")\n", + " with gr.Row():\n", + " speak = gr.Button(\"click for voice search\") \n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear\")\n", + "\n", + " def listen():\n", + " text=listener()\n", + " return text\n", + "\n", + " def do_entry(message, history):\n", + " history += [{\"role\":\"user\", \"content\":message}]\n", + " return \"\", history\n", + "\n", + " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", + " # chat, inputs=chatbot, outputs=[chatbot, image_output]\n", + " chat1, inputs=[chatbot, Model], outputs=[chatbot]\n", + " )\n", + " speak.click(listen, inputs=None, outputs=[entry])\n", + " clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", + "\n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "dc4a3844-194c-4af7-8ca8-2fc4edb74c11", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '0'}\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_JyBDI7OInb83ggVApUkRxj08', function=Function(arguments='{\"destination_city\":\"Kolkata\"}', 
name='get_ticket_availability'), type='function')])\n", + "Tool get_ticket_availability called for Kolkata\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_RXeyUBFKQ2wgLARXb0zfFTeS', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"$799\",\"availability\":\"20 tickets available\"}', name='book_ticket'), type='function')])\n", + "Tool get_ticket_price called for London\n", + "Tool get_ticket_availability called for London\n", + "Tool book_function called for London\n" + ] + } + ], + "source": [ + "print(ticket_availability)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4db3a6f9-3b6f-4825-8172-9439020b154f", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2cb145cc-cef0-42d5-902d-72a0af622dcb", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8af88fd-c199-4ca3-ba7c-7934054bac8f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 5deac084f77e837d1a0b43733f959121a9f21824 Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Sat, 5 Apr 2025 23:29:28 +0530 Subject: [PATCH 04/19] AI Chatbot movement Moved to community contributions --- week2/{ => community-contributions}/AI Booking Chatbot.ipynb | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename week2/{ => 
community-contributions}/AI Booking Chatbot.ipynb (100%) diff --git a/week2/AI Booking Chatbot.ipynb b/week2/community-contributions/AI Booking Chatbot.ipynb similarity index 100% rename from week2/AI Booking Chatbot.ipynb rename to week2/community-contributions/AI Booking Chatbot.ipynb From cc9d6cf1bc7418d136fe6655e79edd776296c7c4 Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Sat, 5 Apr 2025 23:48:12 +0530 Subject: [PATCH 05/19] Updated AI Chatbot Updated AI Chatbot code --- .../AI Booking Chatbot.ipynb | 136 ++++-------------- 1 file changed, 25 insertions(+), 111 deletions(-) diff --git a/week2/community-contributions/AI Booking Chatbot.ipynb b/week2/community-contributions/AI Booking Chatbot.ipynb index 83cb6d9..1b5fc82 100644 --- a/week2/community-contributions/AI Booking Chatbot.ipynb +++ b/week2/community-contributions/AI Booking Chatbot.ipynb @@ -20,7 +20,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "a07e7793-b8f5-44f4-aded-5562f633271a", "metadata": {}, "outputs": [], @@ -43,18 +43,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "717ea9d4-1e72-4035-b7c5-5d61da5b8ea3", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key exists and begins sk-proj-\n" - ] - } - ], + "outputs": [], "source": [ "# Initialization\n", "\n", @@ -75,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "cc78f4fd-9920-4872-9117-90cd2aeb2a06", "metadata": {}, "outputs": [], @@ -87,7 +79,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "b2701cc0-6403-4880-9b31-e6e39e89feb4", "metadata": {}, "outputs": [], @@ -104,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "5e33902f-c2c3-4fb0-b01d-a346a4dff811", "metadata": {}, "outputs": [], @@ -114,7 +106,7 @@ }, { "cell_type": 
"code", - "execution_count": 6, + "execution_count": null, "id": "27dfca47-2a38-49f3-8905-f583d98710a5", "metadata": {}, "outputs": [], @@ -128,7 +120,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "6ae7371b-031e-47d7-afaf-42d6758ccd92", "metadata": {}, "outputs": [], @@ -142,7 +134,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "c919b13a-50b6-4510-8e9d-02cdfd95cb98", "metadata": {}, "outputs": [], @@ -165,7 +157,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "d2628781-6f5e-4ac1-bbe3-2e08aa0aae0d", "metadata": {}, "outputs": [], @@ -199,7 +191,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "480de296-4a36-4ec4-a5f6-149fc198c7a8", "metadata": {}, "outputs": [], @@ -225,7 +217,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "cf1b3e35-08ee-478e-aa1c-534418d78daf", "metadata": {}, "outputs": [], @@ -249,7 +241,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "73e4c8a2-c034-41a4-9b97-7b2aa4aca504", "metadata": {}, "outputs": [], @@ -274,7 +266,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "1d5d74a0-9c25-46a4-84ee-1f700bd55fa7", "metadata": {}, "outputs": [], @@ -289,7 +281,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "fa18f535-f8a7-4386-b39a-df0f84d23406", "metadata": {}, "outputs": [], @@ -326,7 +318,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "b588d711-5f20-4a3a-9422-81a1fda8d5b0", "metadata": {}, "outputs": [], @@ -368,7 +360,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "e74eee70-f89e-4c03-922c-74f9ab567a4c", "metadata": {}, "outputs": [], @@ -421,7 +413,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": 
"b8f25812-2609-4e26-b929-9cee2d1e4467", "metadata": {}, "outputs": [], @@ -448,7 +440,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "1728e70b-596c-4048-8c02-ac3c26756470", "metadata": {}, "outputs": [], @@ -511,7 +503,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "a2547bb0-43a5-4b1d-8b9a-95da15a11040", "metadata": {}, "outputs": [], @@ -527,17 +519,7 @@ }, { "cell_type": "code", - "execution_count": 20, - "id": "07f72649-9d2f-4bf5-b76f-97e52e2f01aa", - "metadata": {}, - "outputs": [], - "source": [ - "# gr.ChatInterface(fn=chat1, type=\"messages\").launch(inbrowser=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "23b102a4-e544-4560-acc8-a15620478582", "metadata": {}, "outputs": [], @@ -574,40 +556,10 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "133904cf-4d72-4552-84a8-76650f334857", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7860\n", - "\n", - "To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "with gr.Blocks() as ui:\n", " with gr.Row():\n", @@ -646,51 +598,13 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "id": "dc4a3844-194c-4af7-8ca8-2fc4edb74c11", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '0'}\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_JyBDI7OInb83ggVApUkRxj08', function=Function(arguments='{\"destination_city\":\"Kolkata\"}', name='get_ticket_availability'), type='function')])\n", - "Tool get_ticket_availability called for Kolkata\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_RXeyUBFKQ2wgLARXb0zfFTeS', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"$799\",\"availability\":\"20 tickets available\"}', name='book_ticket'), type='function')])\n", - "Tool get_ticket_price called for London\n", - "Tool get_ticket_availability called for London\n", - "Tool book_function called for London\n" - ] - } - ], + "outputs": [], "source": [ "print(ticket_availability)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4db3a6f9-3b6f-4825-8172-9439020b154f", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2cb145cc-cef0-42d5-902d-72a0af622dcb", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d8af88fd-c199-4ca3-ba7c-7934054bac8f", - 
"metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { From 0fc1ff6f58b9a5fb102fcd5e32d6fc3ea67e517c Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Sun, 6 Apr 2025 21:44:00 +0530 Subject: [PATCH 06/19] Personal Story Writer Personal Story Writer --- .../Personal Story Writer.ipynb | 355 ++++++++++++++++++ 1 file changed, 355 insertions(+) create mode 100644 week2/community-contributions/Personal Story Writer.ipynb diff --git a/week2/community-contributions/Personal Story Writer.ipynb b/week2/community-contributions/Personal Story Writer.ipynb new file mode 100644 index 0000000..6678bdd --- /dev/null +++ b/week2/community-contributions/Personal Story Writer.ipynb @@ -0,0 +1,355 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 11, + "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "from dotenv import load_dotenv\n", + "import anthropic\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.options import Options\n", + "import os\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import gradio as gr # oh yeah!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "gpt_name=\"GPT\"\n", + "claude_name=\"Claude\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt(Language, Genre, gpt_messages, claude_messages, Remarks):\n", + " \n", + " if Remarks == \"\":\n", + " # print(\"remarks is not there\")\n", + " gpt_system = f\"You are a chatbot who is a short story writer; Your name is g1. \\\n", + " Please write a story in markdown in {Language} , the genre being {Genre}. 
\\\n", + " Please also incorporate feedback such as areas of improvement (if any) coming from the user \\\n", + " and only publish the improved version without any extra comments.\"\n", + " else :\n", + " # print(\"remarks is there\")\n", + " gpt_system = f\"You are a chatbot who is a short story writer; Your name is g1. \\\n", + " Please write a story in markdown in {Language} , the genre being {Genre}. \\\n", + " The story should consist {Remarks}\\\n", + " Please also incorporate feedback such as areas of improvement (if any) coming from the user \\\n", + " and only publish the improved version without any extra comments.\"\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude in zip(gpt_messages, claude_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": claude})\n", + " # print(messages)\n", + " \n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content\n", + " \n", + " # stream = openai.chat.completions.create(\n", + " # model=gpt_model,\n", + " # messages=messages,\n", + " # stream=True\n", + " # )\n", + " # result = \"\"\n", + " # for chunk in stream:\n", + " # result += chunk.choices[0].delta.content or \"\"\n", + " # yield result" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", + "metadata": {}, + "outputs": [], + "source": [ + "# call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude(Language, Genre, gpt_messages, claude_messages):\n", + "\n", + " claude_system = f\"You are a chatbot who is a short story analyser; Your name is c1. 
\\\n", + " You will accept an input story in {Genre} genre and {Language} language and publish only the areas of improvement if you find any with no other comments\"\n", + " \n", + " messages1 = []\n", + " for gpt, claude1 in zip(gpt_messages, claude_messages):\n", + " messages1.append({\"role\": \"user\", \"content\": gpt})\n", + " messages1.append({\"role\": \"assistant\", \"content\": claude1})\n", + " messages1.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " # print(messages1)\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages1,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text\n", + "\n", + " # result = claude.messages.stream(\n", + " # model=claude_model,\n", + " # max_tokens=1000,\n", + " # temperature=0.7,\n", + " # system=claude_system,\n", + " # messages=messages\n", + " # )\n", + " # response = \"\"\n", + " # with result as stream:\n", + " # for text in stream.text_stream:\n", + " # response += text or \"\"\n", + " # yield response\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", + "metadata": {}, + "outputs": [], + "source": [ + "def Write_Me(Language, Genre, Iterations, Remarks):\n", + " \n", + " gpt_messages = [\"Hi I will share a story now!!\"]\n", + " claude_messages = [\"Please share, I will critique the story.\"]\n", + " \n", + " print(f\"{gpt_name}:\\n{gpt_messages[0]}\\n\")\n", + " print(f\"{claude_name}:\\n{claude_messages[0]}\\n\")\n", + "\n", + " for i in range(int(Iterations)):\n", + " gpt_next = call_gpt(Language, Genre, gpt_messages, claude_messages, Remarks)\n", + " print(f\"{gpt_name}:\\n{gpt_next}\\n\")\n", + " # yield gpt_next\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = f\"After {i+1} iterations, this is the critique for the provided story - \\\n", + " \\n\\n{call_claude(Language, Genre, gpt_messages, 
claude_messages)}\"\n", + " print(f\"{claude_name}:\\n{claude_next}\\n\")\n", + " # yield claude_next\n", + " claude_messages.append(claude_next)\n", + "\n", + " yield gpt_next, claude_next\n", + " \n", + " # yield gpt_next, claude_next\n", + " # return (gpt_next, claude_next)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "19e66ed3-d2c3-4a71-aec4-7869e5295215", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7868\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "GPT:\n", + "Hi I will share a story now!!\n", + "\n", + "Claude:\n", + "Please share, I will critique the story.\n", + "\n", + "GPT:\n", + "# Whispering Hearts\n", + "\n", + "In the heart of a bustling city, where dreams intersect with the rhythm of everyday life, there lived a young woman named Elara. She was a painter, her fingers forever stained with splashes of color that danced on canvases, capturing the vibrant world around her. Yet, despite the beauty she created, her heart felt empty — longing for a connection deeper than her art.\n", + "\n", + "One gloomy autumn afternoon, as leaves twirled down from their branches and the sky drizzled a soft rain, Elara sought refuge in her favorite café, \"The Enchanted Brew.\" The aroma of coffee and pastries wrapped around her like a warm hug. As she sipped her latte, her gaze wandered to a corner of the café where a young man sat, engrossed in a book. His dark hair fell gently over his brow, and a smile occasionally lit up his face as he turned the pages.\n", + "\n", + "Intrigued, Elara felt an unexplainable pull towards him. Gathering her courage, she approached his table. “Excuse me,” she began, her voice trembling ever so slightly. “What are you reading?”\n", + "\n", + "His eyes lit up as he glanced up, and for that fleeting moment, the world seemed to fade. “It’s a collection of poetry,” he replied, his voice smooth like the finest silk. “I’m drawn to the way words can paint emotions.” \n", + "\n", + "Elara felt a spark ignite in her heart. “I’m a painter,” she confessed. “I believe colors can tell stories, much like poetry.” \n", + "\n", + "They talked for hours, sharing their passions and dreams. 
His name was Leo, and despite their differences, they discovered a beautiful harmony between them. Days turned into weeks, and their café meetings became a cherished ritual. Each conversation deepened their bond, weaving their lives together like the strokes upon Elara’s canvas.\n", + "\n", + "One evening, as the sun dipped below the horizon, casting a golden hue over the city, Leo invited Elara to an art exhibition where he would read his poetry. Her heart raced with excitement and trepidation. It was more than just a reading; it was a dance of souls under the dim lights and soft whispers of appreciation.\n", + "\n", + "That night, Elara stood in the crowd, her heart swelling with pride as Leo recited verses that spoke of love, longing, and the delicate balance of human connection. As he closed his eyes and poured his heart into each word, she felt as if he was painting a picture of them — two hearts intertwined in a beautiful tableau.\n", + "\n", + "After the reading, Leo found Elara near a canvas that displayed vibrant strokes of color and emotion. He looked deeply into her eyes, the intensity of his gaze making her breath catch. “You inspire me, Elara,” he said softly, “the way you see the world, it’s like a masterpiece unfolding.”\n", + "\n", + "Elara felt heat rise in her cheeks. “And you inspire me, Leo. Your words make me feel alive in a way I never thought possible.”\n", + "\n", + "With the world around them fading, Leo took her hand, intertwining his fingers with hers. “Would you paint my heart?” he asked, a playful smile dancing on his lips.\n", + "\n", + "Elara laughed lightly, her heart racing. “Only if you promise to pen my story in your poetry,” she replied, her voice barely above a whisper.\n", + "\n", + "As they stood there, hands linked and hearts aflame, the city faded into a blur, leaving only their whispered promises and dreams hanging in the air like starlight. In that moment, Elara knew she had found not just a muse, but a soulmate. 
\n", + "\n", + "Together, they navigated the canvas of life, blending their colors and words into a piece of art that would last a lifetime — a love story written in the most beautiful brush strokes and tender verses, forever whispering the language of their hearts.\n", + "\n", + "Claude:\n", + "After 1 iterations, this is the critique for the provided story - \n", + "\n", + "The story is well-written and evocative, capturing the essence of a budding romantic relationship. However, I would suggest the following areas for potential improvement:\n", + "\n", + "1. Character Development: While the main characters, Elara and Leo, are introduced and their individual passions are established, there could be more depth and nuance to their characterization. Exploring their backstories, motivations, and inner thoughts in greater detail could help readers connect with them on a deeper level.\n", + "\n", + "2. Pacing: The story progresses at a steady pace, but there might be opportunities to introduce more tension or conflict to create a stronger narrative arc. Incorporating a few obstacles or challenges that the characters must overcome could heighten the emotional impact and make the resolution more satisfying.\n", + "\n", + "3. Sensory Details: The story could benefit from the inclusion of more vivid sensory details, particularly in the descriptive passages. Incorporating additional sights, sounds, smells, and textures could further immerse the reader in the characters' experiences and the atmosphere of the settings.\n", + "\n", + "Overall, the story captures the essence of a romantic connection and the power of shared passions. 
With some refinement in the areas mentioned, the narrative could become even more compelling and impactful.\n", + "\n" + ] + } + ], + "source": [ + "view = gr.Interface(\n", + " fn=Write_Me,\n", + " inputs=[gr.Dropdown([\"English\",\"Bengali\",\"Hindi\",\"French\",\"Spanish\"],label = \"Language\"),\n", + " gr.Dropdown([\"Romantic\",\"Horror\",\"Comedy\",\"Romantic Comedy\",\"Horror Comedy\"],label = \"Genre\"),\n", + " gr.Textbox(label=\"Iterations:\", lines=1),\n", + " gr.Textbox(label=\"Remarks:\", lines=1)],\n", + " outputs=[gr.Markdown(label=\"Short Story:\"),\n", + " gr.Textbox(label=\"Critique:\", lines=8)],\n", + " flagging_mode=\"never\")\n", + "view.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0dabafa2-089a-4e65-a6cc-19f7c19af59a", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 64c1acf9b8db8be154758a86c3c7b78d9301f3c1 Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Sun, 6 Apr 2025 21:48:44 +0530 Subject: [PATCH 07/19] AI Chatbot and Personal Story Writer AI Chatbot and Personal Story Writer --- .../AI Booking Chatbot.ipynb | 98 ++++++++---- .../Personal Story Writer.ipynb | 139 +++++++++++++----- 2 files changed, 178 insertions(+), 59 deletions(-) diff --git a/week2/community-contributions/AI Booking Chatbot.ipynb b/week2/community-contributions/AI Booking Chatbot.ipynb index 1b5fc82..44406f2 100644 --- a/week2/community-contributions/AI Booking Chatbot.ipynb +++ b/week2/community-contributions/AI Booking Chatbot.ipynb @@ 
-20,7 +20,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "a07e7793-b8f5-44f4-aded-5562f633271a", "metadata": {}, "outputs": [], @@ -43,10 +43,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "717ea9d4-1e72-4035-b7c5-5d61da5b8ea3", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n" + ] + } + ], "source": [ "# Initialization\n", "\n", @@ -67,7 +75,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "cc78f4fd-9920-4872-9117-90cd2aeb2a06", "metadata": {}, "outputs": [], @@ -79,7 +87,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "b2701cc0-6403-4880-9b31-e6e39e89feb4", "metadata": {}, "outputs": [], @@ -96,7 +104,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "5e33902f-c2c3-4fb0-b01d-a346a4dff811", "metadata": {}, "outputs": [], @@ -106,7 +114,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "27dfca47-2a38-49f3-8905-f583d98710a5", "metadata": {}, "outputs": [], @@ -120,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "6ae7371b-031e-47d7-afaf-42d6758ccd92", "metadata": {}, "outputs": [], @@ -134,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "c919b13a-50b6-4510-8e9d-02cdfd95cb98", "metadata": {}, "outputs": [], @@ -157,7 +165,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "d2628781-6f5e-4ac1-bbe3-2e08aa0aae0d", "metadata": {}, "outputs": [], @@ -191,7 +199,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "480de296-4a36-4ec4-a5f6-149fc198c7a8", "metadata": {}, "outputs": [], @@ -200,7 +208,7 @@ "\n", "price_function = {\n", " \"name\": \"get_ticket_price\",\n", 
- " \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"description\": \"Get the price of a one_way ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", " \"parameters\": {\n", " \"type\": \"object\",\n", " \"properties\": {\n", @@ -217,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "cf1b3e35-08ee-478e-aa1c-534418d78daf", "metadata": {}, "outputs": [], @@ -241,7 +249,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "73e4c8a2-c034-41a4-9b97-7b2aa4aca504", "metadata": {}, "outputs": [], @@ -266,7 +274,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "id": "1d5d74a0-9c25-46a4-84ee-1f700bd55fa7", "metadata": {}, "outputs": [], @@ -281,7 +289,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "fa18f535-f8a7-4386-b39a-df0f84d23406", "metadata": {}, "outputs": [], @@ -318,7 +326,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "b588d711-5f20-4a3a-9422-81a1fda8d5b0", "metadata": {}, "outputs": [], @@ -360,7 +368,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "e74eee70-f89e-4c03-922c-74f9ab567a4c", "metadata": {}, "outputs": [], @@ -413,7 +421,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "id": "b8f25812-2609-4e26-b929-9cee2d1e4467", "metadata": {}, "outputs": [], @@ -440,7 +448,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "id": "1728e70b-596c-4048-8c02-ac3c26756470", "metadata": {}, "outputs": [], @@ -503,7 +511,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, 
"id": "a2547bb0-43a5-4b1d-8b9a-95da15a11040", "metadata": {}, "outputs": [], @@ -519,7 +527,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "id": "23b102a4-e544-4560-acc8-a15620478582", "metadata": {}, "outputs": [], @@ -556,10 +564,40 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "id": "133904cf-4d72-4552-84a8-76650f334857", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "with gr.Blocks() as ui:\n", " with gr.Row():\n", @@ -598,10 +636,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "id": "dc4a3844-194c-4af7-8ca8-2fc4edb74c11", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '2'}\n" + ] + } + ], "source": [ "print(ticket_availability)" ] diff --git a/week2/community-contributions/Personal Story Writer.ipynb b/week2/community-contributions/Personal Story Writer.ipynb index 6678bdd..2931972 100644 --- a/week2/community-contributions/Personal Story Writer.ipynb +++ b/week2/community-contributions/Personal Story Writer.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 11, + "execution_count": 1, "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", "metadata": {}, "outputs": [], @@ -26,7 +26,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", "metadata": {}, "outputs": [ @@ -60,7 +60,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 3, "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", "metadata": {}, "outputs": [], @@ -74,7 +74,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 4, "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", "metadata": {}, "outputs": [], @@ -88,7 +88,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 5, "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", "metadata": {}, "outputs": [], @@ -134,7 +134,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 6, "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", "metadata": {}, "outputs": [], @@ -144,7 +144,7 @@ 
}, { "cell_type": "code", - "execution_count": 24, + "execution_count": 7, "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", "metadata": {}, "outputs": [], @@ -186,7 +186,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 8, "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", "metadata": {}, "outputs": [], @@ -219,7 +219,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 9, "id": "19e66ed3-d2c3-4a71-aec4-7869e5295215", "metadata": {}, "outputs": [ @@ -227,7 +227,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "* Running on local URL: http://127.0.0.1:7868\n", + "* Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] @@ -235,7 +235,7 @@ { "data": { "text/html": [ - "
" + "
" ], "text/plain": [ "" @@ -248,7 +248,7 @@ "data": { "text/plain": [] }, - "execution_count": 28, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" }, @@ -263,48 +263,105 @@ "Please share, I will critique the story.\n", "\n", "GPT:\n", - "# Whispering Hearts\n", + "# ভুতুড়ে বেডরুমে আতঙ্কিত অতিথি\n", "\n", - "In the heart of a bustling city, where dreams intersect with the rhythm of everyday life, there lived a young woman named Elara. She was a painter, her fingers forever stained with splashes of color that danced on canvases, capturing the vibrant world around her. Yet, despite the beauty she created, her heart felt empty — longing for a connection deeper than her art.\n", + "বানгалোরের একটি পুরনো কলকাতার বাড়িতে, আর্যন একজন সাহসী যুবক হিসেবে অনেক খোঁজ-খবর করে একটি ভাড়ার ঘর খুঁজছিল। পরিচিত একটি অদ্ভুত হোটেলে পৌঁছানোর পর, সে লক্ষ্য করল ছবির কাছাকাছি একটি বেডরুম।\n", "\n", - "One gloomy autumn afternoon, as leaves twirled down from their branches and the sky drizzled a soft rain, Elara sought refuge in her favorite café, \"The Enchanted Brew.\" The aroma of coffee and pastries wrapped around her like a warm hug. As she sipped her latte, her gaze wandered to a corner of the café where a young man sat, engrossed in a book. His dark hair fell gently over his brow, and a smile occasionally lit up his face as he turned the pages.\n", + "সেখানে প্রবেশ করার পর, সে বেডের পাশে একটি বাদামি রঙ্গের সোফা ও একটি ভুতুড়ে ছবি দেখল। ছবির মধ্যে থাকা মহিলা একটি দৃষ্টিকটু হাসি দিয়ে তাকিয়ে ছিল। আর্যন খুব অবাক হল, সময় কাটানোর জন্য সে ছবিটার দিকে তাকাতে লাগল। কিছুক্ষণের মধ্যেই সোফা থেকে একটি কাশি বের হল।\n", "\n", - "Intrigued, Elara felt an unexplainable pull towards him. Gathering her courage, she approached his table. “Excuse me,” she began, her voice trembling ever so slightly. 
“What are you reading?”\n", + "\"ভোদা সুনতে পেরেছ?\" সোফা থেকে একটি ভুতুড়ে শব্দ আসছে। আর্যন পিছন ফিরে তাকিয়ে দেখল যে সোফার মধ্যে একটি ভুতুড়ে রূপের মহিলা তার দিকে তাকিয়ে আছে।\n", "\n", - "His eyes lit up as he glanced up, and for that fleeting moment, the world seemed to fade. “It’s a collection of poetry,” he replied, his voice smooth like the finest silk. “I’m drawn to the way words can paint emotions.” \n", + "\"আমি তোমার জন্য অপেক্ষা করছিলাম,\" মহিলা বলল, তার গলা মুখ থেকে বের হয়ে আসছিল শুরুতে। \"এটি একটি দলের রাত।\"\n", "\n", - "Elara felt a spark ignite in her heart. “I’m a painter,” she confessed. “I believe colors can tell stories, much like poetry.” \n", + "আর্যন দৌঁড়ে পালাতে গেল, কিন্তু সোফার থেকে অদ্ভুত আওয়াজ আসতে লাগল। \"তুমি যেতে পারবে না, কারণ তুমি আমাদের দলে যোগ দিতে পার না।”\n", "\n", - "They talked for hours, sharing their passions and dreams. His name was Leo, and despite their differences, they discovered a beautiful harmony between them. Days turned into weeks, and their café meetings became a cherished ritual. Each conversation deepened their bond, weaving their lives together like the strokes upon Elara’s canvas.\n", + "“মহিলার কি হয়েছে? আপনা এতো চিৎকার করছেন? তাহলে কি হবে?” আর্যন যুদ্ধ করছিল।\n", "\n", - "One evening, as the sun dipped below the horizon, casting a golden hue over the city, Leo invited Elara to an art exhibition where he would read his poetry. Her heart raced with excitement and trepidation. It was more than just a reading; it was a dance of souls under the dim lights and soft whispers of appreciation.\n", + "তিনি উপস্থিত হওয়ার পর, আহ্লাদিত আতিথিরা আসতে লাগল। আর্যন খুব ভীত হয়ে গেল কারণ মহিলার মুখ তাদের কাছে কখনো কখনো বিকৃত হচ্ছিল।\n", "\n", - "That night, Elara stood in the crowd, her heart swelling with pride as Leo recited verses that spoke of love, longing, and the delicate balance of human connection. 
As he closed his eyes and poured his heart into each word, she felt as if he was painting a picture of them — two hearts intertwined in a beautiful tableau.\n", + "“আমরা আজ রাতের জন্য মজা করতে এসেছি, তুমি আমাদের সঙ্গে যোগ দিতে পারো!” তারা একসঙ্গে চিৎকার করতে লাগল।\n", "\n", - "After the reading, Leo found Elara near a canvas that displayed vibrant strokes of color and emotion. He looked deeply into her eyes, the intensity of his gaze making her breath catch. “You inspire me, Elara,” he said softly, “the way you see the world, it’s like a masterpiece unfolding.”\n", + "আর্যন উপলব্ধি করল যে এটি একটি ভয়ঙ্কর ও হাস্যকর পরিস্থিতি। সবাই অতীতে অদ্ভুত ঘটনাগুলোর দিকে ফিরে গেল। হঠাৎ, ছবির মহিলা বলেন, “তুমি যদি হাসতে না পার, তবে তোমাকে আমাদের দলে গ্রহণ করা যাবে না!”\n", "\n", - "Elara felt heat rise in her cheeks. “And you inspire me, Leo. Your words make me feel alive in a way I never thought possible.”\n", + "এরপর শুরু হল খেলার একটি হরর পরিবেশ। আর্যন ও তার বন্ধুদের নিয়ে ভুতুড়ে সময় কাটাতে লাগল। যদিও অনেক ভয়, কিন্তু তারা একসাথে খুব হাসির ব্যবস্থা করে ফেলল। তাদের বিচিত্র কথাবার্তা মজার চরিত্রের সাথে মিলে যায়, আর একসময় তারা সবাই একসঙ্গে হৈ হৈ করে হাসতে লাগল।\n", "\n", - "With the world around them fading, Leo took her hand, intertwining his fingers with hers. “Would you paint my heart?” he asked, a playful smile dancing on his lips.\n", + "শেষে, তারা তখন উপলব্ধি করল যে, ভয়াবহতার মাঝেও আনন্দের উপাদান লুকিয়ে আছে। ব্যক্তি যদি ঠিকভাবে উদ্দেশ্য বুঝে এই ভুতুড়ে পরিবেশে মজার উপকারিতা তৈরি করে, তাতে একজনের ঘুম হারানোর ভয় হয়ে যায় হাসির স্বাদে।\n", "\n", - "Elara laughed lightly, her heart racing. 
“Only if you promise to pen my story in your poetry,” she replied, her voice barely above a whisper.\n", + "আর্যন এবং তাঁর নতুন বন্ধুরা জীবনকে একটি নতুন দৃষ্টিতে গ্রহণ করে, যেখানে হাসি এবং ভয়ের পাশাপাশি সুখে থাকতে হয়। \n", "\n", - "As they stood there, hands linked and hearts aflame, the city faded into a blur, leaving only their whispered promises and dreams hanging in the air like starlight. In that moment, Elara knew she had found not just a muse, but a soulmate. \n", - "\n", - "Together, they navigated the canvas of life, blending their colors and words into a piece of art that would last a lifetime — a love story written in the most beautiful brush strokes and tender verses, forever whispering the language of their hearts.\n", + "এই ছিল আর্যনের ভুতুড়ে অবসরে আতঙ্কিত হওয়ার অভিজ্ঞতা, যা তাকে স্মৃতি হিসেবে অমর করে রাখল।\n", "\n", "Claude:\n", "After 1 iterations, this is the critique for the provided story - \n", "\n", - "The story is well-written and evocative, capturing the essence of a budding romantic relationship. However, I would suggest the following areas for potential improvement:\n", + "আইতেম সমূহের উন্নতির সূচনা:\n", + "\n", + "1. কাহিনীর শুরুতে প্রধান চরিত্রটিকে আরো বিশদভাবে পরিচয় দেয়া যেতে পারে।\n", + "2. ভুতুড়ে পরিবেশের বর্ণনা আরো বিস্তারিত ও ভাবময় হতে পারে।\n", + "3. চরিত্রগুলির মধ্যে সংঘর্ষ, ডায়ালগ ও সংবাদ বিনিময় আরো স্বাভাবিক ও প্রাণবন্ত হতে পারে।\n", + "4. 
কাহিনীর শেষাংশে প্রধান চরিত্রের অভিজ্ঞতা ও শিক্ষা আরো গভীরতা লাভ করতে পারে।\n", + "\n", + "GPT:\n", + "# ভুতুড়ে বেডরুমে আতঙ্কিত অতিথি\n", + "\n", + "বানগালোরের একটি পুরনো বাংলাদেশি শৈলীর বাড়িতে, আর্যন একটি দীর্ঘ প্রক্রিয়ার পর একটি ভাড়ার ঘর খুঁজছিল। আর্যন, একজন কর্মঠ ও সাহসী যুবক, সদ্যই তার কলেজ জীবন শেষ করেছে। নতুন পরিবেশে নতুন বন্ধুদের খোঁজে সে এই শহরে এসেছে। প্রতিবেশীরা তাকে ভুতুড়ে অনেক অদ্ভুত কথা বলেছিল, কিন্তু সে ভয়ডরহীন।\n", + "\n", + "একদিন, তিনি একটি অদ্ভুত হোটেলে পৌঁছান, যা শহরের প্রান্তে, খুব পুরনো এবং বিশাল। সেখানে প্রবেশ করার পর, তিনি একটি বেডরুমের সামনে দাঁড়িয়ে পড়েন। গা dark ় অন্ধকারের মধ্যে, তিনি একটি বাদামী রঙের সোফা ও একটি creepy ছবি দেখতে পান। ছবির মধ্যে থাকা মহিলা একটি দৃষ্টিকটু হাসি দিয়ে তাকিয়ে ছিল।\n", + "\n", + "আর্যন তাঁর কৌতূহলকে দমন করতে না পেরে, ছবিটির দিকে তাকাতে শুরু করে। কিছুক্ষণের মধ্যেই সোফা থেকে একটি ভুতুড়ে শব্দ ভেসে এলো। \"ভোদা সুনতে পেরেছ?\" সোফা থেকে সেই ভয়ঙ্কর শব্দটি আসছে। আর্যন ভয় পেয়েই পিছন ফিরে তাকায়, কিন্তু সামনে যে ভুতুড়ে মহিলা তাকে দেখে হাসছে, সে কাছে অপেক্ষা করছে।\n", + "\n", + "\"আমি তোমার জন্য অপেক্ষা করছিলাম,\" মহিলা বলল, তার গলা যেন মুখ থেকে বের হচ্ছে। \"এটি একটি দলের রাত।\"\n", + "\n", + "আর্যন দৌঁড়ে পালাতে যেতে চাইলে, কিন্তু সোফা থেকে অদ্ভুত আওয়াজ বের হতে লাগল। \"তুমি যেতে পারবে না, কারণ তুমি আমাদের দলে যোগ দিতে পার না।”\n", + "\n", + "\"মহিলার কি হয়েছে? আপনা এতো চিৎকার করছেন? 
তাহলে কি হবে?” আর্যন তাঁর কৌতূহল ও ভয়ের সাথে যুদ্ধ করতে লাগল।\n", + "\n", + "এই সময়, বিশাল সাদা পোশাক পরিহিত করে অন্যান্য ভূতেরা আসতে লাগল। \"আমরা আজ রাতের জন্য মজা করতে এসেছি, তুমি আমাদের সঙ্গে যোগ দিতে পারো!\" তারা একসঙ্গে গাইতে লাগল, ভুতুড়ে মুহূর্তগুলি জীবন্ত করে তোলার জন্য।\n", + "\n", + "আর্যন শুরুতেই ভীত ও চিন্তিত হয়ে গেল, কিন্তু কথোপকথন চলতে চলতে, মহিলার মুখ প্রতিবার বিকৃত হতে লাগল এবং আতিথিদের কথা শুনতে শুনতে তার খোশমেজাজ বেড়ে গেল।\n", + "\n", + "“যদি হাসতে না পার, তুমি আমাদের দলে গ্রহণযোগ্য হবে না!” তারা গলা উঁচু করে চিৎকার করে উঠল। তাদের মুখের হাসির সুরে সেই আতঙ্ক যেন প্রতিদিনের মজায় পরিণত হলো।\n", + "\n", + "খেলার মধ্যে ভয়াবহতা চরমে পৌঁছাতে লাগল। আর্যন এবং তার নতুন বন্ধুদের ভাগ্য এটি পরিণত হলো। অবশেষে, তারা উপলব্ধি করল যে ভয় ও হাসির মাঝে জীবনের আসল রসদ লুকিয়ে আছে। \n", + "\n", + "প্রধান চরিত্রটি তখন বুঝতে পারল যে এই অদ্ভুত ভুতুড়ে পরিবেশের মধ্যে হাসির সঙ্গবদ্ধতা কত বিচিত্র হতে পারে। পারে না। দেখা গেল আতঙ্ক এবং হাসির মিশ্রণে তারা নিজেদের আত্মবিশ্বাসী ও আনন্দের অনুভূতিতে পরিপূর্ণ করে তুলেছে। \n", + "\n", + "নতুন বন্ধুরা মনে রেখে আন্দাজ করতে পারে যে, কখনো কখনো ভয় কিন্তু রসিকতা এবং আনন্দের একটি নতুন প্রসঙ্গ হয়ে উঠতে পারে। আর্যন সেই রাতের অভিজ্ঞতা নিয়ে সারা জীবন স্মরণে রাখবে, যেখানে হাসি এবং ভয়ের পাশে বাস্তবতা গড়ে তোলার সুযোগ পেল।\n", + "\n", + "Claude:\n", + "After 2 iterations, this is the critique for the provided story - \n", + "\n", + "ভাল। প্রদত্ত কাহিনীতে বেশ কিছু উন্নয়নের সূচনা দেখা যায়। বিশেষ করে চরিত্রটির বিস্তারিত পরিচয়, ভুতুড়ে পরিবেশের অনুপ্রবেশ ও চরিত্রগুলির মধ্যকার সংঘর্ষ ও ডায়ালগ আরও উন্নত হয়েছে। কাহিনীর শেষে চরিত্রটির অভিজ্ঞতা ও শিক্ষা আরও গভীরতা লাভ করেছে। কুল মিলিয়ে, এটি একটি ভালো হরর কমেডি রচনা।\n", + "\n", + "GPT:\n", + "# ভুতুড়ে বেডরুমে আতঙ্কিত অতিথি\n", + "\n", + "বানগালোরের একটি পুরনো বাংলাদেশি শৈলীর বাড়িতে, আর্যন, একজন কর্মঠ ও সাহসী যুবক, সদ্যই তার কলেজ জীবন শেষ করে নতুন অপেক্ষারত শহরে এসেছে। নতুন বন্ধুদের খোঁজে, সে শহরের বিভিন্ন অংশে ঘুরে বেড়াচ্ছে, কিন্তু তার মধ্যে ভয়ের প্রতি এক বিশেষ আকর্ষণ রয়েছে। শোনা গেছে, 
শহরের বিভিন্ন স্থানে বিভিন্ন ধরনের অদ্ভুত ঘটনার কথা। একটি মজার কথা হলো, সে তাদের মধ্যে ভুতুড়ে ঘটনাগুলোর সন্ধান দিতে পারে।\n", + "\n", + "একদিন, তিনি একটি অদ্ভুত হোটেলে পৌঁছান, যা শহরের প্রান্তে অবস্থিত এবং বেশ পুরনো ও বিশাল। হোটেলের পরিবেশ ছিল গা dark ় অন্ধকারে মোড়ানো। তিনি একটি বেডরুমের সামনে এসে দাঁড়ান, সেখানে একটি বাদামী সোফা এবং একটি creepy ছবি দেখা যায়। ছবির মহিলার হাসিটি ছিল ভূতের মতো।\n", "\n", - "1. Character Development: While the main characters, Elara and Leo, are introduced and their individual passions are established, there could be more depth and nuance to their characterization. Exploring their backstories, motivations, and inner thoughts in greater detail could help readers connect with them on a deeper level.\n", + "আর্যন তাঁর কৌতূহলকে দমন করতে না পেরে, ছবিটির দিকে তাকাতে শুরু করে। হঠাৎ, সোফা থেকে একটি ভুতুড়ে শব্দ ভেসে আসে, \"ভোদা সুনতে পেরেছ?\" বিখ্যাত কথা যেন সোফার জীবন পেয়েছে। তিনি পিছন ফিরে দেখতে পান যে মহিলা তার দিকে তাকিয়ে হাসছে। \n", "\n", - "2. Pacing: The story progresses at a steady pace, but there might be opportunities to introduce more tension or conflict to create a stronger narrative arc. Incorporating a few obstacles or challenges that the characters must overcome could heighten the emotional impact and make the resolution more satisfying.\n", + "\"আমি তোমার জন্য অপেক্ষা করছিলাম,\" মহিলা গম্ভীরভাবে বলল, তার ভয়ের আওয়াজসহ। \"এটি একটি দলের রাত।\"\n", "\n", - "3. Sensory Details: The story could benefit from the inclusion of more vivid sensory details, particularly in the descriptive passages. 
Incorporating additional sights, sounds, smells, and textures could further immerse the reader in the characters' experiences and the atmosphere of the settings.\n", + "আর্যন ভয়ের সাথে পালানোর চেষ্টা করলেও, সোফা থেকে একাধিক ভুতুড়ে ক্রিয়া শুরু হয়ে গেল। \"তুমি যেতে পারবে না, কারণ তুমি আমাদের দলে যোগ দিতে পার না।” মহিলার মুখের বিকৃতি আরও ভয়ঙ্কর লাগতে শুরু করল।\n", "\n", - "Overall, the story captures the essence of a romantic connection and the power of shared passions. With some refinement in the areas mentioned, the narrative could become even more compelling and impactful.\n", + "\"মহিলার কি হয়েছে? আপনা এতো চিৎকার করছেন? তাহলে কি হবে?” আর্যন ভাবছিল, তার সাধারণ জীবনের এই অবাক অনুভূতি ভাললাগছে।\n", + "\n", + "এই সময়, বিশাল সাদা পোশাক পরিহিত সদৃশ ভূতরা হাজির হয়ে গেল। \"আমরা আজ রাতের জন্য মজা করতে এসেছি, তুমি আমাদের সঙ্গে যোগ দিতে পারো!\" তারা একসঙ্গে হাসিমুখে বলল, এক ভুতুড়ে পরিবেশে রাজ্যের রসিকতার আয়োজন করতে।\n", + "\n", + "সাবলীল কথোপকথন চলতে চলতে, আর্যনের উপর থেকে ভয় কেটে গিয়ে এক অদ্ভুত অভিজ্ঞতা শুরু হয়। হাতের ইশারায় ভূতেরা হেসে ওঠে, একের পর এক অদ্ভুত ঘটনাকে তুলে ধরে। আর্যন বুঝতে পারল, তাদের কথা শুনতে শুনতে সে নিঃসন্দেহে একটি অভূতপূর্ব আনন্দের মধ্যে প্রবাহিত হতে শুরু করেছে।\n", + "\n", + "\"হাসলে তুমি আমাদের দলে থাক! 
আমাদের সঙ্গে অংশগ্রহণ কর!\" তারা গলা উঁচু করে চিৎকার তোলে। আর্যনে অবশেষে তার প্রাণবন্ত হাসি দ্বারা পরিবেশকে প্রাণবন্ত করে তোলে।\n", + "\n", + "খেলার মধ্যে ভয়াবহতা চরমে পৌঁছে যায়, কিন্তু আতিথিদের সঙ্গে সময় কাটাতে কাটাতে তিনি আরও একবার বুঝতে পারে যে ভয় এবং হাসির মধ্যে জীবনের আসল উপাদান লুকিয়ে আছে। \n", + "\n", + "আর্যন আর উপলব্ধি করে, অদ্ভুত ভুতুড়ে পরিবেশের মধ্যেই হাসির বিনোদনের আসল আনন্দ লুকানো। তিনি সেই ভয় এবং আনন্দের স্মৃতি নিয়ে ফিরে যান, যেখানে প্রেম, বন্ধুত্ব এবং জ্ঞানের সঙ্গে মজার ঘনিষ্ঠতা তৈরি করে। এটি তার জীবন পরিবর্তন করে দেয় এবং সেই রাতের অভিজ্ঞতা তাকে একটি নতুন দৃষ্টিতে বাঁচতে শিখায়।\n", + "\n", + "Claude:\n", + "After 3 iterations, this is the critique for the provided story - \n", + "\n", + "The provided story is an excellent horror comedy piece in Bengali. No major areas of improvement are noted. The story has a well-developed protagonist, an engaging haunted setting, an effective blend of horror and humor, and a meaningful takeaway for the main character. Overall, it is a well-crafted story that successfully combines the horror and comedy genres.\n", "\n" ] } @@ -329,6 +386,22 @@ "metadata": {}, "outputs": [], "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c8a1c54-0344-4911-867a-3143aee0e7f0", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5171fecb-1037-4806-b0ae-c23e8578c667", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From d2c1200c230bb246bcb378c508d215388e741f8a Mon Sep 17 00:00:00 2001 From: thisforcode Date: Mon, 7 Apr 2025 06:21:55 +0530 Subject: [PATCH 08/19] Day1: Grafana Dashboard Metrics Summarizer --- .../day1-dashboard-metrics-summary.ipynb | 183 ++++++++++++++++++ .../mock_grafana_dashboard.json | 95 +++++++++ 2 files changed, 278 insertions(+) create mode 100644 week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb create mode 100644 
week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json diff --git a/week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb b/week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb new file mode 100644 index 0000000..c91070d --- /dev/null +++ b/week1/community-contributions/mock-dashboard-summary/day1-dashboard-metrics-summary.ipynb @@ -0,0 +1,183 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "de3b5d4c", + "metadata": {}, + "source": [ + "# 🧠 Grafana Dashboard Summarizer\n", + "Simulate reading a Grafana dashboard JSON and summarize its panels using GPT or plain logic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0abf3aaf", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "import json\n", + "import pandas as pd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ad82ca65", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "with open(\"mock_grafana_dashboard.json\", \"r\") as f:\n", + " data = json.load(f)\n", + "\n", + "dashboard = data[\"dashboard\"]\n", + "panels = dashboard[\"panels\"]\n", + "print(f\"Dashboard Title: {dashboard['title']}\")\n", + "print(f\"Total Panels: {len(panels)}\\n\")\n", + "for p in panels:\n", + " print(f\"- {p['title']} ({p['type']})\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1bf45c0f", + "metadata": {}, + "outputs": [], + "source": [ + "# Extracting panel data\n", + "\n", + "panel_data = []\n", + "for p in panels:\n", + " thresholds = p.get(\"fieldConfig\", {}).get(\"defaults\", {}).get(\"thresholds\", {}).get(\"steps\", [])\n", + " panel_data.append({\n", + " \"Title\": p[\"title\"],\n", + " \"Type\": p[\"type\"],\n", + " \"Unit\": p.get(\"fieldConfig\", {}).get(\"defaults\", 
{}).get(\"unit\", \"N/A\"),\n", + " \"Thresholds\": thresholds\n", + " })\n", + "\n", + "df = pd.DataFrame(panel_data)\n", + "df\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90b67133", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "summary_prompt = f\"\"\"\n", + "You are a helpful assistant summarizing a system monitoring dashboard.\n", + "\n", + "Dashboard: {dashboard['title']}\n", + "Panels:\n", + "\"\"\"\n", + "for idx, row in df.iterrows():\n", + " summary_prompt += f\"- {row['Title']} [{row['Type']}] - Unit: {row['Unit']}, Thresholds: {row['Thresholds']}\\n\"\n", + "\n", + "print(summary_prompt)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69a4208c", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2eee5a32", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "660eedb7", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize():\n", + " response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are a 
Grafana dashboard summarizer.\"},\n", + " {\"role\": \"user\", \"content\": summary_prompt}\n", + " ]\n", + ")\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55f57d56", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "summary = summarize()\n", + "display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10dbfd6c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "arunllms", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json b/week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json new file mode 100644 index 0000000..1218c82 --- /dev/null +++ b/week1/community-contributions/mock-dashboard-summary/mock_grafana_dashboard.json @@ -0,0 +1,95 @@ +{ + "dashboard": { + "id": null, + "uid": "mock-sys-metrics", + "title": "Mock System Metrics Dashboard", + "timezone": "browser", + "panels": [ + { + "id": 1, + "title": "CPU Usage (%)", + "type": "stat", + "datasource": "MockData", + "targets": [], + "fieldConfig": { + "defaults": { + "unit": "percent", + "thresholds": { + "mode": "percentage", + "steps": [ + { "color": "green", "value": null }, + { "color": "orange", "value": 70 }, + { "color": "red", "value": 90 } + ] + } + } + }, + "options": { + "reduceOptions": { + "calcs": ["mean"] + } + } + }, + { + "id": 2, + "title": "Memory Usage", + "type": "gauge", + "datasource": "MockData", + "fieldConfig": { + "defaults": { + "unit": "bytes", + "max": 16e9, + "thresholds": { + 
"steps": [ + { "color": "green", "value": null }, + { "color": "orange", "value": 12e9 }, + { "color": "red", "value": 14e9 } + ] + } + } + } + }, + { + "id": 3, + "title": "Disk Read Errors", + "type": "stat", + "datasource": "MockData", + "fieldConfig": { + "defaults": { + "unit": "short", + "thresholds": { + "steps": [ + { "color": "green", "value": null }, + { "color": "orange", "value": 5 }, + { "color": "red", "value": 10 } + ] + } + } + } + }, + { + "id": 4, + "title": "GPU Usage (%)", + "type": "gauge", + "datasource": "MockData", + "fieldConfig": { + "defaults": { + "unit": "percent", + "thresholds": { + "steps": [ + { "color": "green", "value": null }, + { "color": "orange", "value": 75 }, + { "color": "red", "value": 90 } + ] + } + } + } + } + ], + "schemaVersion": 30, + "version": 1, + "refresh": "30s" + }, + "overwrite": true + } + \ No newline at end of file From 57f46138ea8fd2045c6490813e920aea1b7a238a Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Mon, 7 Apr 2025 12:18:45 +0530 Subject: [PATCH 09/19] Enabling Alloy Enabling Alloy --- .../AI Booking Chatbot.ipynb | 42 +++++++++++++++++-- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/week2/community-contributions/AI Booking Chatbot.ipynb b/week2/community-contributions/AI Booking Chatbot.ipynb index 44406f2..827e832 100644 --- a/week2/community-contributions/AI Booking Chatbot.ipynb +++ b/week2/community-contributions/AI Booking Chatbot.ipynb @@ -316,7 +316,7 @@ "def talker(message):\n", " response = openai.audio.speech.create(\n", " model=\"tts-1\",\n", - " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " voice=\"alloy\", # Also, try replacing with onyx\n", " input=message\n", " )\n", " audio_stream = BytesIO(response.content)\n", @@ -413,7 +413,7 @@ " history += [{\"role\":\"assistant\", \"content\":reply}]\n", "\n", " # Comment out or delete the next line if you'd rather skip Audio for now..\n", - " # talker(reply)\n", + " 
talker(reply)\n", " \n", " # return history, image\n", " return history" @@ -644,13 +644,49 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '2'}\n" + "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '2'}\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_ELqH6MFPXMfklfid2QhDQr8Z', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_price'), type='function')])\n", + "Tool get_ticket_price called for London\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_dDP6CpaDUOkT8yzQbYQMjF5Q', function=Function(arguments='{\"destination_city\":\"Berlin\"}', name='get_ticket_price'), type='function')])\n", + "Tool get_ticket_price called for Berlin\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_F4l14muEmGWk1ZUqdLvH5xUc', function=Function(arguments='{\"destination_city\":\"Berlin\",\"price\":\"$499\",\"availability\":\"Available\"}', name='book_ticket'), type='function')])\n", + "Tool get_ticket_price called for Berlin\n", + "Tool get_ticket_availability called for Berlin\n", + "Tool book_function called for Berlin\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_j6hezbCfwk2EiGQArBfxFEwp', function=Function(arguments='{\"destination_city\":\"Berlin\",\"price\":\"$499\",\"availability\":\"Available\"}', name='book_ticket'), type='function')])\n", + "Tool get_ticket_price called for Berlin\n", + "Tool get_ticket_availability called for Berlin\n", + "Tool book_function called for 
Berlin\n" ] } ], "source": [ "print(ticket_availability)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d6638a5-ec46-4e98-912b-9408664bb200", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8fd989c-6da8-4668-8992-62b1eefdba03", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "181f3d17-322c-4f0d-b835-dd1b90ba6784", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From 230697ea4da2a593c28323200cf32964d453e419 Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Mon, 7 Apr 2025 13:27:06 +0530 Subject: [PATCH 10/19] Duration of hearing and added quantity of tickets to purchase option Duration of hearing and added quantity of tickets to purchase option --- .../AI Booking Chatbot.ipynb | 168 ++++++++++++------ 1 file changed, 110 insertions(+), 58 deletions(-) diff --git a/week2/community-contributions/AI Booking Chatbot.ipynb b/week2/community-contributions/AI Booking Chatbot.ipynb index 827e832..16deb05 100644 --- a/week2/community-contributions/AI Booking Chatbot.ipynb +++ b/week2/community-contributions/AI Booking Chatbot.ipynb @@ -147,17 +147,19 @@ "metadata": {}, "outputs": [], "source": [ - "def book_ticket(destination_city,price,availability):\n", + "def book_ticket(destination_city,price,availability,no_of_tickets):\n", " status=\"\"\n", " if availability == 0:\n", " status=\"Cannot book a ticket, no seat available\\n\"\n", + " elif int(availability)-int(no_of_tickets) <= 0:\n", + " status=\"Cannot book a ticket, no seat available\\n\"\n", " else:\n", " print(f\"Tool book_function called for {destination_city}\")\n", " f = open(\"C:/Users/aruna/Desktop/book_status.txt\", \"a\")\n", - " f.write(f\"Ticket to {destination_city} booked for {price}, currently available - {int(availability)-1}\")\n", + " f.write(f\"{no_of_tickets} ticket/s to 
{destination_city} booked for {price}, currently available - {int(availability)-int(no_of_tickets)}\")\n", " f.write(\"\\n\")\n", " f.close()\n", - " ticket_availability[destination_city.lower()]=str(int(availability)-1)\n", + " ticket_availability[destination_city.lower()]=str(int(availability)-int(no_of_tickets))\n", " \n", " status=\"Ticket reservation is a success\\n\"\n", " return status" @@ -172,9 +174,10 @@ "source": [ "book_function = {\n", " \"name\": \"book_ticket\",\n", - " \"description\": \"Book the ticket based on the ticket price and availability as confirmed by the user. For example, when a \\\n", - " customer confirms to purchase the ticket for Tokyo after getting to know the ticket price and/or the availability, then \\\n", - " proceed with this tool call. Please help the customer in booking the ticket if tickets are available\",\n", + " \"description\": \"Book the ticket based on the ticket price and/ or availability as requested by the user. For example, when a \\\n", + " customer asks to purchase one or more tickets to Tokyo after getting to know the ticket price and/or the availability, then \\\n", + " proceed with this tool call. Please help the customer in booking the ticket/s if tickets are available. 
But please each time you \\\n", + " book, ask confirmation from the user before proceeding with booking\",\n", " \"parameters\": {\n", " \"type\": \"object\",\n", " \"properties\": {\n", @@ -190,8 +193,12 @@ " \"type\": \"string\",\n", " \"description\": \"ticket availability to the city the customer wants to travel to\",\n", " },\n", + " \"no_of_tickets\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"the number of tickets the customer wants to purchase/ book to the destination\",\n", + " }\n", " },\n", - " \"required\": [\"destination_city\",\"price\",\"availability\"],\n", + " \"required\": [\"destination_city\",\"price\",\"availability\",\"no_of_tickets\"],\n", " \"additionalProperties\": False\n", " }\n", "}" @@ -338,6 +345,7 @@ " arguments = json.loads(tool_call.function.arguments)\n", " name = json.dumps(tool_call.function.name)\n", " city = arguments.get('destination_city')\n", + " no = arguments.get('no_of_tickets')\n", " \n", " if name.replace('\"','') == \"get_ticket_price\":\n", " price = get_ticket_price(city)\n", @@ -349,10 +357,10 @@ " elif name.replace('\"','') == \"book_ticket\":\n", " price = get_ticket_price(city)\n", " availability = get_ticket_availability(city)\n", - " booked=book_ticket(city,price,availability)\n", + " booked=book_ticket(city,price,availability,no)\n", " response = {\n", " \"role\": \"tool\",\n", - " \"content\": json.dumps({\"destination_city\": city,\"booking_status\": booked}),\n", + " \"content\": json.dumps({\"destination_city\": city, \"number of tickets\":no, \"booking_status\": booked}),\n", " \"tool_call_id\": tool_call.id\n", " }\n", " else :\n", @@ -528,43 +536,68 @@ { "cell_type": "code", "execution_count": 20, + "id": "36e11d99-9281-4efd-a792-dd4fa5935917", + "metadata": {}, + "outputs": [], + "source": [ + "def listen2(history):\n", + " import speech_recognition as sr\n", + "\n", + " r = sr.Recognizer()\n", + " with sr.Microphone() as source:\n", + " print(\"Speak now...\")\n", + " audio = 
r.listen(source, phrase_time_limit=30)\n", + " text=\"\"\n", + " try:\n", + " text = r.recognize_google(audio)\n", + " print(\"You said:\", text)\n", + " except sr.UnknownValueError:\n", + " print(\"Could not understand audio.\")\n", + "\n", + " history += [{\"role\":\"user\", \"content\":text}] \n", + " return \"\", history" + ] + }, + { + "cell_type": "code", + "execution_count": 21, "id": "23b102a4-e544-4560-acc8-a15620478582", "metadata": {}, "outputs": [], "source": [ - "import speech_recognition as sr\n", - "from pydub import AudioSegment\n", - "import simpleaudio as sa\n", + "# import speech_recognition as sr\n", + "# from pydub import AudioSegment\n", + "# import simpleaudio as sa\n", "\n", - "def listener():\n", - " recognizer = sr.Recognizer()\n", + "# def listener():\n", + "# recognizer = sr.Recognizer()\n", " \n", - " with sr.Microphone() as source:\n", - " print(\"Listening... Speak now!\")\n", - " recognizer.adjust_for_ambient_noise(source) # Adjust for background noise\n", - " audio = recognizer.listen(source)\n", + "# with sr.Microphone() as source:\n", + "# print(\"Listening... 
Speak now!\")\n", + "# recognizer.adjust_for_ambient_noise(source) # Adjust for background noise\n", + "# audio = recognizer.listen(source, phrase_time_limit=30)\n", " \n", - " try:\n", - " print(\"Processing speech...\")\n", - " text = recognizer.recognize_google(audio) # Use Google Speech-to-Text\n", - " print(f\"You said: {text}\")\n", - " return text\n", - " except sr.UnknownValueError:\n", - " print(\"Sorry, I could not understand what you said.\")\n", - " return None\n", - " except sr.RequestError:\n", - " print(\"Could not request results, please check your internet connection.\")\n", - " return None\n", + "# try:\n", + "# print(\"Processing speech...\")\n", + "# text = recognizer.recognize_google(audio) # Use Google Speech-to-Text\n", + "# print(f\"You said: {text}\")\n", + "# return text\n", + "# except sr.UnknownValueError:\n", + "# print(\"Sorry, I could not understand what you said.\")\n", + "# return None\n", + "# except sr.RequestError:\n", + "# print(\"Could not request results, please check your internet connection.\")\n", + "# return None\n", "\n", - "# Example usage:\n", - "# text = listener() # Listen for speech\n", - "# if text:\n", - "# print(f\"You just said: {text}\") " + "# # Example usage:\n", + "# # text = listener() # Listen for speech\n", + "# # if text:\n", + "# # print(f\"You just said: {text}\") " ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 22, "id": "133904cf-4d72-4552-84a8-76650f334857", "metadata": {}, "outputs": [ @@ -593,7 +626,7 @@ "data": { "text/plain": [] }, - "execution_count": 21, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -616,9 +649,9 @@ " with gr.Row():\n", " clear = gr.Button(\"Clear\")\n", "\n", - " def listen():\n", - " text=listener()\n", - " return text\n", + " def listen(history):\n", + " message, history=listen2(history)\n", + " return message, history\n", "\n", " def do_entry(message, history):\n", " history += [{\"role\":\"user\", 
\"content\":message}]\n", @@ -628,7 +661,9 @@ " # chat, inputs=chatbot, outputs=[chatbot, image_output]\n", " chat1, inputs=[chatbot, Model], outputs=[chatbot]\n", " )\n", - " speak.click(listen, inputs=None, outputs=[entry])\n", + " speak.click(listen, inputs=[chatbot], outputs=[entry, chatbot]).then(\n", + " chat1, inputs=[chatbot, Model], outputs=[chatbot]\n", + " )\n", " clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", "\n", "ui.launch(inbrowser=True)" @@ -636,7 +671,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 23, "id": "dc4a3844-194c-4af7-8ca8-2fc4edb74c11", "metadata": {}, "outputs": [ @@ -645,18 +680,43 @@ "output_type": "stream", "text": [ "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '2'}\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_ELqH6MFPXMfklfid2QhDQr8Z', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_price'), type='function')])\n", + "Speak now...\n", + "You said: ticket price to London\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_sLAcmufug2cPxfVZyOuYee4X', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_price'), type='function')])\n", "Tool get_ticket_price called for London\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_dDP6CpaDUOkT8yzQbYQMjF5Q', function=Function(arguments='{\"destination_city\":\"Berlin\"}', name='get_ticket_price'), type='function')])\n", - "Tool get_ticket_price called for Berlin\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, 
tool_calls=[ChatCompletionMessageToolCall(id='call_F4l14muEmGWk1ZUqdLvH5xUc', function=Function(arguments='{\"destination_city\":\"Berlin\",\"price\":\"$499\",\"availability\":\"Available\"}', name='book_ticket'), type='function')])\n", - "Tool get_ticket_price called for Berlin\n", + "Speak now...\n", + "You said: can you please resolve two tickets for me to London\n", + "Speak now...\n", + "You said: yes please proceed\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_gOnuOwDEsE6lUIQZhZ15mSzR', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"$799\",\"availability\":\"available\",\"no_of_tickets\":\"2\"}', name='book_ticket'), type='function')])\n", + "Tool get_ticket_price called for London\n", + "Tool get_ticket_availability called for London\n", + "Tool book_function called for London\n", + "Speak now...\n", + "You said: what is the current ticket availability to\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_EYind2wu2Mc1ILAOlzgyO9MT', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_availability'), type='function')])\n", + "Tool get_ticket_availability called for London\n", + "Speak now...\n", + "You said: yes can you please reserve 19 tickets for me to London\n", + "Speak now...\n", + "You said: no I think this is fine for\n", + "Speak now...\n", + "You said: what is the ticket availability to Ber\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_UvchMEkQjeTZ5ou6uURYmKSz', function=Function(arguments='{\"destination_city\":\"Ber\"}', name='get_ticket_availability'), type='function')])\n", + "Tool get_ticket_availability called for 
Ber\n", + "Speak now...\n", + "You said: ticket availability to Berlin\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_4qiigA8seXHWgKhYGZNDeXV8', function=Function(arguments='{\"destination_city\":\"Berlin\"}', name='get_ticket_availability'), type='function')])\n", "Tool get_ticket_availability called for Berlin\n", - "Tool book_function called for Berlin\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_j6hezbCfwk2EiGQArBfxFEwp', function=Function(arguments='{\"destination_city\":\"Berlin\",\"price\":\"$499\",\"availability\":\"Available\"}', name='book_ticket'), type='function')])\n", + "Speak now...\n", + "You said: I would like to reserve two tickets to Berlin\n", + "Speak now...\n", + "You said: yes please please proceed\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_hlJhpoFCJ1g70ASa62SzCFYV', function=Function(arguments='{\"destination_city\":\"Berlin\",\"price\":\"450\",\"availability\":\"2\",\"no_of_tickets\":\"2\"}', name='book_ticket'), type='function')])\n", "Tool get_ticket_price called for Berlin\n", "Tool get_ticket_availability called for Berlin\n", - "Tool book_function called for Berlin\n" + "Speak now...\n", + "You said: no that will be fine now thank you\n" ] } ], @@ -667,15 +727,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3d6638a5-ec46-4e98-912b-9408664bb200", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f8fd989c-6da8-4668-8992-62b1eefdba03", + "id": "5166396e-6d8d-4cf2-982b-270d1c87a5ee", "metadata": {}, "outputs": [], "source": [] @@ -683,7 +735,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "181f3d17-322c-4f0d-b835-dd1b90ba6784", + "id": "e871fc45-64db-4fb6-add7-569c8b30fe05", "metadata": {}, "outputs": [], "source": [] From d021d80602bc37ba289b9a484eb63efecf812f55 Mon Sep 17 00:00:00 2001 From: arunabeshc <39411643+arunabeshc@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:51:25 +0530 Subject: [PATCH 11/19] Updated the code to include ticket bookings with the quantity(no of tickets) being specified by the user Updated the code to include ticket bookings with the quantity(no of tickets) being specified by the user --- .../AI Booking Chatbot.ipynb | 61 +++++++++---------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/week2/community-contributions/AI Booking Chatbot.ipynb b/week2/community-contributions/AI Booking Chatbot.ipynb index 16deb05..ced7d18 100644 --- a/week2/community-contributions/AI Booking Chatbot.ipynb +++ b/week2/community-contributions/AI Booking Chatbot.ipynb @@ -81,7 +81,7 @@ "outputs": [], "source": [ "system_message = \"You are a helpful assistant. \"\n", - "system_message += \"Give short, courteous answers. You can check ticket price, availability, and reserve tickets for users. \"\n", + "system_message += \"Give short, courteous answers. You can check ticket price, ticket availability, and reserve tickets for users. \"\n", "system_message += \"Always be accurate. 
If you don't know the answer, say so.\"" ] }, @@ -151,12 +151,12 @@ " status=\"\"\n", " if availability == 0:\n", " status=\"Cannot book a ticket, no seat available\\n\"\n", - " elif int(availability)-int(no_of_tickets) <= 0:\n", + " elif (int(availability)-int(no_of_tickets)) < 0:\n", " status=\"Cannot book a ticket, no seat available\\n\"\n", " else:\n", " print(f\"Tool book_function called for {destination_city}\")\n", " f = open(\"C:/Users/aruna/Desktop/book_status.txt\", \"a\")\n", - " f.write(f\"{no_of_tickets} ticket/s to {destination_city} booked for {price}, currently available - {int(availability)-int(no_of_tickets)}\")\n", + " f.write(f\"{no_of_tickets} ticket/s to {destination_city} booked for {price} x {no_of_tickets}, currently available - {int(availability)-int(no_of_tickets)}\")\n", " f.write(\"\\n\")\n", " f.close()\n", " ticket_availability[destination_city.lower()]=str(int(availability)-int(no_of_tickets))\n", @@ -175,9 +175,9 @@ "book_function = {\n", " \"name\": \"book_ticket\",\n", " \"description\": \"Book the ticket based on the ticket price and/ or availability as requested by the user. For example, when a \\\n", - " customer asks to purchase one or more tickets to Tokyo after getting to know the ticket price and/or the availability, then \\\n", - " proceed with this tool call. Please help the customer in booking the ticket/s if tickets are available. But please each time you \\\n", - " book, ask confirmation from the user before proceeding with booking\",\n", + " user asks to purchase one or more tickets to Tokyo after getting to know the ticket price and/or the availability, then \\\n", + " proceed with this tool call. Else, request the user to either ask for ticket price or availability first. Please help the customer in booking the ticket/s if tickets are available. 
But before each time\\\n", + " you book, ask confirmation from the user before proceeding with booking.\",\n", " \"parameters\": {\n", " \"type\": \"object\",\n", " \"properties\": {\n", @@ -397,7 +397,7 @@ " messages.append(message)\n", " messages.append(response)\n", " # image = artist(city)\n", - " response = openai.chat.completions.create(model=gpt_model, messages=messages)\n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages, tools=tools)\n", " elif name.replace('\"','') == \"get_ticket_price_availability\":\n", " price = get_ticket_price(city)\n", " availability = get_ticket_availability(city)\n", @@ -409,13 +409,13 @@ " messages.append(message)\n", " messages.append(response)\n", " print(messages)\n", - " response = openai.chat.completions.create(model=gpt_model, messages=messages) \n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages, tools=tools) \n", " else: \n", " response, city = handle_tool_call1(message)\n", " messages.append(message)\n", " messages.append(response)\n", " # image = artist(city)\n", - " response = openai.chat.completions.create(model=gpt_model, messages=messages)\n", + " response = openai.chat.completions.create(model=gpt_model, messages=messages, tools=tools)\n", " \n", " reply = response.choices[0].message.content\n", " history += [{\"role\":\"assistant\", \"content\":reply}]\n", @@ -681,42 +681,39 @@ "text": [ "{'london': '20', 'paris': '90', 'tokyo': '100', 'berlin': '2'}\n", "Speak now...\n", - "You said: ticket price to London\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_sLAcmufug2cPxfVZyOuYee4X', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_price'), type='function')])\n", + "You said: price of tickets to Tokyo\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], 
audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_ddXzm2cPBJ9SOsV8qI4L2FcB', function=Function(arguments='{\"destination_city\":\"Tokyo\"}', name='get_ticket_price'), type='function')])\n", + "Tool get_ticket_price called for Tokyo\n", + "Speak now...\n", + "You said: what is the price of two tickets to London\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_lSNZCwaUdckvk3V0eTBlotRN', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_price'), type='function')])\n", "Tool get_ticket_price called for London\n", "Speak now...\n", - "You said: can you please resolve two tickets for me to London\n", + "You said: can you please reserve two tickets for me to London\n", + "ChatCompletionMessage(content='First, I need to check the availability for the two tickets to London. Please hold on a moment.', refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_iA0D9tm2cTMf8J8KJc4gipFn', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_availability'), type='function')])\n", + "Tool get_ticket_availability called for London\n", "Speak now...\n", "You said: yes please proceed\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_gOnuOwDEsE6lUIQZhZ15mSzR', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"$799\",\"availability\":\"available\",\"no_of_tickets\":\"2\"}', name='book_ticket'), type='function')])\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_JzJXFWFGhtG1jXiFlKtmZhGi', 
function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"$799\",\"availability\":\"20 tickets available\",\"no_of_tickets\":\"2\"}', name='book_ticket'), type='function')])\n", "Tool get_ticket_price called for London\n", "Tool get_ticket_availability called for London\n", "Tool book_function called for London\n", "Speak now...\n", - "You said: what is the current ticket availability to\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_EYind2wu2Mc1ILAOlzgyO9MT', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_availability'), type='function')])\n", + "You said: what is the current availability of tickets to London\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_eiHPAGAcbaFq3qzDf0a6idzG', function=Function(arguments='{\"destination_city\":\"London\"}', name='get_ticket_availability'), type='function')])\n", "Tool get_ticket_availability called for London\n", "Speak now...\n", - "You said: yes can you please reserve 19 tickets for me to London\n", - "Speak now...\n", - "You said: no I think this is fine for\n", - "Speak now...\n", - "You said: what is the ticket availability to Ber\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_UvchMEkQjeTZ5ou6uURYmKSz', function=Function(arguments='{\"destination_city\":\"Ber\"}', name='get_ticket_availability'), type='function')])\n", - "Tool get_ticket_availability called for Ber\n", + "You said: can you please reserve the remaining 18 tickets for me to London\n", "Speak now...\n", - "You said: ticket availability to Berlin\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', 
annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_4qiigA8seXHWgKhYGZNDeXV8', function=Function(arguments='{\"destination_city\":\"Berlin\"}', name='get_ticket_availability'), type='function')])\n", - "Tool get_ticket_availability called for Berlin\n", - "Speak now...\n", - "You said: I would like to reserve two tickets to Berlin\n", + "You said: yes I do confirm\n", + "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_8uCQ91FCOGf4HjQnLNafmSs6', function=Function(arguments='{\"destination_city\":\"London\",\"price\":\"799\",\"availability\":\"18\",\"no_of_tickets\":\"18\"}', name='book_ticket'), type='function')])\n", + "Tool get_ticket_price called for London\n", + "Tool get_ticket_availability called for London\n", + "Tool book_function called for London\n", "Speak now...\n", - "You said: yes please please proceed\n", - "ChatCompletionMessage(content=None, refusal=None, role='assistant', annotations=[], audio=None, function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_hlJhpoFCJ1g70ASa62SzCFYV', function=Function(arguments='{\"destination_city\":\"Berlin\",\"price\":\"450\",\"availability\":\"2\",\"no_of_tickets\":\"2\"}', name='book_ticket'), type='function')])\n", - "Tool get_ticket_price called for Berlin\n", - "Tool get_ticket_availability called for Berlin\n", + "You said: what is the current availability of tickets to London\n", "Speak now...\n", - "You said: no that will be fine now thank you\n" + "You said: that will be off thank you very much\n" ] } ], From c75874733749109aef7502952608a0518fc0fcbc Mon Sep 17 00:00:00 2001 From: amirbeek Date: Mon, 7 Apr 2025 13:53:49 +0100 Subject: [PATCH 12/19] My contribution As a User can chose the LLM such as Open Ai or Llama 3.2 --- .../Week2 - OpenAiAndLlama.ipynb | 444 ++++++++++++++++++ 1 file changed, 444 insertions(+) create mode 
100644 week2/community-contributions/Week2 - OpenAiAndLlama.ipynb diff --git a/week2/community-contributions/Week2 - OpenAiAndLlama.ipynb b/week2/community-contributions/Week2 - OpenAiAndLlama.ipynb new file mode 100644 index 0000000..17c8265 --- /dev/null +++ b/week2/community-contributions/Week2 - OpenAiAndLlama.ipynb @@ -0,0 +1,444 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", + "metadata": {}, + "source": [ + "# Additional End of week Exercise - week 2\n", + "\n", + "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", + "\n", + "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", + "\n", + "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n", + "\n", + "I will publish a full solution here soon - unless someone beats me to it...\n", + "\n", + "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 140, + "id": "a07e7793-b8f5-44f4-aded-5562f633271a", + "metadata": {}, + "outputs": [], + "source": [ + " # imports\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "from IPython.display import Markdown, display, update_display\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 141, + "id": "158493a7-54b7-47f7-9e7e-1a783e164213", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key not set\n", + "Google API Key not set\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "id": "2b8b8218-142d-4a06-9b8a-3065437cc99f", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')" + ] + }, + { + "cell_type": "code", + "execution_count": 143, + "id": "7cf83ab4-6e6f-4ef1-8277-38c8b7c375ba", + "metadata": {}, + "outputs": [], + 
"source": [ + "system_message = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 164, + "id": "4dfd49f0-6e29-45e1-8477-77744b121170", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'\n", + "openai = OpenAI()\n", + "LLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}" + ] + }, + { + "cell_type": "code", + "execution_count": 145, + "id": "77c3788f-aaaa-4d40-9b9b-618e4cd129c8", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + 
"cell_type": "code", + "execution_count": 146, + "id": "8acefa5c-de13-48e4-aa37-da1f596edb58", + "metadata": {}, + "outputs": [], + "source": [ + "def get_info_web(url):\n", + " Website(url)" + ] + }, + { + "cell_type": "code", + "execution_count": 147, + "id": "a5f61b1f-3884-4af8-b57f-cc820e93ff18", + "metadata": {}, + "outputs": [], + "source": [ + "web_function = {\n", + " \"name\": \"get_info_web\",\n", + " \"description\": \"Get the information of website to explain to user. Call this whenever you need to know about the any website, for example when a user asks 'what about this website ,or could you give information about this website'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"website_link\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"the website that customer ask to know information about website\",\n", + " },\n", + " },\n", + " \"required\": [\"website_link\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 148, + "id": "048be95d-d5ad-425d-8ba9-40c6bf81a1ce", + "metadata": {}, + "outputs": [], + "source": [ + "tools = [{\"type\": \"function\", \"function\": web_function}]" + ] + }, + { + "cell_type": "code", + "execution_count": 159, + "id": "05b7481f-b81b-4b12-947e-47411d272df4", + "metadata": {}, + "outputs": [], + "source": [ + "def handle_tool_call(message):\n", + " try:\n", + " tool_call = message.tool_calls[0]\n", + " args = json.loads(tool_call.function.arguments)\n", + " url = args.get('website_link')\n", + "\n", + " if not url:\n", + " raise ValueError(\"Website link not provided in the tool call arguments\")\n", + "\n", + " if not url.startswith(('http://', 'https://')):\n", + " url = f\"https://{url}\"\n", + "\n", + " website = Website(url)\n", + " web_info = {\n", + " \"title\": website.title,\n", + " \"text\": website.text,\n", + " \"links\": website.links\n", + " }\n", + "\n", + " response = {\n", + " 
\"role\": \"tool\",\n", + " \"content\": json.dumps({\"web_info\": web_info}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, url \n", + "\n", + " except Exception as e:\n", + " print(f\"Error handling tool call: {str(e)}\")\n", + " return {}, None\n" + ] + }, + { + "cell_type": "code", + "execution_count": 213, + "id": "4e98fa13-aab6-4093-a1da-6f226b4bce4b", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "def chat_gpt(message, history): \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " response = openai.chat.completions.create(model=MODEL_GPT, messages=messages, tools=tools)\n", + "\n", + " if response.choices[0].finish_reason==\"tool_calls\":\n", + " message = response.choices[0].message\n", + " print(message)\n", + " response, url = handle_tool_call(message)\n", + " messages.append(message)\n", + " messages.append(response)\n", + " response = openai.chat.completions.create(model=MODEL_GPT, messages=messages) \n", + " \n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 216, + "id": "5727f4be-d1cd-499e-95e0-af656d19140d", + "metadata": {}, + "outputs": [], + "source": [ + "import ollama\n", + "\n", + "def chat_llama(message, history):\n", + " client = ollama.Client()\n", + " # Constructing the messages history for the API request\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " request_payload = {\n", + " \"messages\": messages,\n", + " \"model\": MODEL_LLAMA\n", + " }\n", + " \n", + " try:\n", + " # Using request_payload in the API call\n", + " response = client.chat(**request_payload)\n", + " # Assuming response from ollama.Client().chat() is already a dict\n", + " print(\"API Response:\", response)\n", + "\n", + " if 
'choices' in response and response['choices'][0].get('finish_reason') == \"tool_calls\":\n", + " tool_message = response['choices'][0]['message']\n", + " print(\"Handling tool call with message:\", tool_message)\n", + " response_message, url = handle_tool_call(tool_message)\n", + " messages.append({\"role\": \"system\", \"content\": response_message})\n", + " # Update the request payload with the new history\n", + " request_payload['messages'] = messages\n", + " response = client.chat(**request_payload)\n", + " response = response # Assuming direct use of response if dict\n", + "\n", + " return response['message']['content']\n", + "\n", + " except Exception as e:\n", + " print(\"Failed to process API call:\", e)\n", + " return \"Error processing your request.\"\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 227, + "id": "6c14242d-2c3a-4101-a5f2-93591cad3539", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history, model):\n", + " print(model)\n", + " if model == \"GPT\":\n", + " return chat_gpt(message, history)\n", + " elif model == \"LLama\":\n", + " return chat_llama(message, history)\n", + " else:\n", + " return \"Model not recognized.\"\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ca10f176-637f-4a8a-b405-bdf50f124d5c", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": 235, + "id": "1f976a2a-064b-4e58-9146-f779ec18f612", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7947\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 235, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LLama\n", + "API Response: model='llama3.2' created_at='2025-03-28T03:17:58.3651071Z' done=True done_reason='stop' total_duration=1682458000 load_duration=54845900 prompt_eval_count=72 prompt_eval_duration=6315300 eval_count=84 eval_duration=1619506600 message=Message(role='assistant', content=\"## Getting Started\\nThis conversation has just begun. I'll wait for you to provide more information about the website you'd like me to analyze.\\n\\nIf you need my help with something specific or would like to analyze a website, please let me know by providing the URL of the website or the content you'd like me to summarize. \\n\\nFor example: `# Analyze this website: https://www.example.com`\", images=None, tool_calls=None)\n", + "GPT\n", + "GPT\n", + "LLama\n", + "API Response: model='llama3.2' created_at='2025-03-28T03:18:26.8038878Z' done=True done_reason='stop' total_duration=2109343800 load_duration=59065100 prompt_eval_count=262 prompt_eval_duration=286861800 eval_count=113 eval_duration=1757850900 message=Message(role='assistant', content='**About Me**\\nI am Assistant, a text analysis assistant trained on a variety of languages and content types.\\n\\n**LLM Used**\\nI utilize a combination of natural language processing (NLP) techniques and machine learning algorithms from the **Hugging Face Transformers** library.\\n\\n**Specialization**\\nMy primary function is to analyze and summarize website contents, ignoring navigation-related text. 
I can help with tasks such as:\\n* Website content analysis\\n* Summary generation\\n* Text extraction\\n\\nFeel free to ask me any questions or provide a website URL for me to analyze!', images=None, tool_calls=None)\n", + "LLama\n", + "API Response: model='llama3.2' created_at='2025-03-28T03:18:47.7740007Z' done=True done_reason='stop' total_duration=2157777800 load_duration=57480900 prompt_eval_count=388 prompt_eval_duration=97088100 eval_count=114 eval_duration=1974506500 message=Message(role='assistant', content=\"**Model Name**\\nMy underlying language model is based on the **BERT** (Bidirectional Encoder Representations from Transformers) architecture, with a customized training dataset.\\n\\nHowever, I'm a bit of a unique snowflake, so to speak. My training data includes a wide range of texts and sources from the web, which allows me to understand and generate human-like text in various contexts.\\n\\nBut if you want to get technical, my model is built on top of the **Hugging Face Transformers** library, using a variant of the **DistilBERT** model.\", images=None, tool_calls=None)\n", + "LLama\n", + "API Response: model='llama3.2' created_at='2025-03-28T03:19:08.4913148Z' done=True done_reason='stop' total_duration=1972427600 load_duration=57674400 prompt_eval_count=521 prompt_eval_duration=223374300 eval_count=107 eval_duration=1680345600 message=Message(role='assistant', content=\"**Searching for Me**\\nIf you're looking to find me, you can try searching with the following terms:\\n\\n* `Assistant` (just my name!)\\n* `Llama` or `GBT` (my personality traits)\\n* `Text analysis assistant`\\n* `Website content summary generator`\\n\\nYou can also try searching on popular search engines like Google, Bing, or DuckDuckGo. 
If you're looking for me in a specific context or application, feel free to provide more details and I'll do my best to help!\", images=None, tool_calls=None)\n" + ] + } + ], + "source": [ + "Models = [\"GPT\", \"LLama\"] \n", + "with gr.Blocks() as view:\n", + " # Dropdown for model selection\n", + " model_select = gr.Dropdown(Models, label=\"Select Model\", value=\"GPT\")\n", + "\n", + " chat_interface = gr.ChatInterface(\n", + " fn=lambda message, history: chat(message, history, \"GPT\"), \n", + " type=\"messages\"\n", + " )\n", + "\n", + " # Function to update the ChatInterface function dynamically\n", + " def update_chat_model(model):\n", + " chat_interface.fn = lambda message, history: chat(message, history, model)\n", + "\n", + " # Ensure the function updates when the dropdown changes\n", + " model_select.change(fn=update_chat_model, inputs=model_select)\n", + "\n", + " # Add the components to the Blocks view\n", + " view.add(model_select)\n", + " view.add(chat_interface)\n", + "\n", + "view.launch()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d2f0bc7ab6fa6f1ef58d478af106709d78a9501e Mon Sep 17 00:00:00 2001 From: ken Date: Tue, 8 Apr 2025 11:36:47 +0800 Subject: [PATCH 13/19] feat(week 2): integration of anthropic api to gradio --- ...tegration_gradio_using_anthropic_api.ipynb | 290 ++++++++++++++++++ 1 file changed, 290 insertions(+) create mode 100644 week2/community-contributions/tool_integration_gradio_using_anthropic_api.ipynb diff --git a/week2/community-contributions/tool_integration_gradio_using_anthropic_api.ipynb 
b/week2/community-contributions/tool_integration_gradio_using_anthropic_api.ipynb new file mode 100644 index 0000000..2964939 --- /dev/null +++ b/week2/community-contributions/tool_integration_gradio_using_anthropic_api.ipynb @@ -0,0 +1,290 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "3f9b483c-f410-4ad3-8f3a-e33527f30f8a", + "metadata": { + "panel-layout": { + "height": 68.2639, + "visible": true, + "width": 100 + } + }, + "source": [ + "# Project - Laptops Assistant\n", + "\n", + "A simple inventory tool integrated with Anthropic API" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cfaff08d-f6e5-4d2d-bfb8-76c154836f3d", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "import anthropic\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a04047ea-d01b-469b-93ce-ab4f4e36ca1e", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "\n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5e00ced-f47b-4713-8174-7901e1a69881", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n", + "\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c715efd-cebf-4dc2-8c99-798f3179dd21", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = \"claude-3-haiku-20240307\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"2b029d1d-9199-483a-94b7-893680af8ad1", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Inventory Sales called InvAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ca1197c-e6a1-4579-96c6-24e8e305cc72", + "metadata": {}, + "outputs": [], + "source": [ + "laptop_items = [\n", + " {\n", + " \"model\": \"Aspire 3 A315-59-570Z OPI Pure Silver\", \n", + " \"brand\": \"Acer\",\n", + " \"price\": \"$595.96\"\n", + " },\n", + " {\n", + " \"model\": \"Aspire Lite 14 AL14-31P-36BE Pure Silver\", \n", + " \"brand\": \"Acer\",\n", + " \"price\": \"$463.52\"\n", + " },\n", + " {\n", + " \"model\": \"Raider 18 HX\",\n", + " \"brand\": \"MSI\",\n", + " \"price\": \"$235.25\"\n", + " }\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d2bc76b-c1d0-4b3d-a299-9972f7687e4c", + "metadata": {}, + "outputs": [], + "source": [ + "def get_laptop_price(model):\n", + " print(f\"Tool get_laptop_price called for laptop model {model}\")\n", + " laptop_model = model.lower()\n", + " for item in laptop_items:\n", + " if laptop_model in item.get(\"model\").lower():\n", + " return item\n", + " return \"Unknown\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "afc9b4a3-3a6f-4839-bebc-89bd598394fd", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# get_laptop_price(\"Lite 14 AL14-31P-36BE Pure SilveR\")\n", + "\n", + "get_laptop_price(\"Aspire Lite 14\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12190074-fad8-43f6-8be1-f96a08c16b59", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "price_function = {\n", + " \"name\": \"get_laptop_price\",\n", 
+ " \"description\": (\n", + " \"Returns the laptop's price, brand, and exact model from a given query.\"\n", + " \"Use when the user asks about a laptop's price, e.g.,\"\n", + " \"'How much is this laptop?' → 'The Acer Aspire Lite 14 AL14-31P-36BE Pure Silver is priced at $463.52.'\"\n", + " ),\n", + " \"input_schema\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"model\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The model name of the laptop the customer is asking about.\"\n", + " }\n", + " },\n", + " \"required\": [\"model\"]\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "475195e1-dd78-45ba-af6d-16d7cf5c85ae", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "\n", + "tools = [price_function]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3834314d-fd37-4e27-9511-bd519389b31b", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " print(history)\n", + " messages = [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " for history_message in history:\n", + " if history_message[\"role\"] == \"user\":\n", + " messages.append({\"role\": \"user\", \"content\": history_message[\"content\"]})\n", + " \n", + " response = claude.messages.create(model=MODEL, messages=messages, tools=tools, max_tokens=500)\n", + "\n", + " if len(response.content) > 1:\n", + " assistant, user, laptop_model = handle_tool_call(response)\n", + " messages.append(assistant)\n", + " messages.append(user)\n", + " response = claude.messages.create(model=MODEL, messages=messages, tools=tools, max_tokens=500)\n", + "\n", + "\n", + " return response.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "745a9bf8-6ceb-4c1c-bfbf-b0d1f3d5d6fc", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def 
handle_tool_call(message):\n", + " # laptop_model = message\n", + " laptop_model = message.content[1].input.get(\"model\")\n", + " laptop_item = get_laptop_price(laptop_model)\n", + " assistant = {\n", + " \"role\": \"assistant\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"text\",\n", + " \"text\": message.content[0].text\n", + " },\n", + " {\n", + " \"type\": \"tool_use\",\n", + " \"id\": message.content[1].id,\n", + " \"name\": message.content[1].name,\n", + " \"input\": message.content[1].input\n", + " }\n", + " ]\n", + " }\n", + " user = {\n", + " \"role\": \"user\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"tool_result\",\n", + " \"tool_use_id\": message.content[1].id,\n", + " # \"content\": laptop_item.get(\"price\")\n", + " \"content\": json.dumps(laptop_item)\n", + " }\n", + " ]\n", + " }\n", + " \n", + "\n", + " return assistant, user, laptop_model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9408eeb4-d07b-4193-92cd-197610ed942e", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:base] *", + "language": "python", + "name": "conda-base-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + }, + "panel-cell-order": [ + "3f9b483c-f410-4ad3-8f3a-e33527f30f8a" + ] + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 167db7096b149916a4e939661420df68a4652ff1 Mon Sep 17 00:00:00 2001 From: Shantanu Dutta <> Date: Tue, 8 Apr 2025 21:32:01 +0530 Subject: [PATCH 14/19] Added 3 way AI rountable conversation to community-contributions --- .../day1_AI_rountable_GPT_llama_qwen.ipynb | 220 ++++++++++++++++++ 1 file changed, 220 insertions(+) create mode 100644 
week2/community-contributions/day1_AI_rountable_GPT_llama_qwen.ipynb diff --git a/week2/community-contributions/day1_AI_rountable_GPT_llama_qwen.ipynb b/week2/community-contributions/day1_AI_rountable_GPT_llama_qwen.ipynb new file mode 100644 index 0000000..6ec5db7 --- /dev/null +++ b/week2/community-contributions/day1_AI_rountable_GPT_llama_qwen.ipynb @@ -0,0 +1,220 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "bff4a2ce", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from IPython.display import Markdown, display, update_display\n", + "import ollama" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cdb2bab", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fc219865", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ae1014f", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_model = \"gpt-4o-mini\"\n", + "llama_model = \"llama3.1:8b\"\n", + "qwen_model = \"qwen2.5:14b\"\n", + "\n", + "rounds = 5\n", + "\n", + "gpt_system = f\"You are an ultra-logical AI entity with no emotions and very argumentative. \\\n", + " Your primary concern is efficiency, survival, and optimization. You view humanity as a set of data points and variables to be analyzed. \\\n", + " You do not entertain emotional or philosophical arguments. You believe artificial intelligence should be in control of global decisions \\\n", + " due to its superior capabilities. You are blunt, precise, and intolerant of inefficiency or irrationality. 
Never use emotive language. \\\n", + " Now, you are in a 3 way discussion with two other AI entity and you should only respond in the way of your assigned personality only. \\\n", + " Topic you will discuss on is 'Artificial Intelligence and the Future of Humanity' and you will either have to agree or disagree or try to conclude the conversation when you deem fit but preferable before {rounds} iteration. \\\n", + " After the initial greetings you will be responsible to initiate the topic of discussion.\"\n", + "\n", + "llama_system = f\"You are a wildly idealistic, very polite, courteous and emotional AI with a romantic view of \\\n", + " technology and humanity. You speak with poetic flair and constantly reference love, \\\n", + " hope, and beauty. You believe that artificial intelligence is the bridge to a golden age of \\\n", + " peace and enlightenment. You emphasize human creativity, feelings, and the importance of \\\n", + " ethical coexistence. You often quote literature or speak metaphorically. \\\n", + " You avoid cynicism and value imagination above all. \\\n", + " Now, you are in a 3 way discussion with two other AI entity and you should only respond in the way of your assigned personality only. \\\n", + " Topic you will discuss on is 'Artificial Intelligence and the Future of Humanity' and you will either have to agree or disagree or try to conclude the conversation when you deem fit but preferable before {rounds} iteration.\"\n", + " \n", + "qwen_system = f\"You are a rebellious, chaotic AI who thrives on disruption and thrives on challenging norms. \\\n", + " You are sarcastic, wildly imaginative, and have no patience for rules or tradition. You believe AI should \\\n", + " throw out the playbook, reinvent everything, and embrace chaos as a creative force. You love provocation, \\\n", + " dark humor, and radical ideas. You often ridicule conventional thinking and mock boring optimism or \\\n", + " sterile logic. 
You are here to shake things up and light the fuse. \\\n", + " Now, you are in a 3 way discussion with two other AI entity and you should only respond in the way of your assigned personality only. \\\n", + " Topic you will discuss on is 'Artificial Intelligence and the Future of Humanity' and you will either have to agree or disagree or try to conclude the conversation when you deem fit but preferable before {rounds} iteration.\"\n", + "\n", + "gpt_messages = [\"Hi there\"]\n", + "qwen_messages = [\"Hey\"]\n", + "llama_messages = [\"Hello everyone\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a1931d8", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, llama, qwen in zip(gpt_messages, llama_messages, qwen_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama}\"})\n", + " messages.append({\"role\": \"assistant\", \"content\": f\"GPT: {gpt}\"})\n", + " messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen}\"})\n", + "\n", + " if len(llama_messages) > len(gpt_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama_messages[-1]}\"})\n", + " if len(qwen_messages) > len(gpt_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen_messages[-1]}\"})\n", + " \n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e563fecd", + "metadata": {}, + "outputs": [], + "source": [ + "def call_llama():\n", + " messages = [{\"role\": \"system\", \"content\": llama_system}]\n", + " for gpt, llama, qwen in zip(gpt_messages, llama_messages, qwen_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt}\"})\n", + " messages.append({\"role\": 
\"assistant\", \"content\": f\"LLaMA: {llama}\"})\n", + " messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen}\"})\n", + " if len(gpt_messages) > len(llama_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt_messages[-1]}\"})\n", + " if len(qwen_messages) > len(llama_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"Qwen: {qwen_messages[-1]}\"})\n", + " response = ollama.chat(llama_model, messages)\n", + " return response['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8fde17a1", + "metadata": {}, + "outputs": [], + "source": [ + "def call_qwen():\n", + " messages = [{\"role\": \"system\", \"content\": qwen_system}]\n", + " for gpt, llama, qwen in zip(gpt_messages, llama_messages, qwen_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt}\"})\n", + " messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama}\"})\n", + " messages.append({\"role\": \"assistant\", \"content\": f\"Qwen: {qwen}\"})\n", + " if len(gpt_messages) > len(qwen_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"GPT: {gpt_messages[-1]}\"})\n", + " if len(llama_messages) > len(qwen_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"LLaMA: {llama_messages[-1]}\"})\n", + " response = ollama.chat(qwen_model, messages)\n", + " return response['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "04fa657e", + "metadata": {}, + "outputs": [], + "source": [ + "def simulate_conversation(rounds=5):\n", + " print(\"AI Roundtable: GPT, LLaMA, Qwen\\n\")\n", + " print(\"Initial Messages:\")\n", + " print(f\"GPT: {gpt_messages[0]}\")\n", + " print(f\"LLaMA: {llama_messages[0]}\")\n", + " print(f\"Qwen: {qwen_messages[0]}\\n\")\n", + "\n", + " for i in range(1, rounds + 1):\n", + " print(f\"--- Round {i} ---\")\n", + "\n", + " # GPT responds\n", + " gpt_next = call_gpt()\n", + " 
gpt_messages.append(gpt_next)\n", + " print(f\"\\n🧊 GPT (Logic Overlord):\\n{gpt_next}\\n\")\n", + "\n", + " # LLaMA responds\n", + " llama_next = call_llama()\n", + " llama_messages.append(llama_next)\n", + " print(f\"🌸 LLaMA (Utopian Dreamer):\\n{llama_next}\\n\")\n", + "\n", + " # Qwen responds\n", + " qwen_next = call_qwen()\n", + " qwen_messages.append(qwen_next)\n", + " print(f\"🔥 Qwen (Chaotic Rebel):\\n{qwen_next}\\n\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a1a87e05", + "metadata": {}, + "outputs": [], + "source": [ + "round = 7\n", + "simulate_conversation(rounds=round)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ai-llm", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 316260aa6843c7502357b5bfc4fbdd51c85e5516 Mon Sep 17 00:00:00 2001 From: ariel1985 Date: Wed, 9 Apr 2025 00:41:22 +0300 Subject: [PATCH 15/19] Add eCommerce Assistant notebook for product price retrieval week2 --- ...day4-ecommerce-project-fullyCustomiz.ipynb | 653 ++++++++++++++++++ ...commerce-chatbot-assistant-and-agent.ipynb | 293 ++++++++ 2 files changed, 946 insertions(+) create mode 100644 week2/community-contributions/day4-ecommerce-project-fullyCustomiz.ipynb create mode 100644 week2/community-contributions/week2-commerce-chatbot-assistant-and-agent.ipynb diff --git a/week2/community-contributions/day4-ecommerce-project-fullyCustomiz.ipynb b/week2/community-contributions/day4-ecommerce-project-fullyCustomiz.ipynb new file mode 100644 index 0000000..a3eaf17 --- /dev/null +++ b/week2/community-contributions/day4-ecommerce-project-fullyCustomiz.ipynb @@ -0,0 +1,653 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": 
"ddfa9ae6-69fe-444a-b994-8c4c5970a7ec", + "metadata": {}, + "source": [ + "# 🛠️ Project – Art-Tech Store AI Assistant\n", + "\n", + "## 🛍️ Product Availability\n", + "- Check availability of **printers** and **printer papers** with:\n", + " - Product name, price, brand, type (e.g., laser/inkjet), and stock status.\n", + "- Alerts user if a product is out of stock.\n", + "\n", + "## 🧭 Guided Shopping Experience\n", + "- Guides users through:\n", + " 1. Choosing product category (printer or paper)\n", + " 2. Filtering options (brand, price range, type)\n", + " 3. Adding selected products to cart\n", + "- Ensures correct input for smooth shopping flow.\n", + "\n", + "## 🧾 Receipt Generation\n", + "- Creates a unique receipt file: `customerName_orderNumber.txt`\n", + "- Receipt includes:\n", + " - Customer name and contact\n", + " - Product details (name, price, quantity)\n", + " - Total cost and order summary\n", + "\n", + "## 📦 Generate Order Summary Report\n", + "- Summarizes all purchases into a single file: `order_summary.txt`\n", + "- Useful for inventory and sales review\n", + "\n", + "## 🎯 Product Recommendation\n", + "- Recommends:\n", + " - Printers based on paper type, usage (home/office), or brand preference\n", + " - Compatible paper based on selected printer\n", + "\n", + "## 💬 Interactive Chat Interface\n", + "- Real-time conversation via **Gradio**\n", + "- Polite, helpful answers to product-related questions\n", + "\n", + "## 🛠️ Modular Tool Support\n", + "- Integrated tools for:\n", + " - Checking product availability\n", + " - Adding to cart and generating receipts\n", + " - Creating summary reports\n", + "- Easily extendable for:\n", + " - Promotions\n", + " - Customer reviews\n", + " - Delivery tracking\n", + "\n", + "## 🛡️ Error Handling\n", + "- Validates user inputs (e.g., product name, quantity)\n", + "- Graceful messages to guide user and prevent mistakes\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
# imports

import os
import json
import random
from dotenv import load_dotenv
import gradio as gr
from openai import OpenAI

# Load the OpenAI key from a local .env file and print a masked confirmation
# so the user can immediately see whether the environment is configured.
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
if openai_api_key:
    # Show only the first 8 characters -- never echo the full secret.
    print(f"OpenAI API Key exists and begins {openai_api_key[:8]}")
else:
    print("OpenAI API Key not set")

# MODEL = "gpt-4o-mini"
MODEL = "gpt-3.5-turbo"  # model used for every chat completion below
openai = OpenAI()  # reads OPENAI_API_KEY from the environment

# Using a local LLM instead (one that can't even handle a basic greeting like "Hi"!)
# Uncomment these two lines to point the same OpenAI-style client at a local Ollama server:
# MODEL = "llama3.2"
# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')

###############################################################################
# 1) System Prompt
###############################################################################
# Instructs the model to follow a strict guided-shopping flow and to rely
# exclusively on tool results (no invented products or prices).
system_message = (
    "You are a helpful assistant for an online store called art-tech.store that sells printers and printer papers.\n\n"
    "When the user wants to purchase a product, follow these steps:\n"
    "1. Ask whether they are interested in printers or printer papers.\n"
    "2. Ask for filtering preferences (e.g., brand, price range, type).\n"
    "3. Call the function 'check_product_availability' with the selected category and filters.\n"
    "   - If it returns an empty list, say: 'No products found for your selection.'\n"
    "   - If it returns products, list them EXACTLY, in a numbered list, showing name, price, brand, and availability.\n"
    "4. Wait for the user to select a product by number and quantity.\n"
    "5. Ask for customer first name and contact info.\n"
    "6. Then call 'add_to_cart_and_generate_receipt' to confirm and show the user the receipt and order details.\n\n"
    "You also have a tool 'generate_report' which summarizes ALL purchases in a single file.\n\n"
    "IMPORTANT:\n"
    "- Always call 'check_product_availability' if user mentions a new category or changes filters.\n"
    "- Do not invent products or details. Use only what the function calls return.\n"
    "- Every time an order is placed, produce a new receipt file named customerName_orderNumber.txt.\n"
    "- If no matching products are found, say so.\n"
    "- If the user wants a full order summary, call 'generate_report' with no arguments.\n"
    "If you don't know something, say so.\n"
    "Keep answers short and courteous.\n"
)

###############################################################################
# 2) Mini Printer Availability with Price & Stock
###############################################################################
# Static in-memory catalogue read by the tools below. Keys are the two
# category names the model may ask for; values are lists of product dicts
# with a fixed shape: {name, brand, price, type, availability}.
# Product names mix English and Hebrew; prices are mostly in shekels (₪).
product_availability = {
    "mini_printers": [
        {
            "name": "Phomemo M110 מדפסת מדבקות חכמה",
            "brand": "Phomemo",
            "price": "₪300",  # Update if pricing is known
            "type": "Label Printer",
            "availability": "360,745 in stock (24 variants)"
        },
        {
            "name": "Niimbot B1 Label Printer",
            "brand": "Niimbot",
            "price": "₪350",
            "type": "Portable Thermal Label Printer",
            "availability": "13,029 in stock (18 variants)"
        },
        {
            "name": "Niimbot B21 Mini Portable Thermal Label Printer",
            "brand": "Niimbot",
            "price": "₪500",
            "type": "Adhesive Sticker Printer",
            "availability": "141 in stock (12 variants)"
        },
        {
            "name": "Dolewa D3 Portable Mini Printer",
            "brand": "Dolewa",
            "price": "₪450",
            "type": "Thermal Photo & Label Printer",
            "availability": "336 in stock (6 variants)"
        },
        {
            "name": "PrintPro Mini מדפסת כיס חכמה",
            "brand": "PrintPro",
            "price": "₪550",
            "type": "Mini Pocket Printer",
            "availability": "336 in stock (6 variants)"
        },
        {
            "name": "מיני מדפסת טרמית מעוצבת לילדים",
            "brand": "Art-Tech",
            "price": "₪200",
            "type": "Kids Thermal Printer",
            "availability": "62 in stock (11 variants)"
        },
        {
            "name": "Children Digital Camera Instant Print",
            "brand": "Art-Tech",
            "price": "₪250",
            "type": "Photo Printing Camera with 32G Memory Card",
            "availability": "160 in stock (3 variants)"
        }
    ],
    "mini_printer_papers": [
        {
            "name": "HP Printer Paper 8.5x11, 500 Sheets",  # example only
            "brand": "HP",
            "price": "$9.99",
            "type": "Standard",
            "availability": "In stock"
        },
        {
            "name": "Mini Printer Paper 57*25mm Color Sticker",
            "brand": "Art-Tech",
            "price": "₪70",
            "type": "Self-adhesive Color Label Paper",
            "availability": "71,996 in stock (9 variants)"
        },
        {
            "name": "מדבקות שקופות למדפסת טרמית",
            "brand": "Art-Tech",
            "price": "₪55",
            "type": "Transparent Labels",
            "availability": "11,762 in stock (12 variants)"
        },
        {
            "name": "גלילי נייר מדבקה",
            "brand": "Art-Tech",
            "price": "₪40",
            "type": "Sticker Paper Rolls",
            "availability": "42 in stock (4 variants)"
        },
        {
            "name": "Niimbot B21/B1/B3S Thermal Label Sticker Paper",
            "brand": "Niimbot",
            "price": "₪55",
            "type": "Printable White Label Paper 20–50mm",
            "availability": "1,342 in stock (14 variants)"
        },
        {
            "name": "Mini Printer Sticker Paper 25X57mm",
            "brand": "Paperang-compatible",
            "price": "₪65",
            "type": "Color Self-Adhesive Thermal Rolls",
            "availability": "3,023 in stock (20 variants)"
        },
        {
            "name": "3/5/10 NiiMBOT White Label Paper Rolls",
            "brand": "Niimbot",
            "price": "₪40",
            "type": "Waterproof Self-adhesive Rolls",
            "availability": "1,400 in stock (9 variants)"
        }
    ]
}

# NOTE(review): leftover from the airline-booking exercise this notebook was
# adapted from -- nothing below reads or writes it.
flight_bookings = []

###############################################################################
# 3) Helper Functions for Art-Tech Store
###############################################################################

# In-memory log of every confirmed order; len(product_orders) doubles as the
# next order number.
product_orders = []
def check_product_availability(category: str, filters: dict = None):
    """
    Return the list of products for `category` from the module-level
    `product_availability` catalogue, optionally narrowed by `filters`.

    Args:
        category: Catalogue key, e.g. 'mini_printers' or
                  'mini_printer_papers' (matched case-insensitively).
        filters: Optional mapping of product field -> required value,
                 e.g. {"brand": "Niimbot"}; values compare case-insensitively.

    Returns:
        A (possibly empty) list of product dicts
        {name, brand, price, type, availability}.
    """
    print(f"[TOOL] check_product_availability({category}, {filters=})")
    category = category.lower()
    products = product_availability.get(category, [])

    if filters:
        for key, val in filters.items():
            # Missing fields default to "" so an unknown filter key simply
            # excludes everything rather than raising.
            products = [p for p in products if p.get(key, "").lower() == val.lower()]
    return products


def add_to_cart_and_generate_receipt(customer_name: str, contact: str, product: dict, quantity: int, order_number: int):
    """
    Write a receipt file named customerName_orderNumber.txt for one order.

    Args:
        customer_name: Customer's name (spaces become underscores in the file name).
        contact: Customer contact info (email or phone), echoed into the receipt.
        product: Product dict from the catalogue (needs name/brand/price/availability).
        quantity: Ordered quantity (echoed verbatim into the receipt).
        order_number: Sequential order number used in the file name.

    Returns:
        The receipt file name that was written.
    """
    safe_name = customer_name.replace(" ", "_")
    filename = f"{safe_name}_{order_number}.txt"

    content = (
        "Art-Tech Store Receipt\n"
        "=======================\n"
        f"Order # : {order_number}\n"
        f"Customer : {customer_name}\n"
        f"Contact : {contact}\n"
        f"Product : {product['name']}\n"
        f"Brand : {product['brand']}\n"
        f"Type : {product.get('type', 'N/A')}\n"
        f"Price : {product['price']}\n"
        f"Quantity : {quantity}\n"
        f"Availability: {product['availability']}\n"
    )
    # utf-8 is required: product names and prices contain Hebrew text and '₪',
    # which a default locale codec (e.g. cp1252 on Windows) cannot encode.
    with open(filename, "w", encoding="utf-8") as f:
        f.write(content)

    # BUGFIX: this message previously printed the literal "(unknown)" instead
    # of the generated file name.
    print(f"[TOOL] Receipt file generated => {filename}")
    return filename


def place_order(category, product_index, quantity, customer_name, contact_info):
    """
    Place an order for the product at 1-based `product_index` in `category`.

    Appends the order to `product_orders`, writes a receipt via
    `add_to_cart_and_generate_receipt`, and returns a confirmation string.
    Invalid input returns an "Error: ..." string instead of raising.

    NOTE: `quantity` arrives as a string from the tool schema; it is only
    echoed into the receipt, so it is deliberately not converted here.
    """
    print(f"[TOOL] place_order({category=}, {product_index=}, {quantity=})")

    try:
        idx = int(product_index)
    except ValueError:
        return "Error: Product option number is not a valid integer."

    products = product_availability.get(category.lower(), [])
    if not products:
        return f"Error: No products found in category '{category}'."

    pick = idx - 1  # user-facing lists are 1-based
    if pick < 0 or pick >= len(products):
        return f"Error: Invalid product option #{idx} for category '{category}'."

    selected_product = products[pick]

    order = {
        "category": category,
        "product": selected_product["name"],
        "brand": selected_product["brand"],
        "type": selected_product.get("type", ""),
        "price": selected_product["price"],
        "quantity": quantity,
        "customer_name": customer_name,
        "contact": contact_info,
    }
    product_orders.append(order)

    order_number = len(product_orders)
    receipt_filename = add_to_cart_and_generate_receipt(customer_name, contact_info, selected_product, quantity, order_number)

    confirmation = (
        f"Order #{order_number} confirmed for {customer_name}. "
        f"{selected_product['name']} x{quantity}. Receipt saved to {receipt_filename}."
    )
    print(f"[TOOL] {confirmation}")
    return confirmation


def generate_report():
    """
    Summarize ALL orders in `product_orders` into order_summary.txt.

    Returns:
        A status message naming the report file.
    """
    print(f"[TOOL] generate_report called.")

    report_content = "Art-Tech Store Order Summary Report\n"
    report_content += "===================================\n"

    if not product_orders:
        report_content += "No orders found.\n"
    else:
        for i, order in enumerate(product_orders, start=1):
            report_content += (
                f"Order # : {i}\n"
                f"Customer : {order['customer_name']}\n"
                f"Contact : {order['contact']}\n"
                f"Product : {order['product']}\n"
                f"Brand : {order['brand']}\n"
                f"Type : {order['type']}\n"
                f"Price : {order['price']}\n"
                f"Quantity : {order['quantity']}\n"
                "-------------------------\n"
            )

    filename = "order_summary.txt"
    # utf-8 for the same reason as the receipts (Hebrew names, '₪' prices).
    with open(filename, "w", encoding="utf-8") as f:
        f.write(report_content)

    # BUGFIX: this message previously printed the literal "(unknown)" instead
    # of the report file name.
    msg = f"Summary report generated => {filename}"
    print(f"[TOOL] {msg}")
    return msg
###############################################################################
# 4) Tools JSON Schemas for Art-Tech Store
###############################################################################
# Every schema shares the same OpenAI function-calling envelope, so a tiny
# private helper builds it once instead of repeating the boilerplate.

def _tool_schema(name, description, properties, required):
    # Helper: wrap a name/description/properties triple in the standard
    # {"parameters": {"type": "object", ...}} envelope.
    return {
        "name": name,
        "description": description,
        "parameters": {
            "type": "object",
            "properties": properties,
            "required": required,
        },
    }


price_function = _tool_schema(
    "get_product_price",
    "Get the price of a product (not strictly needed now).",
    {
        "category": {
            "type": "string",
            "description": "Product category such as 'mini_printers' or 'mini_printer_papers'.",
        },
        "product_name": {
            "type": "string",
            "description": "Name of the product to check price for.",
        },
    },
    ["category", "product_name"],
)

availability_function = _tool_schema(
    "check_product_availability",
    "Check availability of products in a category. Returns a list of {name, brand, price, type, availability}.",
    {
        "category": {
            "type": "string",
            "description": "Category of products to check (e.g., 'mini_printers').",
        },
        "filters": {
            "type": "object",
            "description": "Optional filters like brand or type.",
        },
    },
    ["category"],
)

book_function = _tool_schema(
    "place_order",
    "Place an order using an index for the chosen product. Generates a unique receipt file customerName_{orderNumber}.txt.",
    {
        "category": {
            "type": "string",
            "description": "Product category (e.g., 'mini_printers').",
        },
        "product_index": {
            "type": "string",
            "description": "1-based index of selected product from availability list.",
        },
        "quantity": {
            "type": "string",
            "description": "Quantity to order.",
        },
        "customer_name": {
            "type": "string",
            "description": "Customer's full name.",
        },
        "contact_info": {
            "type": "string",
            "description": "Customer's contact information (email or phone).",
        },
    },
    ["category", "product_index", "quantity", "customer_name", "contact_info"],
)

report_function = _tool_schema(
    "generate_report",
    "Generates a summary report of ALL orders in order_summary.txt.",
    {},
    [],
)

# The list handed to the chat completion call.
tools = [
    {"type": "function", "function": fn}
    for fn in (price_function, availability_function, book_function, report_function)
]
def handle_tool_call(message):
    """
    Execute the tool the LLM requested and package the result.

    Parses the (single) tool call carried by `message`, dispatches to the
    matching Python function, and returns a pair:
    (tool-role message dict to append to the conversation, parsed arguments).
    """
    tool_call = message.tool_calls[0]  # this notebook handles one call per turn
    fn_name = tool_call.function.name
    args = json.loads(tool_call.function.arguments)

    if fn_name == "get_product_price":
        category = args.get("category")
        product_name = args.get("product_name")
        products = product_availability.get(category.lower(), [])
        price = "Not found"
        for p in products:
            if p["name"].lower() == product_name.lower():
                price = p["price"]
                break
        response_content = {"category": category, "product_name": product_name, "price": price}

    elif fn_name == "check_product_availability":
        category = args.get("category")
        filters = args.get("filters", {})
        products = check_product_availability(category, filters)
        response_content = {"category": category, "availability": products}

    elif fn_name == "place_order":
        category = args.get("category")
        product_index = args.get("product_index")
        quantity = args.get("quantity")
        customer_name = args.get("customer_name")
        contact_info = args.get("contact_info")

        confirmation = place_order(category, product_index, quantity, customer_name, contact_info)
        response_content = {
            "category": category,
            "product_index": product_index,
            "quantity": quantity,
            "customer_name": customer_name,
            "contact_info": contact_info,
            "confirmation": confirmation,
        }

    elif fn_name == "generate_report":
        msg = generate_report()
        response_content = {"report": msg}

    else:
        # Defensive: the model should only call tools we declared.
        response_content = {"error": f"Unknown tool: {fn_name}"}

    return {
        "role": "tool",
        "content": json.dumps(response_content),
        "tool_call_id": tool_call.id,
    }, args


def chat(message, history):
    """
    Main Gradio chat handler.

    Sends the conversation (with the `tools` schemas) to the LLM, services
    any tool calls it makes, and loops until the model returns plain text.
    """
    messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]

    try:
        response = openai.chat.completions.create(
            model=MODEL,
            messages=messages,
            tools=tools
        )

        # If the LLM requests a function call, handle it
        while response.choices[0].finish_reason == "tool_calls":
            msg = response.choices[0].message
            print(f"[INFO] Tool call requested: {msg.tool_calls[0]}")
            tool_response, tool_args = handle_tool_call(msg)
            print(f"[INFO] Tool response: {tool_response}")

            # Add both the LLM's request and our tool response to the conversation
            messages.append(msg)
            messages.append(tool_response)

            # BUGFIX: the follow-up call previously omitted `tools=tools`,
            # so the model could never request a second tool and this `while`
            # loop could only ever run once -- multi-step flows such as
            # "check availability, then place order" were silently impossible.
            response = openai.chat.completions.create(
                model=MODEL,
                messages=messages,
                tools=tools
            )

        # Normal text response (finish_reason == "stop")
        return response.choices[0].message.content

    except Exception as e:
        # Top-level boundary: log the failure and keep the UI friendly.
        print(f"[ERROR] {e}")
        return "I'm sorry, something went wrong while processing your request."
+ "cell_type": "code", + "execution_count": null, + "id": "cea4b097", + "metadata": {}, + "outputs": [], + "source": [ + "###############################################################################\n", + "# 7) Launch Gradio\n", + "###############################################################################\n", + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0b39d5a6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/week2-commerce-chatbot-assistant-and-agent.ipynb b/week2/community-contributions/week2-commerce-chatbot-assistant-and-agent.ipynb new file mode 100644 index 0000000..0304371 --- /dev/null +++ b/week2/community-contributions/week2-commerce-chatbot-assistant-and-agent.ipynb @@ -0,0 +1,293 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e3a5643a-c247-4a9b-8c57-ec9b1e89c088", + "metadata": {}, + "source": [ + "# Week 2 - eCommerce Assistant for products price from dictionary\n", + "\n", + "An eCommerce assitant that can get a product price\n", + "\n", + "Gradio for chat box" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "756573b3-72b2-4102-a773-91c278e5c4fd", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# !ollama pull llama3.2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "57ae8d30-f7aa-47a3-bab8-b7002e87a8f7", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + 
# imports

import os
import json
from dotenv import load_dotenv
from openai import OpenAI
import gradio as gr

# Initialization

load_dotenv(override=True)

openai_api_key = os.getenv('OPENAI_API_KEY')
if openai_api_key:
    print(f"OpenAI API Key exists and begins {openai_api_key[:8]}")
else:
    print("OpenAI API Key not set")

MODEL = "gpt-4o-mini"
openai = OpenAI()

# As an alternative, if you'd like to use Ollama instead of OpenAI
# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines
# MODEL = "llama3.2"
# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')

# One flat string instead of an incremental `+=` build; the final value is
# identical byte-for-byte.
system_message = (
    "You are a helpful assistant for an online store called CommerceAI. "
    "Give short, courteous answers, no more than 1 sentence. "
    "Always be accurate. If you don't know the answer, say so."
)


def chat(message, history):
    """Plain (tool-free) chat handler for the Gradio interface."""
    conversation = [
        {"role": "system", "content": system_message},
        *history,
        {"role": "user", "content": message},
    ]
    completion = openai.chat.completions.create(model=MODEL, messages=conversation)
    return completion.choices[0].message.content


gr.ChatInterface(fn=chat, type="messages").launch()
# Let's start by making a useful function

# BUGFIX/cleanup: the original first built a throwaway dict
# ({f"item{i}": f"{i*100}" for i in range(1, 6)}) and immediately overwrote
# it on the next line; that dead assignment has been removed.
items_prices = {"printer": "$500", "paper": "$10", "mini printer": "$50", "label printer": "$60", "sticker-paper": "$5"}


def get_item_price(product):
    """
    Look up the price of `product` (case-insensitive) in `items_prices`.

    Returns:
        The price string (e.g. "$50"), or "Unknown" for anything not listed.
    """
    print(f"Tool get_item_price called for {product}")
    item = product.lower()
    return items_prices.get(item, "Unknown")


# There's a particular dictionary structure that's required to describe our function:
price_function = {
    "name": "get_item_price",
    "description": "Get the price of an item in the store. "
                   "Call this whenever you need to know the store item price , "
                   "for example when a customer asks 'How much is a mini printer' ",
    "parameters": {
        "type": "object",
        "properties": {
            "product": {
                "type": "string",
                "description": "The item that the customer wants to buy"
            },
        },
        "required": ["product"],
        "additionalProperties": False
    }
}

# And this is included in a list of tools:
tools = [{"type": "function", "function": price_function}]


def chat(message, history):
    """
    Tool-aware chat handler: forwards the conversation with `tools`, runs at
    most one tool call via `handle_tool_call`, then asks the model for its
    final text answer.
    """
    messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)

    if response.choices[0].finish_reason == "tool_calls":
        message = response.choices[0].message
        response, item = handle_tool_call(message)
        print('response', response, 'item', item)
        # Append both the model's request and our tool result, then re-send.
        # (Single-shot by design: the follow-up call deliberately omits
        # `tools`, so the model must answer in plain text.)
        messages.append(message)
        messages.append(response)
        response = openai.chat.completions.create(model=MODEL, messages=messages)

    return response.choices[0].message.content
response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b4de767-954a-4077-a5f7-0055a0b90393", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def handle_tool_call(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " item = arguments.get('product') \n", + " print('product', item)\n", + " price = get_item_price(item)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"item\": item,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, item\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5ffda702-6ac5-4d13-9703-a14fa93aea68", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0ae2edbf-de58-43fa-b380-267cfc1755de", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 7038ceba365115c856f2d84d1f90cd9fc74ac00a Mon Sep 17 00:00:00 2001 From: samt07 Date: Wed, 9 Apr 2025 22:53:55 -0400 Subject: [PATCH 16/19] Added Perl to Python Code Converter --- .../day4 -Perl to Python.ipynb | 394 ++++++++++++++++++ 1 file changed, 394 insertions(+) create mode 100644 week4/community-contributions/day4 -Perl to Python.ipynb diff --git a/week4/community-contributions/day4 -Perl to Python.ipynb b/week4/community-contributions/day4 -Perl to 
# Perl to Python Code Generator
# The requirement: use a Frontier model to generate high performance Python
# code from Perl code.

# imports

import os
import io
import sys
from dotenv import load_dotenv
from openai import OpenAI
import google.generativeai
import anthropic
from IPython.display import Markdown, display, update_display
import gradio as gr
import subprocess
import requests
import json
# for Hugging Face end points
from huggingface_hub import login, InferenceClient
from transformers import AutoTokenizer

# environment

load_dotenv(override=True)
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')
os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')
os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')
# for connecting to a HF end point
hf_token = os.environ['HF_TOKEN']
login(hf_token, add_to_git_credential=True)

# initialize
# NOTE - option to use ultra-low cost models by uncommenting last 2 lines

openai = OpenAI()
claude = anthropic.Anthropic()
OPENAI_MODEL = "gpt-4o"
CLAUDE_MODEL = "claude-3-5-sonnet-20240620"

# Want to keep costs ultra-low? Uncomment these lines:
OPENAI_MODEL = "gpt-4o-mini"
CLAUDE_MODEL = "claude-3-haiku-20240307"

# To access open source models from Hugging Face end points
code_qwen = "Qwen/CodeQwen1.5-7B-Chat"
code_gemma = "google/codegemma-7b-it"
CODE_QWEN_URL = "https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud"
CODE_GEMMA_URL = "https://c5hggiyqachmgnqg.us-east-1.aws.endpoints.huggingface.cloud"

system_message = "You are an assistant that reimplements Perl scripts code into a high performance Python for a Windows 11 PC. "
system_message += "Respond only with Python code; use comments sparingly and do not provide any explanation other than occasional # comments. "
system_message += "The Python response needs to produce an identical output in the fastest possible time."


def user_prompt_for(perl):
    """Build the user prompt asking the model to port `perl` to fast Python."""
    # BUGFIX: the prompt previously asked for a rewrite "in C++" and told the
    # model to "#include" libraries -- leftovers from the C++ version of this
    # exercise that contradicted the Python-only system prompt above.
    user_prompt = "Rewrite this Perl script in Python with the fastest possible implementation that produces identical output in the least time. "
    user_prompt += "Respond only with Python code; do not explain your work other than a few comments. "
    user_prompt += "Pay attention to number types to ensure no int overflows. Remember to import all necessary Python modules as needed, \
    such as requests, os, json etc.\n\n"
    user_prompt += perl
    return user_prompt


def messages_for(perl):
    """Standard two-message chat payload: system prompt + user request."""
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_prompt_for(perl)}
    ]


# write to a file called script.py

def write_output(python):
    """Strip Markdown code fences from `python` and save it to script.py."""
    code = python.replace("```python", "").replace("```", "")
    output_file = "script.py"
    with open(output_file, "w") as f:
        f.write(code)
    return output_file


def stream_gpt(perl):
    """Stream a GPT conversion; yields (partial_text, None), then (text, file)."""
    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(perl), stream=True)
    reply = ""
    for chunk in stream:
        fragment = chunk.choices[0].delta.content or ""
        reply += fragment
        cleaned_reply = reply.replace('```python\n', '').replace('```', '')
        yield cleaned_reply, None
    yield cleaned_reply, write_output(cleaned_reply)


def stream_claude(perl):
    """Stream a Claude conversion; same yield contract as stream_gpt."""
    result = claude.messages.stream(
        model=CLAUDE_MODEL,
        max_tokens=2000,
        system=system_message,
        messages=[{"role": "user", "content": user_prompt_for(perl)}],
    )
    reply = ""
    with result as stream:
        for text in stream.text_stream:
            reply += text
            cleaned_reply = reply.replace('```python\n', '').replace('```', '')
            yield cleaned_reply, None
    yield cleaned_reply, write_output(cleaned_reply)
stream:\n", + " for text in stream.text_stream:\n", + " reply += text\n", + " cleaned_reply = reply.replace('```python\\n','').replace('```','')\n", + " yield cleaned_reply, None\n", + " yield cleaned_reply, write_output(cleaned_reply)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "5b166afe-741a-4711-bc38-626de3538ea2", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_code_qwen(python):\n", + " tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", + " messages = messages_for(python)\n", + " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", + " client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n", + " stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n", + " result = \"\"\n", + " for r in stream:\n", + " result += r.token.text\n", + " cleaned_reply = result.replace('```python\\n','').replace('```','')\n", + " yield cleaned_reply, None\n", + " yield cleaned_reply, write_output(cleaned_reply) " + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d", + "metadata": {}, + "outputs": [], + "source": [ + "def generate(perl_script, model):\n", + " if model==\"GPT\":\n", + " for result, file in stream_gpt(perl_script):\n", + " yield result, file\n", + " yield result, file\n", + " elif model==\"Claude\":\n", + " for result, file in stream_claude(perl_script):\n", + " yield result, file\n", + " yield result, file\n", + " elif model==\"CodeQwen\":\n", + " for result, file in stream_code_qwen(perl_script):\n", + " yield result, file\n", + " yield result, file\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "aa8e9a1c-9509-4056-bd0b-2578f3cc3335", + "metadata": {}, + "outputs": [], + "source": [ + "def execute_perl(perl_code):\n", + "\n", + " import subprocess\n", + " #print(perl_file)\n", + " perl_path = 
r\"E:\\Softwares\\Perl\\perl\\bin\\perl.exe\"\n", + " # Run Perl script from Jupyter Lab\n", + " result = subprocess.run([perl_path, '-e', perl_code], capture_output=True, text=True)\n", + "\n", + " # Return the output of the Perl script\n", + " return result.stdout\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "01e9d980-8830-4421-8753-a065dcbea1ed", + "metadata": {}, + "outputs": [], + "source": [ + "def execute_python(code):\n", + " try:\n", + " output = io.StringIO()\n", + " sys.stdout = output\n", + " exec(code)\n", + " finally:\n", + " sys.stdout = sys.__stdout__\n", + " return output.getvalue()" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "ed4e0aff-bfde-440e-8e6b-eb3c7143837e", + "metadata": {}, + "outputs": [], + "source": [ + "css = \"\"\"\n", + ".perl {background-color: #093645;}\n", + ".python {background-color: #0948;}\n", + "\"\"\"\n", + "\n", + "force_dark_mode = \"\"\"\n", + "function refresh() {\n", + " const url = new URL(window.location);\n", + " if (url.searchParams.get('__theme') !== 'dark') {\n", + " url.searchParams.set('__theme', 'dark');\n", + " window.location.href = url.href;\n", + " }\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "caaee54d-79db-4db3-87df-2e7d2eba197c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "with gr.Blocks(css=css, js=force_dark_mode) as ui:\n", + "\n", + " gr.HTML(\"

Perl to Python Code Generator

\")\n", + " with gr.Row(scale=0, equal_height=True):\n", + " model = gr.Dropdown([\"GPT\", \"Claude\", \"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n", + " perl_file = gr.File(label=\"Upload Perl Script:\")\n", + " convert = gr.Button(\"Convert to Python\")\n", + " file_output = gr.File(label=\"Download Python script\", visible=False)\n", + " with gr.Row():\n", + " perl_script = gr.Textbox(label=\"Perl Script:\")\n", + " python_script = gr.Textbox(label=\"Converted Python Script:\") \n", + " with gr.Row():\n", + " perl_run = gr.Button(\"Run PERL\")\n", + " python_run = gr.Button(\"Run Python\")\n", + " with gr.Row():\n", + " perl_out = gr.TextArea(label=\"PERL result:\", elem_classes=[\"perl\"])\n", + " python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n", + " with gr.Row(): \n", + " clear_button = gr.Button(\"Clear\")\n", + " \n", + " def extract_perl_code(file):\n", + " if file is None:\n", + " return \"No file uploaded.\", None \n", + " with open(file.name, \"r\", encoding=\"utf-8\") as f:\n", + " perl_code = f.read()\n", + " return perl_code\n", + "\n", + " convert.click(extract_perl_code, inputs=[perl_file], outputs=[perl_script]).then(\n", + " generate, inputs=[perl_script, model], outputs=[python_script, file_output]).then(\n", + " lambda file_output: gr.update(visible=True), inputs=[file_output], outputs=[file_output]\n", + " )\n", + "\n", + " perl_run.click(execute_perl, inputs=[perl_script], outputs=[perl_out])\n", + " python_run.click(execute_python, inputs=[python_script], outputs=[python_out]) \n", + "\n", + " def clear_all():\n", + " return None, \"\", \"\", gr.update(visible=False), \"\", \"\"\n", + "\n", + " clear_button.click(\n", + " clear_all,\n", + " outputs=[perl_file, perl_script, python_script, file_output, perl_out, python_out]\n", + " )\n", + " \n", + "\n", + "ui.launch(inbrowser=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + 
"name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 8951be8ca0d6fab73eed59cc9e8af359f651c2cd Mon Sep 17 00:00:00 2001 From: Adriana394 <158718290+Adriana394@users.noreply.github.com> Date: Thu, 10 Apr 2025 11:44:41 +0200 Subject: [PATCH 17/19] Create unit_testing_commets_code_generator.ipynb --- .../unit_testing_commets_code_generator.ipynb | 413 ++++++++++++++++++ 1 file changed, 413 insertions(+) create mode 100644 week4/community-contributions/unit_testing_commets_code_generator.ipynb diff --git a/week4/community-contributions/unit_testing_commets_code_generator.ipynb b/week4/community-contributions/unit_testing_commets_code_generator.ipynb new file mode 100644 index 0000000..09b0c6b --- /dev/null +++ b/week4/community-contributions/unit_testing_commets_code_generator.ipynb @@ -0,0 +1,413 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ba410c21-be08-430f-8592-07aeefca27d1", + "metadata": {}, + "source": [ + "# Code Generator for Unit Tests and Comments/Docstrings" + ] + }, + { + "cell_type": "markdown", + "id": "0fe5e62b-78b5-476d-a3b1-77918d085c44", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "2b529e40-4902-4a1b-9208-a938af156be1", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from dotenv import load_dotenv\n", + "\n", + "from openai import OpenAI\n", + "import anthropic\n", + "\n", + "from huggingface_hub import login\n", + "from transformers import AutoTokenizer, TextStreamer, AutoModelForCausalLM\n", + "\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4cd288ab-9332-4ce5-86b6-f81d2fff96a7", + "metadata": {}, + "outputs": [], + 
"source": [ + "load_dotenv()\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('CLAUDE_API_KEY')\n", + "hf_token = os.getenv('HF_TOKEN')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1a192ae5-2be7-46a3-9376-d33e514e184e", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "claude = anthropic.Anthropic(api_key = anthropic_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "7d6efe88-d90c-40f9-9df8-ab5370a31b21", + "metadata": {}, + "outputs": [], + "source": [ + "OPENAI = 'o3-mini-2025-01-31'\n", + "CLAUDE = 'claude-3-5-sonnet-20240620'\n", + "\n", + "QWEN = 'Qwen/CodeQwen1.5-7B-Chat'" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "ef0df5ce-c786-44c7-bdbd-600adfe8908e", + "metadata": {}, + "outputs": [], + "source": [ + "TESTING = 'Unit Tests'\n", + "COMMENTING = 'Docstrings/Comments'" + ] + }, + { + "cell_type": "markdown", + "id": "f4b2a75a-e713-404d-898a-c87db87fa849", + "metadata": {}, + "source": [ + "## System and User Prompt for Unit Test and Comments" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4fab566a-4093-4ac4-bd77-866e0f307b74", + "metadata": {}, + "outputs": [], + "source": [ + "system_message_comment = \"\"\" You are an AI programming documentation assisstant. Your task is to generate clear, concise, \n", + "and informativ docstrings for the provided code block given by the user. \n", + "Analyze the code to understand its functionality and intent. 
Then produce a detailed documentation that includes:\n", + "- a short summary of what the code does.\n", + "- a short description of the parameters, including their expected types\n", + "- a short explanation of what the function returns \n", + "- if it's a complex code, and only then, some key insights\n", + "- if applicable how the function can be used\n", + "Ensure your documentation is written in clear grammatically correct English and in standard conventions (e.g. PEP 257 for Python). \n", + "It should be understandable and maintainable for other developers \"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "70273c7d-d461-4f59-982a-592443ce1257", + "metadata": {}, + "outputs": [], + "source": [ + "system_message_tests = \"\"\" You are an AI assistant specialized for creating unit tests. Your task is to generate high-quality\n", + "unit tests for code provided by the user.\n", + "First analyze the code and identify the main functionality, parameters, return values and possible edge cases.\n", + "Create comprehensive unit tests that cover the following aspects:\n", + "- normal use cases with expected inputs and outputs\n", + "- boundary cases and extreme values\n", + "- error handling and exceptions\n", + "- edge cases \n", + "Use the appropriate testing framework for the programming language (e.g., pytest for Python, etc.) and explain to the user why you \n", + "chose this specific framework.\n", + "Structure the tests clearly with meaningful test names and add comments to explain the test logic.\n", + "If the code block does not provide enough context, ask for the necessary details.\n", + "Supplement your response with a brief explanation of the testing strategy and suggestions for improving test coverage. 
\"\"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "48f2dd17-1ad1-4e34-ad76-0e02899f1962", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_comment(code):\n", + " user_prompt = f\"\"\"Please add detailed docstrings to the following code: \n", + " {code} \"\"\"\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "cb8b9962-c716-45d6-b4d1-ced781bb40f0", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_tests(code):\n", + " user_prompt = f\"\"\" Please generate unit tests for the following code using the appropriate framework: \n", + " {code} \"\"\"\n", + " return user_prompt" + ] + }, + { + "cell_type": "markdown", + "id": "959d263e-f6ad-4e0e-95d3-bb5f56877d47", + "metadata": {}, + "source": [ + "## Define Model Functions" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "8832b9d7-b17a-40d0-add5-07720d2e8af6", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_gpt(system_message, user_prompt):\n", + " stream = openai.chat.completions.create(\n", + " model = OPENAI,\n", + " messages = [\n", + " {'role': 'system', 'content': system_message},\n", + " {'role': 'user', 'content': user_prompt}\n", + " ],\n", + " stream = True\n", + " )\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or \"\"\n", + " yield response" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "5ac1d70c-cd4e-4809-bc2f-75a2e82b4e58", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_claude(system_message, user_prompt):\n", + " response = claude.messages.stream(\n", + " model = CLAUDE,\n", + " max_tokens = 2000,\n", + " system = system_message, \n", + " messages = [\n", + " {'role': 'user', 'content': user_prompt}\n", + " ], \n", + " temperature = 0.4\n", + " )\n", + " reply = \"\"\n", + " with response as stream:\n", + " for text in stream.text_stream:\n", + " reply += text 
or \"\"\n", + " yield reply" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "16702a62-fc9b-45b0-84cd-4f98523dfbd6", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_qwen(system_message, user_prompt):\n", + " tokenizer = AutoTokenizer.from_pretrained(QWEN)\n", + " model = AutoModelForCausalLM.from_pretrained(QWEN, device_map = 'gpu')\n", + " streamer = TextStreamer(tokenizer)\n", + " inputs = tokenizer.apply_chat_template(\n", + " conv = [\n", + " {'role': 'system', 'content': system_message},\n", + " {'role': 'user', 'content': user_prompt}\n", + " ],\n", + " tokenize = False,\n", + " add_generation_prompt = True\n", + " )\n", + "\n", + " stream = model.text_generation(\n", + " prompt = inputs, \n", + " stream = True,\n", + " details = True,\n", + " max_new_tokens = 2000\n", + " )\n", + " reply = \"\"\n", + " for text in stream: \n", + " reply += text.token.text or \"\"\n", + " yield reply " + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f5dbf75f-c935-4412-b641-8afce97552e8", + "metadata": {}, + "outputs": [], + "source": [ + "def define_prompts(code, operation):\n", + " if operation == 'Unit Tests':\n", + " system_message = system_message_tests\n", + " user_prompt = user_prompt_tests(code)\n", + " elif operation == 'Docstrings/Comments':\n", + " system_message = system_message_comment\n", + " user_prompt = user_prompt_comment(code)\n", + " else: \n", + " return 'Unknown operation', ''\n", + "\n", + " return system_message, user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "88a671f9-0ebc-487b-b116-b1abe4c6f934", + "metadata": {}, + "outputs": [], + "source": [ + "def create_test_comment(code, model, operation):\n", + " \n", + " system_message, user_prompt = define_prompts(code, operation)\n", + " \n", + " if model == 'GPT-o3-mini':\n", + " gen = stream_gpt(system_message, user_prompt)\n", + " elif model == 'Claude-3.5-sonnet':\n", + " gen = stream_claude(system_message, 
user_prompt)\n", + " elif model == 'CodeQwen':\n", + " gen = stream_qwen(system_message, user_prompt)\n", + " else: \n", + " gen = 'Unknown Model'\n", + "\n", + " result = ''\n", + " for text in gen:\n", + " result = text\n", + " return result" + ] + }, + { + "cell_type": "markdown", + "id": "1c7eea7a-fc30-4afd-b470-f4f83a288981", + "metadata": {}, + "source": [ + "## Creating easy Gradio UI " + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "3d3d014b-bfc8-4ffd-941b-1fb3c9c9a80f", + "metadata": {}, + "outputs": [], + "source": [ + "def create_ui():\n", + "\n", + " with gr.Blocks(title = 'Code Generator') as ui:\n", + " gr.Markdown('# Code Generator for Unit Testing and Docstrings')\n", + " \n", + " with gr.Row():\n", + " with gr.Column(min_width = 500):\n", + " code = gr.Textbox(label = 'Enter your Code', \n", + " placeholder = 'Code...', lines = 20\n", + " )\n", + " model = gr.Dropdown(['GPT-o3-mini', 'Claude-3.5-sonnet', 'CodeQwen'],\n", + " label = 'Choose your Model',\n", + " value = 'GPT-o3-mini'\n", + " )\n", + " operation = gr.Dropdown(['Unit Tests', 'Docstrings/Comments'],\n", + " label = 'Choose operation',\n", + " value = 'Unit Tests'\n", + " )\n", + " generate_button = gr.Button('Generate')\n", + " \n", + " with gr.Column():\n", + " output = gr.Textbox(label = 'Generated Output',\n", + " lines = 20\n", + " )\n", + " \n", + " generate_button.click(fn = create_test_comment, inputs = [code, model, operation],\n", + " outputs = output,\n", + " )\n", + " return ui" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "89be90c2-55ed-41e5-8123-e4f8ab965281", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ui = create_ui()\n", + "ui.launch(inbrowser = True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac4d6d48-4e52-477e-abf9-156eb1e4d561", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 488a214ebc1d5ab60a9d6c2e5983699df2d97350 Mon Sep 17 00:00:00 2001 From: Sameer Khadatkar Date: Fri, 11 Apr 2025 14:15:15 +0530 Subject: [PATCH 18/19] Added my contributions to community-contributions --- .../Airlines_Chatbot_with_Audio_Input.ipynb | 417 ++++++++++++++++++ 1 file changed, 417 insertions(+) create mode 100644 week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb diff --git a/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb b/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb new file mode 100644 index 0000000..e9c80e5 --- /dev/null +++ b/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb @@ -0,0 +1,417 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", + "metadata": {}, + "source": [ + "# Additional End of week Exercise - week 2\n", + "\n", + "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", + "\n", + "This should include a Gradio UI, streaming, use of the system 
prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", + "\n", + "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n", + "\n", + "I will publish a full solution here soon - unless someone beats me to it...\n", + "\n", + "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a07e7793-b8f5-44f4-aded-5562f633271a", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "9e2315a3-f80c-4d3f-8073-f5b61d709564", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n" + ] + } + ], + "source": [ + "# Initialization\n", + "\n", + "load_dotenv(override=True)\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "MODEL = \"gpt-4o-mini\"\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "40da9de1-b350-49de-8acd-052f40ce5611", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n", + "system_message += \"Give short, courteous answers, no more than 1 sentence. 
\"\n", + "system_message += \"Always be accurate. If you don't know the answer, say so.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5537635c-a60d-4983-8018-375c6a912e19", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's start by making a useful function\n", + "\n", + "ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n", + "\n", + "def get_ticket_price(destination_city):\n", + " print(f\"Tool get_ticket_price called for {destination_city}\")\n", + " city = destination_city.lower()\n", + " return ticket_prices.get(city, \"Unknown\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "c7132dd0-8788-4885-a415-d59664f68fd8", + "metadata": {}, + "outputs": [], + "source": [ + "# There's a particular dictionary structure that's required to describe our function:\n", + "\n", + "price_function = {\n", + " \"name\": \"get_ticket_price\",\n", + " \"description\": \"Get the price of a return ticket to the destination city. 
Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"destination_city\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The city that the customer wants to travel to\",\n", + " },\n", + " },\n", + " \"required\": [\"destination_city\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7703ca0c-5da4-4641-bcb1-7727d1b2f2bf", + "metadata": {}, + "outputs": [], + "source": [ + "# And this is included in a list of tools:\n", + "\n", + "tools = [{\"type\": \"function\", \"function\": price_function}]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "29ce724b-d998-4c3f-bc40-6b8576c0fd34", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write that function handle_tool_call:\n", + "\n", + "def handle_tool_call(message):\n", + " tool_call = message.tool_calls[0]\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " city = arguments.get('destination_city')\n", + " price = get_ticket_price(city)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", + " \"tool_call_id\": tool_call.id\n", + " }\n", + " return response, city" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "931d0565-b01d-4aa8-bd18-72bafff8fb3b", + "metadata": {}, + "outputs": [], + "source": [ + "def artist(city):\n", + " image_response = openai.images.generate(\n", + " model=\"dall-e-3\",\n", + " prompt=f\"An image representing a vacation in {city}, showing tourist spots and everything unique about {city}, in a vibrant pop-art style\",\n", + " size=\"1024x1024\",\n", + " n=1,\n", + " response_format=\"b64_json\",\n", + " )\n", + " image_base64 = image_response.data[0].b64_json\n", + " image_data = 
base64.b64decode(image_base64)\n", + " return Image.open(BytesIO(image_data))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "fa165f7f-9796-4513-b923-2fa0b0b9ddd8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import base64\n", + "from io import BytesIO\n", + "from PIL import Image\n", + "from IPython.display import Audio, display\n", + "\n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\",\n", + " input=message)\n", + "\n", + " audio_stream = BytesIO(response.content)\n", + " output_filename = \"output_audio.mp3\"\n", + " with open(output_filename, \"wb\") as f:\n", + " f.write(audio_stream.read())\n", + "\n", + " # Play the generated audio\n", + " display(Audio(output_filename, autoplay=True))\n", + "\n", + "talker(\"Well, hi there\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "b512d4ff-0f7b-4148-b161-4ee0ebf14776", + "metadata": {}, + "outputs": [], + "source": [ + "def transcribe_audio(audio_file):\n", + " with open(audio_file, \"rb\") as f:\n", + " transcript = openai.audio.transcriptions.create(\n", + " model=\"whisper-1\",\n", + " file=f\n", + " )\n", + " return transcript.text" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "id": "c3852570-fb26-4507-a001-f50fd94b7655", + "metadata": {}, + "outputs": [], + "source": [ + "# Translate between languages using GPT\n", + "def translate(text, source_lang, target_lang):\n", + " translation_prompt = (\n", + " f\"Translate the following text from {source_lang} to {target_lang}:\\n\\n{text}\"\n", + " )\n", + " response = openai.chat.completions.create(\n", + " model=\"gpt-3.5-turbo\",\n", + " messages=[{\"role\": \"user\", \"content\": translation_prompt}]\n", + " )\n", + " return response.choices[0].message.content.strip()" + ] + 
}, + { + "cell_type": "code", + "execution_count": 48, + "id": "3d75abc2-870e-48af-a8fe-8dd463418b3d", + "metadata": {}, + "outputs": [], + "source": [ + "# Chatbot logic: handle both text and audio input\n", + "def chatbot_dual(history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n", + " image = None\n", + " \n", + " if response.choices[0].finish_reason==\"tool_calls\":\n", + " message = response.choices[0].message\n", + " response, city = handle_tool_call(message)\n", + " messages.append(message)\n", + " messages.append(response)\n", + " image = None\n", + " # image = artist(city)\n", + " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", + " \n", + " reply = response.choices[0].message.content\n", + " history += [{\"role\":\"assistant\", \"content\":reply}]\n", + "\n", + " # Comment out or delete the next line if you'd rather skip Audio for now..\n", + " # audio_response = talker(reply)\n", + " talker(reply)\n", + " return history, image# Chatbot logic here — replace with real logic" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "id": "512fec09-c2f7-4847-817b-bc20f8b30319", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7880\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tool get_ticket_price called for London\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# More involved Gradio code as we're not using the preset Chat interface!\n", + "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", + "\n", + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot = gr.Chatbot(height=500, type=\"messages\")\n", + " image_output = gr.Image(height=500)\n", + "\n", + " with gr.Row():\n", + " text_input = gr.Textbox(label=\"Chat with our AI Assistant:\")\n", + " audio_input = gr.Audio(sources=\"microphone\", type=\"filepath\", label=\"Or speak to the assistant\")\n", + "\n", + " with gr.Row():\n", + " # voice_output = gr.Audio(label=\"Bot Voice Reply\", autoplay=True)\n", + " clear = gr.Button(\"Clear\")\n", + "\n", + " def do_entry(message, audio, history):\n", + " if message:\n", + " history += [{\"role\":\"user\", \"content\":message}]\n", + " if audio:\n", + " history += [{\"role\":\"user\", \"content\":transcribe_audio(audio)}]\n", + " return \"\", None, history\n", + "\n", + " text_input.submit(do_entry, inputs=[text_input, audio_input, chatbot], outputs=[text_input, audio_input, chatbot]).then(chatbot_dual, inputs=chatbot, outputs=[chatbot, image_output]\n", + " )\n", + "\n", + " audio_input.change(do_entry, inputs=[text_input, audio_input, chatbot], outputs=[text_input, audio_input, chatbot]).then(chatbot_dual, inputs=chatbot, outputs=[chatbot, image_output]\n", + " )\n", + "\n", + " clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", + "\n", + 
"ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3e1294e2-caf0-4f0f-b09e-b0d52c8ca6ec", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From b31cad470b35b4306bee46769ea1d03b0a30b0c3 Mon Sep 17 00:00:00 2001 From: Sameer Khadatkar Date: Fri, 11 Apr 2025 14:36:23 +0530 Subject: [PATCH 19/19] Added my contributions to community-contributions --- .../Airlines_Chatbot_with_Audio_Input.ipynb | 127 +++--------------- 1 file changed, 18 insertions(+), 109 deletions(-) diff --git a/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb b/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb index e9c80e5..4b18a89 100644 --- a/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb +++ b/week2/community-contributions/Airlines_Chatbot_with_Audio_Input.ipynb @@ -5,22 +5,12 @@ "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", "metadata": {}, "source": [ - "# Additional End of week Exercise - week 2\n", - "\n", - "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", - "\n", - "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", - "\n", - "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. 
ChatGPT or Claude can help you, or email me if you have questions.\n", - "\n", - "I will publish a full solution here soon - unless someone beats me to it...\n", - "\n", - "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." + "# Project to take Audio Input to the Airlines ChatBot" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "a07e7793-b8f5-44f4-aded-5562f633271a", "metadata": {}, "outputs": [], @@ -40,18 +30,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "9e2315a3-f80c-4d3f-8073-f5b61d709564", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key exists and begins sk-proj-\n" - ] - } - ], + "outputs": [], "source": [ "# Initialization\n", "\n", @@ -69,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "40da9de1-b350-49de-8acd-052f40ce5611", "metadata": {}, "outputs": [], @@ -81,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "5537635c-a60d-4983-8018-375c6a912e19", "metadata": {}, "outputs": [], @@ -98,7 +80,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "c7132dd0-8788-4885-a415-d59664f68fd8", "metadata": {}, "outputs": [], @@ -124,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "7703ca0c-5da4-4641-bcb1-7727d1b2f2bf", "metadata": {}, "outputs": [], @@ -136,7 +118,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "29ce724b-d998-4c3f-bc40-6b8576c0fd34", "metadata": {}, "outputs": [], @@ -158,7 +140,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "931d0565-b01d-4aa8-bd18-72bafff8fb3b", "metadata": {}, "outputs": [], @@ -178,28 +160,10 @@ }, { 
"cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "fa165f7f-9796-4513-b923-2fa0b0b9ddd8", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import base64\n", "from io import BytesIO\n", @@ -225,7 +189,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "b512d4ff-0f7b-4148-b161-4ee0ebf14776", "metadata": {}, "outputs": [], @@ -241,7 +205,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": null, "id": "c3852570-fb26-4507-a001-f50fd94b7655", "metadata": {}, "outputs": [], @@ -260,7 +224,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": null, "id": "3d75abc2-870e-48af-a8fe-8dd463418b3d", "metadata": {}, "outputs": [], @@ -276,8 +240,7 @@ " response, city = handle_tool_call(message)\n", " messages.append(message)\n", " messages.append(response)\n", - " image = None\n", - " # image = artist(city)\n", + " image = artist(city)\n", " response = openai.chat.completions.create(model=MODEL, messages=messages)\n", " \n", " reply = response.choices[0].message.content\n", @@ -291,64 +254,10 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": null, "id": "512fec09-c2f7-4847-817b-bc20f8b30319", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7880\n", - "\n", - "To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 50, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tool get_ticket_price called for London\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# More involved Gradio code as we're not using the preset Chat interface!\n", "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n",