{ "cells": [ { "cell_type": "markdown", "id": "8b0e11f2-9ea4-48c2-b8d2-d0a4ba967827", "metadata": {}, "source": [ "# Gradio Day!\n", "\n", "Today we will build User Interfaces using the outrageously simple Gradio framework.\n", "\n", "Prepare for joy!\n", "\n", "Please note: your Gradio screens may appear in 'dark mode' or 'light mode' depending on your computer settings." ] }, { "cell_type": "code", "execution_count": 2, "id": "c0242da2-967b-487e-aec3-bba9831c7300", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: google-generativeai in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (0.8.4)\n", "Requirement already satisfied: google-ai-generativelanguage==0.6.15 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (0.6.15)\n", "Requirement already satisfied: google-api-core in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (2.24.2)\n", "Requirement already satisfied: google-api-python-client in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (2.165.0)\n", "Requirement already satisfied: google-auth>=2.15.0 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (2.38.0)\n", "Requirement already satisfied: protobuf in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (5.29.4)\n", "Requirement already satisfied: pydantic in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (2.9.2)\n", "Requirement already satisfied: tqdm in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-generativeai) (4.67.1)\n", "Requirement already satisfied: typing-extensions in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from google-generativeai) (4.12.2)\n", "Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-ai-generativelanguage==0.6.15->google-generativeai) (1.26.1)\n", "Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai) (5.5.2)\n", "Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai) (0.4.1)\n", "Requirement already satisfied: rsa<5,>=3.1.4 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai) (4.9)\n", "Requirement already satisfied: googleapis-common-protos<2.0.0,>=1.56.2 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-api-core->google-generativeai) (1.69.2)\n", "Requirement already satisfied: requests<3.0.0,>=2.18.0 in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from google-api-core->google-generativeai) (2.32.3)\n", "Requirement already satisfied: httplib2<1.0.0,>=0.19.0 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-api-python-client->google-generativeai) (0.22.0)\n", "Requirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-api-python-client->google-generativeai) (0.2.0)\n", "Requirement already satisfied: uritemplate<5,>=3.0.1 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-api-python-client->google-generativeai) (4.1.1)\n", "Requirement already satisfied: annotated-types>=0.6.0 in 
c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from pydantic->google-generativeai) (0.7.0)\n", "Requirement already satisfied: pydantic-core==2.23.4 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from pydantic->google-generativeai) (2.23.4)\n", "Requirement already satisfied: colorama in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from tqdm->google-generativeai) (0.4.6)\n", "Requirement already satisfied: grpcio<2.0dev,>=1.33.2 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-api-core->google-generativeai) (1.71.0)\n", "Requirement already satisfied: grpcio-status<2.0.dev0,>=1.33.2 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from google-api-core->google-generativeai) (1.71.0)\n", "Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from httplib2<1.0.0,>=0.19.0->google-api-python-client->google-generativeai) (3.2.1)\n", "Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in c:\\adm017\\llm_engineering\\.venv\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth>=2.15.0->google-generativeai) (0.6.1)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from requests<3.0.0,>=2.18.0->google-api-core->google-generativeai) (3.4.1)\n", "Requirement already satisfied: idna<4,>=2.5 in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from requests<3.0.0,>=2.18.0->google-api-core->google-generativeai) (3.10)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from requests<3.0.0,>=2.18.0->google-api-core->google-generativeai) (2.3.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\kzk1kh\\appdata\\roaming\\python\\python312\\site-packages (from requests<3.0.0,>=2.18.0->google-api-core->google-generativeai) (2025.1.31)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "[notice] A new release of pip is available: 23.2.1 -> 25.0.1\n", "[notice] To update, run: python.exe -m pip install --upgrade pip\n" ] } ], "source": [ "!pip install google-generativeai" ] }, { "cell_type": "code", "execution_count": 1, "id": "3e973b42-9f29-4e4a-87ac-d5da55809e19", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "C:\\adm017\\llm_engineering\\.venv\\Scripts\\python.exe\n" ] } ], "source": [ "import sys\n", "print(sys.executable)" ] }, { "cell_type": "code", "execution_count": 2, "id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330", "metadata": {}, "outputs": [], "source": [ "# imports\n", "\n", "import os\n", "import requests\n", "from bs4 import BeautifulSoup\n", "from typing import List\n", "from dotenv import load_dotenv\n", "from openai import OpenAI\n", "import google.generativeai\n", "import anthropic" ] }, { "cell_type": "code", "execution_count": 4, "id": "d1715421-cead-400b-99af-986388a97aff", "metadata": {}, "outputs": [], "source": [ "import gradio as gr # oh yeah!" 
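, "\n", "\n", "# Note: later cells pass flagging_mode=\"never\" to gr.Interface; that keyword is the newer Gradio spelling (a version assumption).\n", "# If it raises a TypeError on an older Gradio install, the equivalent argument is allow_flagging=\"never\"."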
] }, { "cell_type": "code", "execution_count": 5, "id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "OpenAI API Key exists and begins sk-proj-\n", "Anthropic API Key exists and begins sk-ant-\n", "Google API Key exists and begins AIzaSyBX\n" ] } ], "source": [ "# Load environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", "\n", "load_dotenv(override=True)\n", "openai_api_key = os.getenv('OPENAI_API_KEY')\n", "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", "google_api_key = os.getenv('GOOGLE_API_KEY')\n", "\n", "if openai_api_key:\n", " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", "else:\n", " print(\"OpenAI API Key not set\")\n", " \n", "if anthropic_api_key:\n", " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", "else:\n", " print(\"Anthropic API Key not set\")\n", "\n", "if google_api_key:\n", " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", "else:\n", " print(\"Google API Key not set\")" ] }, { "cell_type": "code", "execution_count": 6, "id": "22586021-1795-4929-8079-63f5bb4edd4c", "metadata": {}, "outputs": [], "source": [ "# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n", "\n", "openai = OpenAI()\n", "\n", "claude = anthropic.Anthropic()\n", "\n", "google.generativeai.configure()" ] }, { "cell_type": "code", "execution_count": 7, "id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5", "metadata": {}, "outputs": [], "source": [ "# A generic system message - no more snarky adversarial AIs!\n", "\n", "system_message = \"You are a helpful assistant\"" ] }, { "cell_type": "code", "execution_count": 8, "id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1", "metadata": {}, "outputs": [], "source": [ "# Let's wrap a call to GPT-4o-mini in a simple function\n", "\n", "def message_gpt(prompt):\n", " messages = [\n", " {\"role\": \"system\", \"content\": system_message},\n", " {\"role\": \"user\", \"content\": prompt}\n", " ]\n", " completion = openai.chat.completions.create(\n", " model='gpt-4o-mini',\n", " messages=messages,\n", " )\n", " return completion.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": 9, "id": "aef7d314-2b13-436b-b02d-8de3b72b193f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "\"Today's date is October 3, 2023.\"" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# This can reveal the \"training cut off\", or the most recent date in the training data\n", "\n", "message_gpt(\"What is today's date?\")" ] }, { "cell_type": "markdown", "id": "f94013d1-4f27-4329-97e8-8c58db93636a", "metadata": {}, "source": [ "## User Interface time!" 
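, "\n", "\n", "Every demo below follows the same pattern: `gr.Interface` wraps a plain Python function, `inputs` and `outputs` describe the widgets, and `launch()` starts a local web server. Here is a minimal sketch of that pattern (assuming a recent Gradio install; the commented-out `close()` call is optional and simply frees the port once you're done experimenting):\n", "\n", "```python\n", "import gradio as gr\n", "\n", "def shout(text):\n", "    return text.upper()\n", "\n", "demo = gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\")\n", "demo.launch()    # serves the UI locally, e.g. on http://127.0.0.1:7860\n", "# demo.close()   # run later to shut the server down and free the port\n", "```"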
] }, { "cell_type": "code", "execution_count": 10, "id": "bc664b7a-c01d-4fea-a1de-ae22cdd5141a", "metadata": {}, "outputs": [], "source": [ "# here's a simple function\n", "\n", "def shout(text):\n", " print(f\"Shout has been called with input {text}\")\n", " return text.upper()" ] }, { "cell_type": "code", "execution_count": 11, "id": "083ea451-d3a0-4d13-b599-93ed49b975e4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Shout has been called with input hello\n" ] }, { "data": { "text/plain": [ "'HELLO'" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "shout(\"hello\")" ] }, { "cell_type": "code", "execution_count": 12, "id": "08f1f15a-122e-4502-b112-6ee2817dda32", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7860\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "Shout has been called with input Test Gradio!\n", "Created dataset file at: .gradio\\flagged\\dataset1.csv\n" ] } ], "source": [ "# The simplicty of gradio. This might appear in \"light mode\" - I'll show you how to make this in dark mode later.\n", "\n", "gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\").launch()" ] }, { "cell_type": "code", "execution_count": 13, "id": "c9a359a4-685c-4c99-891c-bb4d1cb7f426", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7861\n", "\n", "Could not create share link. Missing file: C:\\adm017\\llm_engineering\\.venv\\Lib\\site-packages\\gradio\\frpc_windows_amd64_v0.3. \n", "\n", "Please check your internet connection. This can happen if your antivirus software blocks the download of this file. You can install manually by following these steps: \n", "\n", "1. Download this file: https://cdn-media.huggingface.co/frpc-gradio-0.3/frpc_windows_amd64.exe\n", "2. Rename the downloaded file to: frpc_windows_amd64_v0.3\n", "3. Move the file to this location: C:\\adm017\\llm_engineering\\.venv\\Lib\\site-packages\\gradio\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "Shout has been called with input hi\n" ] } ], "source": [ "# Adding share=True means that it can be accessed publically\n", "# A more permanent hosting is available using a platform called Spaces from HuggingFace, which we will touch on next week\n", "# NOTE: Some Anti-virus software and Corporate Firewalls might not like you using share=True. If you're at work on on a work network, I suggest skip this test.\n", "\n", "gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\").launch(share=True)" ] }, { "cell_type": "code", "execution_count": 14, "id": "cd87533a-ff3a-4188-8998-5bedd5ba2da3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7862\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" }, { "name": "stdout", "output_type": "stream", "text": [ "Shout has been called with input war\n" ] } ], "source": [ "# Adding inbrowser=True opens up a new browser window automatically\n", "\n", "gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\").launch(inbrowser=True)" ] }, { "cell_type": "markdown", "id": "b42ec007-0314-48bf-84a4-a65943649215", "metadata": {}, "source": [ "## Forcing dark mode\n", "\n", "Gradio appears in light mode or dark mode depending on the settings of the browser and computer. There is a way to force gradio to appear in dark mode, but Gradio recommends against this as it should be a user preference (particularly for accessibility reasons). But if you wish to force dark mode for your screens, below is how to do it." ] }, { "cell_type": "code", "execution_count": 15, "id": "e8129afa-532b-4b15-b93c-aa9cca23a546", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7863\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Define this variable and then pass js=force_dark_mode when creating the Interface\n", "\n", "force_dark_mode = \"\"\"\n", "function refresh() {\n", " const url = new URL(window.location);\n", " if (url.searchParams.get('__theme') !== 'dark') {\n", " url.searchParams.set('__theme', 'dark');\n", " window.location.href = url.href;\n", " }\n", "}\n", "\"\"\"\n", "gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\", js=force_dark_mode).launch()" ] }, { "cell_type": "code", "execution_count": 16, "id": "3cc67b26-dd5f-406d-88f6-2306ee2950c0", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7864\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Inputs and Outputs\n", "\n", "view = gr.Interface(\n", " fn=shout,\n", " inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n", " outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "code", "execution_count": 17, "id": "f235288e-63a2-4341-935b-1441f9be969b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7865\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# And now - changing the function from \"shout\" to \"message_gpt\"\n", "\n", "view = gr.Interface(\n", " fn=message_gpt,\n", " inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n", " outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "code", "execution_count": 18, "id": "af9a3262-e626-4e4b-80b0-aca152405e63", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7866\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Let's use Markdown\n", "# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n", "# I'm taking advantage of system_message being a global variable, used back in the message_gpt function (go take a look)\n", "# Not a great software engineering practice, but quite sommon during Jupyter Lab R&D!\n", "\n", "system_message = \"You are a helpful assistant that responds in markdown\"\n", "\n", "view = gr.Interface(\n", " fn=message_gpt,\n", " inputs=[gr.Textbox(label=\"Your message:\")],\n", " outputs=[gr.Markdown(label=\"Response:\")],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "code", "execution_count": 19, "id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f", "metadata": {}, "outputs": [], "source": [ "# Let's create a call that streams back results\n", "# If you'd like a refresher on Generators (the \"yield\" keyword),\n", "# Please take a look at the Intermediate Python notebook in week1 folder.\n", "\n", "def stream_gpt(prompt):\n", " messages = [\n", " {\"role\": \"system\", \"content\": system_message},\n", " {\"role\": \"user\", \"content\": prompt}\n", " ]\n", " stream = openai.chat.completions.create(\n", " model='gpt-4o-mini',\n", " messages=messages,\n", " stream=True\n", " )\n", " result = \"\"\n", " for chunk in stream:\n", " result += chunk.choices[0].delta.content or \"\"\n", " yield result" ] }, { "cell_type": "code", "execution_count": 20, "id": "0bb1f789-ff11-4cba-ac67-11b815e29d09", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7867\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "view = gr.Interface(\n", " fn=stream_gpt,\n", " inputs=[gr.Textbox(label=\"Your message:\")],\n", " outputs=[gr.Markdown(label=\"Response:\")],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "code", "execution_count": 21, "id": "bbc8e930-ba2a-4194-8f7c-044659150626", "metadata": {}, "outputs": [], "source": [ "def stream_claude(prompt):\n", " result = claude.messages.stream(\n", " model=\"claude-3-haiku-20240307\",\n", " max_tokens=1000,\n", " temperature=0.7,\n", " system=system_message,\n", " messages=[\n", " {\"role\": \"user\", \"content\": prompt},\n", " ],\n", " )\n", " response = \"\"\n", " with result as stream:\n", " for text in stream.text_stream:\n", " response += text or \"\"\n", " yield response" ] }, { "cell_type": "code", "execution_count": 22, "id": "a0066ffd-196e-4eaf-ad1e-d492958b62af", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7868\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "view = gr.Interface(\n", " fn=stream_claude,\n", " inputs=[gr.Textbox(label=\"Your message:\")],\n", " outputs=[gr.Markdown(label=\"Response:\")],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "markdown", "id": "bc5a70b9-2afe-4a7c-9bed-2429229e021b", "metadata": {}, "source": [ "## Minor improvement\n", "\n", "I've made a small improvement to this code.\n", "\n", "Previously, it had these lines:\n", "\n", "```\n", "for chunk in result:\n", " yield chunk\n", "```\n", "\n", "There's actually a more elegant way to achieve this (which Python people might call more 'Pythonic'):\n", "\n", "`yield from result`\n", "\n", "I cover this in more detail in the Intermediate Python notebook in the week1 folder - take a look if you'd like more." ] }, { "cell_type": "code", "execution_count": 23, "id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5", "metadata": {}, "outputs": [], "source": [ "def stream_model(prompt, model):\n", " if model==\"GPT\":\n", " result = stream_gpt(prompt)\n", " elif model==\"Claude\":\n", " result = stream_claude(prompt)\n", " else:\n", " raise ValueError(\"Unknown model\")\n", " yield from result" ] }, { "cell_type": "code", "execution_count": 24, "id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7869\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ "view = gr.Interface(\n", " fn=stream_model,\n", " inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")],\n", " outputs=[gr.Markdown(label=\"Response:\")],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "markdown", "id": "d933865b-654c-4b92-aa45-cf389f1eda3d", "metadata": {}, "source": [ "# Building a company brochure generator\n", "\n", "Now you know how - it's simple!" ] }, { "cell_type": "markdown", "id": "92d7c49b-2e0e-45b3-92ce-93ca9f962ef4", "metadata": {}, "source": [ "\n", " \n", " \n", " \n", " \n", "
## Before you read the next few cells\n", "\n", "Try to do this yourself - go back to the company brochure in week1, day5 and add a Gradio UI to the end. Then come and look at the solution.
" ] }, { "cell_type": "code", "execution_count": 25, "id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf", "metadata": {}, "outputs": [], "source": [ "# A class to represent a Webpage\n", "\n", "class Website:\n", " url: str\n", " title: str\n", " text: str\n", "\n", " def __init__(self, url):\n", " self.url = url\n", " response = requests.get(url)\n", " self.body = response.content\n", " soup = BeautifulSoup(self.body, 'html.parser')\n", " self.title = soup.title.string if soup.title else \"No title found\"\n", " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", " irrelevant.decompose()\n", " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", "\n", " def get_contents(self):\n", " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" ] }, { "cell_type": "code", "execution_count": 26, "id": "c701ec17-ecd5-4000-9f68-34634c8ed49d", "metadata": {}, "outputs": [], "source": [ "# With massive thanks to Bill G. who noticed that a prior version of this had a bug! Now fixed.\n", "\n", "system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n", "and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"" ] }, { "cell_type": "code", "execution_count": 27, "id": "5def90e0-4343-4f58-9d4a-0e36e445efa4", "metadata": {}, "outputs": [], "source": [ "def stream_brochure(company_name, url, model):\n", " prompt = f\"Please generate a company brochure for {company_name}. Here is their landing page:\\n\"\n", " prompt += Website(url).get_contents()\n", " if model==\"GPT\":\n", " result = stream_gpt(prompt)\n", " elif model==\"Claude\":\n", " result = stream_claude(prompt)\n", " else:\n", " raise ValueError(\"Unknown model\")\n", " yield from result" ] }, { "cell_type": "code", "execution_count": 28, "id": "66399365-5d67-4984-9d47-93ed26c0bd3d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "* Running on local URL: http://127.0.0.1:7870\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "view = gr.Interface(\n", " fn=stream_brochure,\n", " inputs=[\n", " gr.Textbox(label=\"Company name:\"),\n", " gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n", " gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\")],\n", " outputs=[gr.Markdown(label=\"Brochure:\")],\n", " flagging_mode=\"never\"\n", ")\n", "view.launch()" ] }, { "cell_type": "code", "execution_count": null, "id": "ede97ca3-a0f8-4f6e-be17-d1de7fef9cc0", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python (my-venv-name)", "language": "python", "name": "my-venv-name" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.2" } }, "nbformat": 4, "nbformat_minor": 5 }