diff --git a/week2/community-contributions/Gemini-api.ipynb b/week2/community-contributions/Gemini-api.ipynb new file mode 100644 index 0000000..afff70d --- /dev/null +++ b/week2/community-contributions/Gemini-api.ipynb @@ -0,0 +1,130 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 13, + "id": "147ce61d-b10e-478e-8300-2fb3101f617c", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "2dab29c1-3a8d-45bc-9f45-407419449ba9", + "metadata": {}, + "outputs": [], + "source": [ + "import google.generativeai" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5fb5b749-d84b-4f8c-bfb9-2f5c4e8a2daa", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv()\n", + "# openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "# anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "d34ee171-2647-47cf-9336-2d016480656f", + "metadata": {}, + "outputs": [], + "source": [ + "google.generativeai.configure()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "856212a1-d07a-400b-9cef-a198e22f26ac", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that is great at telling jokes\"\n", + "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "47289056-cc2b-4d2d-9e18-ecd65c0f3232", + "metadata": {}, + "outputs": [], + "source": [ + "prompts = [\n", + " 
{\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d331aaf-162b-499e-af7e-5e097e84f1bd", + "metadata": {}, + "outputs": [], + "source": [ + "# The API for Gemini has a slightly different structure.\n", + "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", + "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n", + "\n", + "gemini = google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-flash',\n", + " system_instruction=system_message\n", + ")\n", + "response = gemini.generate_content(user_prompt)\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b727ee91-92b8-4d62-9a03-1b85a76b905c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/day2-gemini.ipynb b/week2/community-contributions/day2-gemini.ipynb new file mode 100644 index 0000000..4474609 --- /dev/null +++ b/week2/community-contributions/day2-gemini.ipynb @@ -0,0 +1,393 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "1b89f103-fc49-487e-930e-14abff8bfab1", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import 
def message_gemini(prompt):
    """Send a single prompt to Gemini 1.5 Flash and return the reply text.

    The assistant persona comes from the module-level ``system_message``,
    which is read at call time, so later reassignments take effect.
    """
    # NOTE: the original built an OpenAI-style `messages` list here, but the
    # Gemini API takes the system prompt via `system_instruction` and the user
    # prompt directly, so that list was dead code and has been removed.
    gemini = google.generativeai.GenerativeModel(
        model_name='gemini-1.5-flash',
        system_instruction=system_message,
    )
    response = gemini.generate_content(prompt)
    return response.text
def shout(text):
    """Return *text* upper-cased, logging each call so the Gradio wiring is visible."""
    message = f"Shout has been called with input {text}"
    print(message)
    result = text.upper()
    return result
def stream_gemini(prompt):
    """Stream a Gemini 1.5 Flash reply, yielding the accumulated text so far.

    Yields the growing response string after each streamed chunk, which is
    the shape Gradio expects from a streaming callback function.
    """
    gemini = genai.GenerativeModel(
        model_name='gemini-1.5-flash',
        # Dropped the redundant `safety_settings=None` constructor argument:
        # None is already the default, and the per-request settings below are
        # what actually apply — keeping both was confusing.
        system_instruction=system_message,
    )

    # Disable all four blockable harm categories so demo prompts stream freely.
    safety_settings = [
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    ]
    response = gemini.generate_content(prompt, safety_settings=safety_settings, stream=True)

    result = ""
    for chunk in response:
        # NOTE(review): chunk.text raises if a chunk is blocked despite the
        # settings above — acceptable for this demo; verify for production use.
        result += chunk.text
        yield result
class Website:
    """Scraped snapshot of a web page: its URL, title, and visible text."""

    url: str
    title: str
    text: str

    def __init__(self, url):
        self.url = url
        response = requests.get(url)
        self.body = response.content
        soup = BeautifulSoup(self.body, 'html.parser')
        if soup.title:
            self.title = soup.title.string
        else:
            self.title = "No title found"
        # Remove tags that carry no prose before extracting the page text.
        for tag in soup.body(["script", "style", "img", "input"]):
            tag.decompose()
        self.text = soup.body.get_text(separator="\n", strip=True)

    def get_contents(self):
        """Return the title and text formatted for inclusion in an LLM prompt."""
        return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"


def stream_brochure(company_name, url, model, response_tone):
    """Yield a streamed brochure for *company_name* built from its landing page.

    NOTE(review): stream_gpt / stream_claude are assumed to be defined in an
    earlier cell; only stream_gemini exists in this notebook — selecting the
    other models will raise NameError until they are defined. Confirm.
    """
    prompt = f"Please generate a {response_tone} company brochure for {company_name}. Here is their landing page:\n"
    prompt += Website(url).get_contents()
    if model == "GPT":
        source = stream_gpt(prompt)
    elif model == "Claude":
        source = stream_claude(prompt)
    elif model == "Gemini":
        source = stream_gemini(prompt)
    else:
        raise ValueError("Unknown model")
    yield from source
Conversational AI - aka Chatbot!" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import google.generativeai\n", + "# import anthropic\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "231605aa-fccb-447e-89cf-8b187444536a", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv()\n", + "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb", + "metadata": {}, + "outputs": [], + "source": [ + "google.generativeai.configure()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e16839b5-c03b-4d9d-add6-87a0f6f37575", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba2123e7-77ed-43b4-8c37-03658fb42b78", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that is great at telling jokes\"\n", + "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"\n", + "\n", + "prompts = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + "\n", + "# The API for Gemini has a slightly different structure.\n", + "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", + "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n", + "\n", + "gemini = 
google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-flash',\n", + " system_instruction=system_message\n", + ")\n", + "response = gemini.generate_content(user_prompt)\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "7b933ff3", + "metadata": {}, + "outputs": [], + "source": [ + "import google.generativeai as genai\n", + "\n", + "model = genai.GenerativeModel('gemini-1.5-flash')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91578b16", + "metadata": {}, + "outputs": [], + "source": [ + "chat = model.start_chat(history=[])\n", + "response = chat.send_message('Hello! My name is Shardul.')\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7c4bc38f", + "metadata": {}, + "outputs": [], + "source": [ + "response = chat.send_message('Can you tell something interesting about star wars?')\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "337bee91", + "metadata": {}, + "outputs": [], + "source": [ + "response = chat.send_message('Do you remember what my name is?')\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcaf4d95", + "metadata": {}, + "outputs": [], + "source": [ + "chat.history" + ] + }, + { + "cell_type": "markdown", + "id": "98e97227-f162-4d1a-a0b2-345ff248cbe7", + "metadata": {}, + "source": [ + "# Please read this! 
A change from the video:\n", + "\n", + "In the video, I explain how we now need to write a function called:\n", + "\n", + "`chat(message, history)`\n", + "\n", + "Which expects to receive `history` in a particular format, which we need to map to the OpenAI format before we call OpenAI:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", + " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", + " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n", + "]\n", + "```\n", + "\n", + "But Gradio has been upgraded! Now it will pass in `history` in the exact OpenAI format, perfect for us to send straight to OpenAI.\n", + "\n", + "So our work just got easier!\n", + "\n", + "We will write a function `chat(message, history)` where: \n", + "**message** is the prompt to use \n", + "**history** is the past conversation, in OpenAI format \n", + "\n", + "We will combine the system message, history and latest message, then call OpenAI." 
def chat(message, history):
    """Gradio streaming chat callback backed by Gemini 1.5 Flash.

    Yields the accumulated reply after each streamed chunk, which is the
    shape gr.ChatInterface expects from a generator callback.

    NOTE(review): `history` is not forwarded to Gemini, so multi-turn context
    is lost — TODO: map it to Gemini's parts/role format and pass it along.
    """
    relevant_system_message = system_message
    if 'belt' in message:
        relevant_system_message += " The store does not sell belts; if you are asked for belts, be sure to point out other items on sale."

    # Build the model per call so the (possibly belt-adjusted) system message
    # actually reaches Gemini; the original computed it but never used it.
    model = google.generativeai.GenerativeModel(
        model_name='gemini-1.5-flash',
        system_instruction=relevant_system_message,
    )

    stream = model.generate_content(message, safety_settings=[
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"}], stream=True)

    response = ""
    for chunk in stream:
        # Streaming chunks are GenerateContentResponse objects: the text lives
        # on the `.text` attribute, not in a dict — the original
        # `chunk.get('content', '')` raised AttributeError on the first chunk.
        response += chunk.text or ''
        yield response
import time

def transform_history(history):
    """Convert Gradio (user, bot) tuple history into Gemini's parts/role format."""
    new_history = []
    for user_text, model_text in history:
        new_history.append({"parts": [{"text": user_text}], "role": "user"})
        new_history.append({"parts": [{"text": model_text}], "role": "model"})
    return new_history

def response(message, history):
    """Gradio callback: replay *history* into the Gemini chat session, then
    stream the reply back one character at a time (typewriter effect)."""
    global chat
    # Rebuilding chat.history from Gradio's history keeps the 'Undo' and
    # 'Clear' buttons consistent with what the model sees.
    chat.history = transform_history(history)
    reply = chat.send_message(message)
    reply.resolve()
    for end in range(1, len(reply.text) + 1):
        time.sleep(0.01)
        yield reply.text[:end]
\n", + " \n", + " \n", + "

Business Applications

\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "

\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.
\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6dfb9e21-df67-4c2b-b952-5e7e7961b03d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llms", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}