From 8d08bc0bc6cd3353eb1f141b18390f9723af975c Mon Sep 17 00:00:00 2001
From: codenigma1
Date: Mon, 23 Dec 2024 19:55:50 +1100
Subject: [PATCH] Refine user query by another AI agent instead of providing
 a list of dictionaries
---
 .../day3-refine-user-query-by-llama.ipynb     | 365 ++++++++++++++++++
 1 file changed, 365 insertions(+)
 create mode 100644 week2/community-contributions/day3-refine-user-query-by-llama.ipynb

diff --git a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb
new file mode 100644
index 0000000..c54677e
--- /dev/null
+++ b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb
@@ -0,0 +1,365 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "75e2ef28-594f-4c18-9d22-c6b8cd40ead2",
+ "metadata": {},
+ "source": [
+ "# Day 3 - Conversational AI - aka Chatbot!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import gradio as gr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "231605aa-fccb-447e-89cf-8b187444536a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "# Print the key prefixes to help with any debugging\n",
+ "\n",
+ "load_dotenv()\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
+ "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+ "\n",
+ "if openai_api_key:\n",
+ "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+ "else:\n",
+ "    print(\"OpenAI API Key not set\")\n",
+ "\n",
+ "if anthropic_api_key:\n",
+ "    print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
+ "else:\n",
+ "    print(\"Anthropic API Key not set\")\n",
+ "\n",
+ "if google_api_key:\n",
+ "    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
+ "else:\n",
+ "    print(\"Google API Key not set\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialize\n",
+ "\n",
+ "openai = OpenAI()\n",
+ "MODEL = 'gpt-4o-mini'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "e16839b5-c03b-4d9d-add6-87a0f6f37575",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful assistant\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "98e97227-f162-4d1a-a0b2-345ff248cbe7",
+ "metadata": {},
+ "source": [
+ "# Please read this! A change from the video:\n",
+ "\n",
+ "In the video, I explain how we now need to write a function called:\n",
+ "\n",
+ "`chat(message, history)`\n",
+ "\n",
+ "Which expects to receive `history` in a particular format, which we need to map to the OpenAI format before we call OpenAI:\n",
+ "\n",
+ "```\n",
+ "[\n",
+ "    {\"role\": \"system\", \"content\": \"system message here\"},\n",
+ "    {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
+ "    {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
+ "    {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
+ "]\n",
+ "```\n",
+ "\n",
+ "But Gradio has been upgraded! Now it will pass in `history` in the exact OpenAI format, perfect for us to send straight to OpenAI.\n",
+ "\n",
+ "So our work just got easier!\n",
+ "\n",
+ "We will write a function `chat(message, history)` where: \n",
+ "**message** is the prompt to use \n",
+ "**history** is the past conversation, in OpenAI format \n",
+ "\n",
+ "We will combine the system message, history and latest message, then call OpenAI."
+ ]
+ },
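+ {
+ "cell_type": "markdown",
+ "id": "old-format-mapping-note",
+ "metadata": {},
+ "source": [
+ "For reference, here is a minimal sketch of the mapping we would have needed before the upgrade, assuming the older Gradio convention where `history` arrived as a list of `[user, assistant]` pairs. You don't need this cell - it is only for comparison."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "old-format-mapping-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hypothetical example only: converting old tuple-style history to OpenAI format.\n",
+ "# Gradio now passes history in the OpenAI format directly, so this is no longer needed.\n",
+ "\n",
+ "def map_history_to_openai(history_tuples):\n",
+ "    messages = []\n",
+ "    for user_msg, assistant_msg in history_tuples:\n",
+ "        messages.append({\"role\": \"user\", \"content\": user_msg})\n",
+ "        messages.append({\"role\": \"assistant\", \"content\": assistant_msg})\n",
+ "    return messages"
+ ]
+ },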
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Simpler than in my video - we can easily create this function that calls OpenAI\n",
+ "# It's now just 1 line of code to prepare the input to OpenAI!\n",
+ "\n",
+ "def chat(message, history):\n",
+ "    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
+ "\n",
+ "    print(\"History is:\")\n",
+ "    print(history)\n",
+ "    print(\"And messages is:\")\n",
+ "    print(messages)\n",
+ "\n",
+ "    stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
+ "\n",
+ "    response = \"\"\n",
+ "    for chunk in stream:\n",
+ "        response += chunk.choices[0].delta.content or ''\n",
+ "        yield response"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1334422a-808f-4147-9c4c-57d63d9780d0",
+ "metadata": {},
+ "source": [
+ "## And then enter Gradio's magic!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "1f91b414-8bab-472d-b9c9-3fa51259bdfe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n",
+ "the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n",
+ "For example, if the customer says 'I'm looking to buy a hat', \\\n",
+ "you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales event.' \\\n",
+ "Encourage the customer to buy hats if they are unsure what to get.\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "4e5be3ec-c26c-42bc-ac16-c39d369883f6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def chat(message, history):\n",
+ "    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
+ "\n",
+ "    stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
+ "\n",
+ "    response = \"\"\n",
+ "    for chunk in stream:\n",
+ "        response += chunk.choices[0].delta.content or ''\n",
+ "        yield response"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "413e9e4e-7836-43ac-a0c3-e1ab5ed6b136",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "d75f0ffa-55c8-4152-b451-945021676837",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message += \"\\nIf the customer asks for shoes, you should respond that shoes are not on sale today, \\\n",
+ "but remind the customer to look at hats!\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c602a8dd-2df7-4eb7-b539-4e01865a6351",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "ce43fe80",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Point the OpenAI client at a local Ollama server, which exposes an OpenAI-compatible API\n",
+ "ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "4f28e3a8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ollama_system_prompt = \"\"\"You are an assistant whose only job is to refine the user's query: fix grammar, capitalisation and casing so that it reads as a clear, well-formed prompt.\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "667a7fbb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ollama_user_prompt = \"\"\"Like a RAG step, you should also add helpful information about the content of the user's query. \n",
+ "The following items are NOT sold in our store:\n",
+ "pants\n",
+ "sunglasses\n",
+ "watch\n",
+ "underwear\n",
+ "If any of these items appear in the user's query, gently note that the store does not sell that item (e.g. 'pants'), and be sure to point out other items that are on sale.\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "4632f16b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "user_messages = [{\"role\": \"system\", \"content\": ollama_system_prompt}, {\"role\": \"user\", \"content\": ollama_user_prompt}]"
+ ]
+ },
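+ {
+ "cell_type": "markdown",
+ "id": "refiner-sanity-check-note",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of the refiner before wiring it into `chat`. This is a minimal sketch assuming Ollama is running locally (`ollama serve`) with the `llama3.2` model already pulled; the messy sample query below is made up for illustration."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "refiner-sanity-check-code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Try the refinement step on its own with a deliberately messy sample query\n",
+ "sample_query = \"do u sell pant and hat ??\"\n",
+ "\n",
+ "test_messages = user_messages + [{\"role\": \"user\", \"content\": f\"User query: {sample_query}\"}]\n",
+ "\n",
+ "reply = ollama_via_openai.chat.completions.create(model='llama3.2', messages=test_messages)\n",
+ "print(reply.choices[0].message.content)"
+ ]
+ },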
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "0a987a66-1061-46d6-a83a-a30859dc88bf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Fixed a bug in this function brilliantly identified by student Gabor M.!\n",
+ "# I've also improved the structure of this function\n",
+ "\n",
+ "def chat(message, history):\n",
+ "    relevant_system_message = system_message\n",
+ "\n",
+ "    # Refine the user query - the refiner needs to see the actual message, so append it\n",
+ "    try:\n",
+ "        refine_messages = user_messages + [{\"role\": \"user\", \"content\": f\"User query: {message}\"}]\n",
+ "        refine_query = ollama_via_openai.chat.completions.create(model='llama3.2', messages=refine_messages)\n",
+ "        refined_content = refine_query.choices[0].message.content\n",
+ "    except Exception as e:\n",
+ "        print(f\"Error in refinement: {e}\")\n",
+ "        refined_content = \"\"  # fall back to the plain system message\n",
+ "\n",
+ "    # Log the original and refined queries\n",
+ "    # print(f\"Original User Query: {message}\")\n",
+ "    # print(f\"Refined Query: {refined_content}\")\n",
+ "\n",
+ "    relevant_system_message += \"\\n\" + refined_content\n",
+ "\n",
+ "    messages = [{\"role\": \"system\", \"content\": relevant_system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
+ "\n",
+ "    stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
+ "\n",
+ "    response = \"\"\n",
+ "    for chunk in stream:\n",
+ "        response += chunk.choices[0].delta.content or ''\n",
+ "        yield response"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "20570de2-eaad-42cc-a92c-c779d71b48b6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
\n", + " \n", + " \n", + "

Business Applications

\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "

\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.
\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6dfb9e21-df67-4c2b-b952-5e7e7961b03d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llm_env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}