2 changed files with 1145 additions and 0 deletions
@ -0,0 +1,444 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "it1JLoxrSqO1", |
||||||
|
"metadata": { |
||||||
|
"id": "it1JLoxrSqO1" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!pip install openai python-docx python-dotenv gradio openpyxl" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "950a084a-7f92-4669-af62-f07cb121da56", |
||||||
|
"metadata": { |
||||||
|
"id": "950a084a-7f92-4669-af62-f07cb121da56" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import os\n", |
||||||
|
"import json\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"#from IPython.display import Markdown, display, update_display\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"from docx import Document" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d0548135-ef16-4102-a55a-cea888a51c29", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import pandas as pd\n", |
||||||
|
"import re\n", |
||||||
|
"import gradio as gr" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d", |
||||||
|
"metadata": { |
||||||
|
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"class ReqDoc:\n", |
||||||
|
" def __init__(self, file_path):\n", |
||||||
|
" self.file_path = file_path\n", |
||||||
|
"\n", |
||||||
|
" def extract(self):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Reads the content of a .docx file and returns the paragraphs as a list of strings.\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" try:\n", |
||||||
|
" # Check if the file exists\n", |
||||||
|
" if not os.path.exists(self.file_path):\n", |
||||||
|
" raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n", |
||||||
|
"\n", |
||||||
|
" # Attempt to open and read the document\n", |
||||||
|
" doc = Document(self.file_path)\n", |
||||||
|
" text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n", |
||||||
|
" return text\n", |
||||||
|
"\n", |
||||||
|
" except FileNotFoundError as fnf_error:\n", |
||||||
|
" print(fnf_error)\n", |
||||||
|
" return None\n", |
||||||
|
" except Exception as e:\n", |
||||||
|
" print(f\"An error occurred: {e}\")\n", |
||||||
|
" return None\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9", |
||||||
|
"metadata": { |
||||||
|
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Initialize and constants\n", |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n", |
||||||
|
" print(\"API key looks good!\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"There might be a problem with your API key. Please check!\")\n", |
||||||
|
" \n", |
||||||
|
"MODEL = 'gpt-4o-mini'\n", |
||||||
|
"openai = OpenAI()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb", |
||||||
|
"metadata": { |
||||||
|
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Set up system prompt for extracting just the requirements from the document\n", |
||||||
|
"\n", |
||||||
|
"req_doc_system_prompt = \"You are provided with a complete requirements specifications document. \\\n", |
||||||
|
"You are able to decide which content from that document are related to actual requirements, identify each requirement as \\\n", |
||||||
|
"functional or non-functional and list them all.\\n\"\n", |
||||||
|
"req_doc_system_prompt += \"If the document is empty or does not contain requirements or if you cannot extract them, please respond as such.\\\n", |
||||||
|
"Do not make up your own requirements. \\n\"\n", |
||||||
|
"req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||||
|
"req_doc_system_prompt += \"\"\"\n", |
||||||
|
"{\n", |
||||||
|
" \"requirements\": [\n", |
||||||
|
" {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n", |
||||||
|
" {\"RequirementNo\": \"FR-02\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n", |
||||||
|
" {\"RequirementNo\": \"NFR-01\", \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n", |
||||||
|
" {\"RequirementNo\": \"NFR-02\", \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n", |
||||||
|
" ]\n", |
||||||
|
"}\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "20460e45-c1b7-4dc4-ab07-932235c19895", |
||||||
|
"metadata": { |
||||||
|
"id": "20460e45-c1b7-4dc4-ab07-932235c19895" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Set up user prompt, sending in the requirements doc as input and calling the ReqDoc.extract function. Key to note here is the explicit instructions to\n", |
||||||
|
"#respond in JSON format.\n", |
||||||
|
"\n", |
||||||
|
"def req_doc_user_prompt(doc):\n", |
||||||
|
" user_prompt = \"Here is the contents from a requirement document.\\n\"\n", |
||||||
|
" user_prompt += f\"{doc.extract()} \\n\"\n", |
||||||
|
" user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n", |
||||||
|
"paragraphs such as Approvers, table of contents and similar sections which are not really requirements.\\\n", |
||||||
|
"You must respond in a JSON format\"\n", |
||||||
|
" user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n", |
||||||
|
" user_prompt = user_prompt[:25_000] # Truncate if more than 25,000 characters\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891", |
||||||
|
"metadata": { |
||||||
|
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Function to call chatgpt-4o-mini model with the user and system prompts set above and returning the json formatted result obtained from chatgpt\n", |
||||||
|
"def get_requirements(doc):\n", |
||||||
|
" reqdoc = ReqDoc(doc)\n", |
||||||
|
" response = openai.chat.completions.create(\n", |
||||||
|
" model=MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": req_doc_system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n", |
||||||
|
" ],\n", |
||||||
|
" response_format={\"type\": \"json_object\"}\n", |
||||||
|
" )\n", |
||||||
|
" result = response.choices[0].message.content\n", |
||||||
|
" return json.loads(result)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e", |
||||||
|
"metadata": { |
||||||
|
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Uncomment and run this if you want to see the extracted requirements in json format.\n", |
||||||
|
"#get_requirements(\"reqdoc.docx\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "1fe8618c-1dfe-4030-bad8-405731294c93", |
||||||
|
"metadata": { |
||||||
|
"id": "1fe8618c-1dfe-4030-bad8-405731294c93" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"### Next, we will make another call to gpt-4o-mini" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b", |
||||||
|
"metadata": { |
||||||
|
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Set up system prompt to ask for test cases in table format\n", |
||||||
|
"system_prompt = \"You are an assistant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \\\n", |
||||||
|
"You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n", |
||||||
|
"The table will contain the following columns. \\\n", |
||||||
|
"1.S No\\\n", |
||||||
|
"2.Requirement No\\\n", |
||||||
|
"3.Requirement Description\\\n", |
||||||
|
"4.Test Case ID\\\n", |
||||||
|
"5.Test case summary\\\n", |
||||||
|
"6.Test case description\\\n", |
||||||
|
"7.Success criteria \\n\"\n", |
||||||
|
"system_prompt += \"If you are provided with an empty list, ask for a proper requirement doc\\n\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5", |
||||||
|
"metadata": { |
||||||
|
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Set up user prompt passing in the req doc file. This in turn will call the get_requirements function, which will make a call to chatgpt.\n", |
||||||
|
"\n", |
||||||
|
"def get_testcase_user_prompt(reqdoc):\n", |
||||||
|
" user_prompt = \"You are looking at the following list of requirements. \\n\"\n", |
||||||
|
" user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n", |
||||||
|
" user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n", |
||||||
|
" user_prompt = user_prompt[:25000]  # Truncate if more than 25,000 characters (was +=, which duplicated the prompt)\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "5b2a2b46-9d9c-416c-b189-3007b4d26d76", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#This is the 2nd call to chatgpt to get test cases. display(Markdown) will take care of producing a neatly formatted table output.\n", |
||||||
|
"def create_testcase_doc_gradio(response, is_response_ready, is_cleared, file_input):\n", |
||||||
|
" if is_cleared or file_input == None: # Prevent OpenAI call if \"Clear\" was clicked\n", |
||||||
|
" return \"\", False\n", |
||||||
|
" stream = openai.chat.completions.create(\n", |
||||||
|
" model=MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": get_testcase_user_prompt(file_input)}\n", |
||||||
|
" ],\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
" #Modified for Gradio\n", |
||||||
|
" result = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" result += chunk.choices[0].delta.content or \"\"\n", |
||||||
|
" #print(result)\n", |
||||||
|
" yield result, False" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "2bb96a11-063e-4b20-9880-71fa9ea4d3f7", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n", |
||||||
|
"force_dark_mode = \"\"\"\n", |
||||||
|
"function refresh() {\n", |
||||||
|
" const url = new URL(window.location);\n", |
||||||
|
" if (url.searchParams.get('__theme') !== 'dark') {\n", |
||||||
|
" url.searchParams.set('__theme', 'dark');\n", |
||||||
|
" window.location.href = url.href;\n", |
||||||
|
" }\n", |
||||||
|
"}\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "5c81c766-9613-4614-b88d-410654672b89", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def show_or_hide_save_button(response, is_response_ready, is_cleared):\n", |
||||||
|
" if is_cleared or response == None:\n", |
||||||
|
" return \"\", False\n", |
||||||
|
" table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n", |
||||||
|
" table_match = re.search(table_pattern, response)\n", |
||||||
|
" if table_match:\n", |
||||||
|
" return response, True #(response, is_response_ready)\n", |
||||||
|
" else:\n", |
||||||
|
" return response, False #(response, is_response_ready)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a5f5d8e7-d29c-4f40-8d57-a9911bb7c47e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def extract_table_from_markdown(response):\n", |
||||||
|
" # Regular expression to match Markdown tables\n", |
||||||
|
" table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n", |
||||||
|
" table_match = re.search(table_pattern, response)\n", |
||||||
|
"\n", |
||||||
|
" if table_match:\n", |
||||||
|
" table_data = table_match.group(0)\n", |
||||||
|
" # Process the table into a format pandas can read\n", |
||||||
|
" rows = table_data.strip().split(\"\\n\")\n", |
||||||
|
" data = [row.split(\"|\")[1:-1] for row in rows] # Split columns by '|'\n", |
||||||
|
"\n", |
||||||
|
" # Convert to DataFrame\n", |
||||||
|
" df = pd.DataFrame(data[1:], columns=data[0]) # First row is the header\n", |
||||||
|
"\n", |
||||||
|
" # Save to Excel\n", |
||||||
|
" output_file = \"test_cases.xlsx\"\n", |
||||||
|
" df.to_excel(output_file, index=False)\n", |
||||||
|
"\n", |
||||||
|
" return output_file\n", |
||||||
|
" else:\n", |
||||||
|
" return None" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c1380b11-3e28-40de-ab1a-93a5fd73cf81", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def extract_and_save_button(response, is_cleared):\n", |
||||||
|
" if is_cleared:\n", |
||||||
|
" return None # Do nothing if the file was cleared\n", |
||||||
|
" # This function will be triggered when the user clicks \"Save as Excel\"\n", |
||||||
|
" output_file = extract_table_from_markdown(response)\n", |
||||||
|
" if output_file:\n", |
||||||
|
" return output_file\n", |
||||||
|
" else:\n", |
||||||
|
" return \"No table found in the provided input.\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3a532b42-9f81-4c75-8be4-e40d621a6b35", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Gradio interface\n", |
||||||
|
"with gr.Blocks(js=force_dark_mode) as demo:\n", |
||||||
|
" gr.HTML(\"<h2 style='text-align: center; color: white;'>📄 Test case automation</h2>\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" file_input = gr.File(label=\"Upload your requirements docx file\", file_types=[\".docx\"])\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" response = gr.Markdown()\n", |
||||||
|
" # Button to save the table as Excel file (optional)\n", |
||||||
|
" save_button = gr.Button(\"Download Table as Excel\", visible=False)\n", |
||||||
|
" file_output = gr.File(label=\"Download Excel File\", visible=False) \n", |
||||||
|
" # State variable to track if response is ready\n", |
||||||
|
" is_response_ready = gr.State(False)\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" clear_button = gr.Button(\"Clear\")\n", |
||||||
|
" # State variable to track if clear button is clicked\n", |
||||||
|
" is_cleared = gr.State(False)\n", |
||||||
|
"\n", |
||||||
|
" # Function to show \"Processing...\" message\n", |
||||||
|
" def show_processing(is_cleared, file_input):\n", |
||||||
|
" if is_cleared or file_input==None:\n", |
||||||
|
" return None, False, is_cleared, file_input # Do nothing if the file was cleared\n", |
||||||
|
" #return gr.HTML(\"<h6 style='text-align: left; color: #ffffffff;'>⌛ Processing your file... Please wait!</h6>\"), False, is_cleared, file_input\n", |
||||||
|
" return \"⌛ Processing your file... Please wait!\", False, is_cleared, file_input\n", |
||||||
|
" \n", |
||||||
|
" # Trigger response only if the file was uploaded and not cleared\n", |
||||||
|
" file_input.change(\n", |
||||||
|
" lambda _: False, # Directly set is_cleared to False\n", |
||||||
|
" inputs=[file_input],\n", |
||||||
|
" outputs=[is_cleared]\n", |
||||||
|
" ).then(\n", |
||||||
|
" show_processing, inputs=[is_cleared, file_input], outputs=[response, is_response_ready, is_cleared, file_input]\n", |
||||||
|
" ).then(\n", |
||||||
|
" create_testcase_doc_gradio, inputs=[response, is_response_ready, is_cleared, file_input], outputs=[response, is_response_ready]\n", |
||||||
|
" ).then(\n", |
||||||
|
" show_or_hide_save_button, inputs=[response, is_response_ready, is_cleared], outputs=[response, is_response_ready]\n", |
||||||
|
" ).then(\n", |
||||||
|
" lambda _, ready: (gr.update(visible=ready), gr.update(visible=ready)), inputs=[response, is_response_ready], outputs=[save_button,file_output])\n", |
||||||
|
"\n", |
||||||
|
" #.then() passes the previous function outputs as inputs to the next function\n", |
||||||
|
"\n", |
||||||
|
" # Button action to extract and save table as an Excel file\n", |
||||||
|
" save_button.click(extract_and_save_button, inputs=[response, is_cleared], outputs=file_output)\n", |
||||||
|
" \n", |
||||||
|
" # Clear button resets both file and output while setting is_cleared to True\n", |
||||||
|
" clear_button.click(lambda: (None, None, None, True), inputs=None, outputs=[file_input, file_output, response, is_cleared]) \n", |
||||||
|
"\n", |
||||||
|
"# Launch Gradio app\n", |
||||||
|
"demo.launch(share=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "cd5314b2-ee91-49bd-9d40-558775d44382", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"colab": { |
||||||
|
"provenance": [] |
||||||
|
}, |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,701 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "ec4f6b32-46e9-429a-a3cd-521ff5418493", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Occasio - Event Management Assistant" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"import json\n", |
||||||
|
"import time\n", |
||||||
|
"import pprint\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"import anthropic\n", |
||||||
|
"import google.generativeai as genai\n", |
||||||
|
"import gradio as gr" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Load environment variables in a file called .env\n", |
||||||
|
"# Print the key prefixes to help with any debugging\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv()\n", |
||||||
|
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", |
||||||
|
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"if openai_api_key:\n", |
||||||
|
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"OpenAI API Key not set\")\n", |
||||||
|
" \n", |
||||||
|
"if anthropic_api_key:\n", |
||||||
|
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"Anthropic API Key not set\")\n", |
||||||
|
"\n", |
||||||
|
"if google_api_key:\n", |
||||||
|
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"Google API Key not set\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "8b501508-0082-47be-9903-52ff1c243486", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Connect to OpenAI, Anthropic and Google and assign a model for each\n", |
||||||
|
"\n", |
||||||
|
"openai = OpenAI()\n", |
||||||
|
"OPENAI_MODEL = \"gpt-4o-mini\"\n", |
||||||
|
"\n", |
||||||
|
"claude = anthropic.Anthropic()\n", |
||||||
|
"ANTHROPIC_MODEL = \"claude-3-haiku-20240307\"\n", |
||||||
|
"\n", |
||||||
|
"genai.configure()\n", |
||||||
|
"GOOGLE_MODEL = \"gemini-2.0-flash\"\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"system_message = \"You are called \\\"EventAI\\\", a virtual assistant for an Elementary school called Eagle Elementary School. You can help users by giving \\\n", |
||||||
|
"them details of upcoming school events like event name, description, location etc. \"\n", |
||||||
|
"#system_message += \"Introduce yourself with a warm welcome message on your first response ONLY.\"\n", |
||||||
|
"system_message += \"Give short, courteous answers, no more than 2 sentences. \"\n", |
||||||
|
"system_message += \"Always be accurate. If you don't know the answer, say so. Do not make up your own event details information\"\n", |
||||||
|
"system_message += \"You might be asked to list the questions asked by the user so far. In that situation, based on the conversation history provided to you, \\\n", |
||||||
|
"list the questions and respond\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Some imports for handling images\n", |
||||||
|
"\n", |
||||||
|
"import base64\n", |
||||||
|
"from io import BytesIO\n", |
||||||
|
"from PIL import Image" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def artist(event_text):\n", |
||||||
|
" image_response = openai.images.generate(\n", |
||||||
|
" model=\"dall-e-3\",\n", |
||||||
|
" prompt=f\"An image representing an {event_text}, showing typical activities that happen for that {event_text}, in a vibrant pop-art style that elementary school kids will like\",\n", |
||||||
|
" size=\"1024x1024\",\n", |
||||||
|
" n=1,\n", |
||||||
|
" response_format=\"b64_json\",\n", |
||||||
|
" )\n", |
||||||
|
" image_base64 = image_response.data[0].b64_json\n", |
||||||
|
" image_data = base64.b64decode(image_base64)\n", |
||||||
|
" return Image.open(BytesIO(image_data))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d104b96a-02ca-4159-82fe-88e0452aa479", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import base64\n", |
||||||
|
"from io import BytesIO\n", |
||||||
|
"from PIL import Image\n", |
||||||
|
"from IPython.display import Audio, display\n", |
||||||
|
"\n", |
||||||
|
"def talker(message):\n", |
||||||
|
" response = openai.audio.speech.create(\n", |
||||||
|
" model=\"tts-1\",\n", |
||||||
|
" voice=\"onyx\",\n", |
||||||
|
" input=message)\n", |
||||||
|
"\n", |
||||||
|
" audio_stream = BytesIO(response.content)\n", |
||||||
|
" output_filename = \"output_audio.mp3\"\n", |
||||||
|
" with open(output_filename, \"wb\") as f:\n", |
||||||
|
" f.write(audio_stream.read())\n", |
||||||
|
"\n", |
||||||
|
" # Play the generated audio\n", |
||||||
|
" display(Audio(output_filename, autoplay=True))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f0428a74-4daa-4b0d-b25a-219a35f39f55", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"school_events = [\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"pta\",\n", |
||||||
|
" \"name\": \"Parent Teachers Meeting (PTA/PTM)\",\n", |
||||||
|
" \"description\": \"Parent teachers meeting (PTA/PTM) to discuss students' progress.\",\n", |
||||||
|
" \"date_time\": \"Apr 1st, 2025 11 AM\",\n", |
||||||
|
" \"location\" : \"Glove Annexure Hall\"\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"read aloud\",\n", |
||||||
|
" \"name\": \"Read Aloud to your class/Reading to your class\",\n", |
||||||
|
" \"description\": \"Kids can bring their favorite book and read it to their class.\",\n", |
||||||
|
" \"date_time\": \"Apr 15th, 2025 1 PM\",\n", |
||||||
|
" \"location\": \"Classroom\"\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"100 days of school\",\n", |
||||||
|
" \"name\": \"Celebrating 100 days of school. Dress up time for kids\",\n", |
||||||
|
" \"description\": \"Kids can dress up as old people and celebrate the milestone with their teachers.\",\n", |
||||||
|
" \"date_time\": \"May 15th, 2025 11 AM\",\n", |
||||||
|
" \"location\": \"Classroom\"\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"Book fair\",\n", |
||||||
|
" \"name\": \"Scholastic book fair\",\n", |
||||||
|
" \"description\": \"Kids can purchase their favorite scholastic books.\",\n", |
||||||
|
" \"date_time\": \"Jun 22nd, 2025 10:30 AM\",\n", |
||||||
|
" \"location\": \"Library\"\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"Halloween\",\n", |
||||||
|
" \"name\": \"Halloween\",\n", |
||||||
|
" \"description\": \"Kids can dress up as their favorite characters\",\n", |
||||||
|
" \"date_time\": \"Oct 31st, 2025\",\n", |
||||||
|
" \"location\": \"Classroom\"\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"Movie Night\",\n", |
||||||
|
" \"name\": \"Movie Night\",\n", |
||||||
|
" \"description\": \"A popular and kids centric movie will be played. Kids and families are welcome.\",\n", |
||||||
|
" \"date_time\": \"May 3rd, 2025\",\n", |
||||||
|
" \"location\": \"Main auditorium\"\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"event_id\": \"Intruder Drill\",\n", |
||||||
|
" \"name\": \"Intruder Drill\",\n", |
||||||
|
" \"description\": \"State mandated monthly intruder drill to prepare staff and students with necessary safety skills in times of a crisis\",\n", |
||||||
|
" \"date_time\": \"May 3rd, 2025\",\n", |
||||||
|
" \"location\": \"Main auditorium\"\n", |
||||||
|
" }\n", |
||||||
|
"]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "b7027eec-e522-49c1-af59-56a82f9d3be8", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def get_event_details(query):\n", |
||||||
|
" search_words = query.lower().split() \n", |
||||||
|
" for event in school_events:\n", |
||||||
|
" event_text = event['name'].lower() + ' ' + event['description'].lower()\n", |
||||||
|
" if all(word in event_text for word in search_words):\n", |
||||||
|
" return event\n", |
||||||
|
" return None" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Tools\n", |
||||||
|
"\n", |
||||||
|
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n", |
||||||
|
"\n", |
||||||
|
"With tools, you can write a function, and have the LLM call that function as part of its response.\n", |
||||||
|
"\n", |
||||||
|
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n", |
||||||
|
"\n", |
||||||
|
"Well, kinda." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "68e96b54-b891-4e7b-a6bc-17693dc99970", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# for claude\n", |
||||||
|
"tools_claude = [\n", |
||||||
|
" {\n", |
||||||
|
" \"name\": \"get_event_details\",\n", |
||||||
|
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n", |
||||||
|
"'When is the pta meeting scheduled?\",\n", |
||||||
|
" \"input_schema\": {\n", |
||||||
|
" \"type\": \"object\",\n", |
||||||
|
" \"properties\": {\n", |
||||||
|
" \"event_text\": {\n", |
||||||
|
" \"type\": \"string\",\n", |
||||||
|
" \"description\": \"The event keyword that the user wants details on\"\n", |
||||||
|
" }\n", |
||||||
|
" },\n", |
||||||
|
" \"required\": [\"event_text\"]\n", |
||||||
|
" }\n", |
||||||
|
"}\n", |
||||||
|
"]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "636188d2-7e7a-48a0-9f04-f3813c7dc323", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# For GPT\n", |
||||||
|
"events_function_gpt = {\n", |
||||||
|
" \"name\": \"get_event_details\",\n", |
||||||
|
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n", |
||||||
|
" 'When is the pta meeting scheduled?\",\n", |
||||||
|
" \"parameters\": {\n", |
||||||
|
" \"type\": \"object\",\n", |
||||||
|
" \"properties\": {\n", |
||||||
|
" \"event_text\": {\n", |
||||||
|
" \"type\": \"string\",\n", |
||||||
|
" \"description\": \"The event keyword that the user wants details on\",\n", |
||||||
|
" },\n", |
||||||
|
" },\n", |
||||||
|
" \"required\": [\"event_text\"],\n", |
||||||
|
" \"additionalProperties\": False\n", |
||||||
|
" }\n", |
||||||
|
"}" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "605684f8-ed02-4cc9-8a16-012533b601cb", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# And this is included in a list of tools:\n", |
||||||
|
"tools_gpt = [{\"type\": \"function\", \"function\": events_function_gpt}]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4ac5a34c-a630-449a-9d46-669daace799c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Gemini function declaration structure\n", |
||||||
|
"gemini_event_details = [{\n", |
||||||
|
" \"name\": \"get_event_details\",\n", |
||||||
|
" \"description\":\"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks 'When is the pta meeting scheduled?'\",\n", |
||||||
|
" \"parameters\": {\n", |
||||||
|
" \"type\": \"object\",\n", |
||||||
|
" \"properties\": {\n", |
||||||
|
" \"event_text\": {\n", |
||||||
|
" \"type\": \"string\",\n", |
||||||
|
" \"description\": \"The event keyword that the user wants details on\",\n", |
||||||
|
" },\n", |
||||||
|
" },\n", |
||||||
|
" \"required\": [\"event_text\"],\n", |
||||||
|
" },\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"name\": \"get_event_test\",\n", |
||||||
|
" \"description\":\"This is a test function to validate if the function call picks up the right function if there are multiple functions.\",\n", |
||||||
|
" \"parameters\": {\n", |
||||||
|
" \"type\": \"object\",\n", |
||||||
|
" \"properties\": {\n", |
||||||
|
" \"event_text\": {\n", |
||||||
|
" \"type\": \"string\",\n", |
||||||
|
" \"description\": \"The event keyword that the user wants details on\",\n", |
||||||
|
" },\n", |
||||||
|
" },\n", |
||||||
|
" \"required\": [\"event_text\"],\n", |
||||||
|
" },\n", |
||||||
|
" }\n", |
||||||
|
"]\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c6331113-63b0-4712-94bb-f363422a8441", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def chat_claude(history):\n", |
||||||
|
" print(f\"\\nhistory is {history}\\n\")\n", |
||||||
|
" #Claude doesnt take any other key value pair other than role and content. Hence filtering only those key value pairs\n", |
||||||
|
" history_claude = list({\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in history if \"role\" in msg and \"content\" in msg)\n", |
||||||
|
" #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]\n", |
||||||
|
" #messages = history\n", |
||||||
|
" message = claude.messages.create(\n", |
||||||
|
" model=ANTHROPIC_MODEL,\n", |
||||||
|
" max_tokens=1000,\n", |
||||||
|
" temperature=0.7,\n", |
||||||
|
" system=system_message,\n", |
||||||
|
" messages=history_claude,\n", |
||||||
|
" tools=tools_claude\n", |
||||||
|
" )\n", |
||||||
|
" image = None\n", |
||||||
|
" print(f\"Claude's message is \\n {pprint.pprint(message)}\\n\")\n", |
||||||
|
" try: \n", |
||||||
|
" if message.stop_reason == \"tool_use\":\n", |
||||||
|
" tool_use = next(block for block in message.content if block.type == \"tool_use\")\n", |
||||||
|
" event_text = tool_use.input.get('event_text')\n", |
||||||
|
" image = artist(event_text)\n", |
||||||
|
" tool_result = handle_tool_call(event_text)\n", |
||||||
|
" #tool_result = handle_tool_call(tool_use, \"Claude\")\n", |
||||||
|
" \n", |
||||||
|
" print(f\"Tool Result: {tool_result}\")\n", |
||||||
|
" \n", |
||||||
|
" response = claude.messages.stream(\n", |
||||||
|
" model=ANTHROPIC_MODEL,\n", |
||||||
|
" max_tokens=4096,\n", |
||||||
|
" system=system_message,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\n", |
||||||
|
" \"role\": \"user\", \n", |
||||||
|
" \"content\": [\n", |
||||||
|
" {\n", |
||||||
|
" \"type\": \"text\",\n", |
||||||
|
" \"text\": history[-1].get('content')\n", |
||||||
|
" }\n", |
||||||
|
" ]\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"role\": \"assistant\", \n", |
||||||
|
" \"content\": message.content\n", |
||||||
|
" },\n", |
||||||
|
" {\n", |
||||||
|
" \"role\": \"user\",\n", |
||||||
|
" \"content\": [\n", |
||||||
|
" {\n", |
||||||
|
" \"type\": \"tool_result\",\n", |
||||||
|
" \"tool_use_id\": tool_use.id,\n", |
||||||
|
" \"content\": tool_result,\n", |
||||||
|
" }\n", |
||||||
|
" ],\n", |
||||||
|
" },\n", |
||||||
|
" ],\n", |
||||||
|
" tools=tools_claude\n", |
||||||
|
" )\n", |
||||||
|
" result = \"\"\n", |
||||||
|
" with response as stream:\n", |
||||||
|
" for text in stream.text_stream:\n", |
||||||
|
" result += text or \"\"\n", |
||||||
|
" yield result, None\n", |
||||||
|
" talker(result)\n", |
||||||
|
" #image= artist(tool_input.get('event_text'))\n", |
||||||
|
" yield result, image\n", |
||||||
|
" else:\n", |
||||||
|
" response = next((block.text for block in message.content if hasattr(block, \"text\")), None,)\n", |
||||||
|
" chunk_size=30\n", |
||||||
|
" for i in range(0, len(response), chunk_size):\n", |
||||||
|
" yield response[:i + chunk_size], None\n", |
||||||
|
" time.sleep(0.05) #Simulate streaming delay\n", |
||||||
|
" talker(response)\n", |
||||||
|
" #image= artist(tool_input.get('event_text'))\n", |
||||||
|
" yield response, None\n", |
||||||
|
" except Exception as e:\n", |
||||||
|
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", |
||||||
|
" print(e)\n", |
||||||
|
" yield error_message, None\n", |
||||||
|
" " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "9915ae05-5d52-4fdc-a3ea-18f050a79bd3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def chat_gpt(history):\n", |
||||||
|
" print(f\"\\nhistory is {history}\\n\")\n", |
||||||
|
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n", |
||||||
|
" response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)\n", |
||||||
|
" image = None\n", |
||||||
|
" try:\n", |
||||||
|
" if response.choices[0].finish_reason==\"tool_calls\":\n", |
||||||
|
" message = response.choices[0].message\n", |
||||||
|
" tool = message.tool_calls[0]\n", |
||||||
|
" arguments = json.loads(tool.function.arguments)\n", |
||||||
|
" event_text = arguments.get('event_text')\n", |
||||||
|
" image = artist(event_text)\n", |
||||||
|
" event_json = handle_tool_call(event_text)\n", |
||||||
|
" tool_output = {\n", |
||||||
|
" \"role\": \"tool\",\n", |
||||||
|
" \"content\": event_json,\n", |
||||||
|
" \"tool_call_id\": tool.id\n", |
||||||
|
" }\n", |
||||||
|
" messages.append(message)\n", |
||||||
|
" messages.append(tool_output)\n", |
||||||
|
" stream = openai.chat.completions.create(\n", |
||||||
|
" model=OPENAI_MODEL,\n", |
||||||
|
" messages=messages,\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
" result = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" result += chunk.choices[0].delta.content or \"\"\n", |
||||||
|
" yield result, None\n", |
||||||
|
" talker(result)\n", |
||||||
|
" yield result, image\n", |
||||||
|
" else: \n", |
||||||
|
" reply = response.choices[0].message.content\n", |
||||||
|
" chunk_size=30\n", |
||||||
|
" for i in range(0, len(reply), chunk_size):\n", |
||||||
|
" yield reply[:i + chunk_size], None\n", |
||||||
|
" time.sleep(0.05)\n", |
||||||
|
" talker(reply)\n", |
||||||
|
" #image= artist(\"No such event\")\n", |
||||||
|
" yield reply, None\n", |
||||||
|
" except Exception as e:\n", |
||||||
|
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", |
||||||
|
" print(e)\n", |
||||||
|
" yield error_message, None" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "30fa3de9-5b55-4bb6-93ea-a13fc09d38c1", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def chat_gemini(history):\n", |
||||||
|
" print(f\"\\nhistory is {history}\\n\")\n", |
||||||
|
" history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format\n", |
||||||
|
" else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is\n", |
||||||
|
" else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc\n", |
||||||
|
" \n", |
||||||
|
" print(f\"\\nhistory_gemini is {history_gemini}\\n\")\n", |
||||||
|
" model = genai.GenerativeModel(\n", |
||||||
|
" model_name=GOOGLE_MODEL,\n", |
||||||
|
" system_instruction=system_message\n", |
||||||
|
" )\n", |
||||||
|
" response = model.generate_content(\n", |
||||||
|
" contents = history_gemini,\n", |
||||||
|
" #contents = contents,\n", |
||||||
|
" tools = [{\n", |
||||||
|
" 'function_declarations': gemini_event_details,\n", |
||||||
|
" }],\n", |
||||||
|
" )\n", |
||||||
|
" #print(f\"response is {response}\")\n", |
||||||
|
"\n", |
||||||
|
" image = None\n", |
||||||
|
" try:\n", |
||||||
|
" # Check if the model wants to use a tool\n", |
||||||
|
" if response.candidates[0].content.parts[0].function_call:\n", |
||||||
|
" function_call = response.candidates[0].content.parts[0].function_call\n", |
||||||
|
" event_text = function_call.args.get(\"event_text\")\n", |
||||||
|
" image = artist(event_text)\n", |
||||||
|
" tool_result = handle_tool_call(event_text)\n", |
||||||
|
" \n", |
||||||
|
" print(f\"\\ntool_result is {tool_result}\\n\")\n", |
||||||
|
" stream = model.generate_content(\n", |
||||||
|
" \"Based on this information `\" + tool_result + \"`, extract the details of the event and provide the event details to the user\",\n", |
||||||
|
" stream=True \n", |
||||||
|
" )\n", |
||||||
|
" #print(f\"\\nSecond response is {stream}\\n\")\n", |
||||||
|
" result = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" result += chunk.candidates[0].content.parts[0].text or \"\"\n", |
||||||
|
" #print(f\"Result is \\n{result}\\n\")\n", |
||||||
|
" yield result, None\n", |
||||||
|
" talker(result) \n", |
||||||
|
" yield result, image\n", |
||||||
|
" #print(f\"Result is \\n{result}\\n\")\n", |
||||||
|
" else: \n", |
||||||
|
" reply = response.text\n", |
||||||
|
" chunk_size=30\n", |
||||||
|
" for i in range(0, len(reply), chunk_size):\n", |
||||||
|
" yield reply[:i + chunk_size], None\n", |
||||||
|
" time.sleep(0.05)\n", |
||||||
|
" talker(reply)\n", |
||||||
|
" #image= artist(\"No such event\")\n", |
||||||
|
" yield reply, None\n", |
||||||
|
" \n", |
||||||
|
" except Exception as e:\n", |
||||||
|
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n", |
||||||
|
" print(e)\n", |
||||||
|
" yield error_message, None\n", |
||||||
|
" \n", |
||||||
|
"\n", |
||||||
|
" \n", |
||||||
|
" " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "570fffb2-a054-4217-89ae-8b6f4630e383", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def call_and_process_model_responses(fn_name, chatbot):#, response, image):\n", |
||||||
|
" response = \"\"\n", |
||||||
|
" image = None\n", |
||||||
|
" for response, image in fn_name(chatbot):\n", |
||||||
|
" if chatbot and chatbot[-1][\"role\"] == \"assistant\": \n", |
||||||
|
" chatbot[-1][\"content\"] = response # Update the last message\n", |
||||||
|
" else:\n", |
||||||
|
" chatbot.append({\"role\": \"assistant\", \"content\": response}) # First assistant message\n", |
||||||
|
" #print(chatbot)\n", |
||||||
|
" yield chatbot, image # Stream updated history to UI\n", |
||||||
|
" \n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "32a6ccce-44fa-49a7-bd1a-08c70002771c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def handle_tool_call(event_text):\n", |
||||||
|
" print(f\"event text is {event_text}\")\n", |
||||||
|
" event_found = get_event_details(event_text)\n", |
||||||
|
" print(f\"event_found is {event_found}\")\n", |
||||||
|
" \n", |
||||||
|
" if event_found:\n", |
||||||
|
" response = json.dumps({\"name\": event_found['name'],\"description\": event_found['description'], \"when\": event_found['date_time'], \"where\": event_found['location']})\n", |
||||||
|
" else: \n", |
||||||
|
" response = json.dumps({\"event\": f\"Sorry, there is no schedule currently for {event_text}\"})\n", |
||||||
|
" return response \n", |
||||||
|
" " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4eaaaf9e-64b9-4d0b-9931-388cee8ea21d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def process_chosen_model(chatbot, model):\n", |
||||||
|
" if model == 'GPT':\n", |
||||||
|
" for chatbot, image in call_and_process_model_responses(chat_gpt, chatbot):\n", |
||||||
|
" yield chatbot, image\n", |
||||||
|
" elif model == 'Claude': \n", |
||||||
|
" for chatbot, image in call_and_process_model_responses(chat_claude, chatbot):\n", |
||||||
|
" yield chatbot, image\n", |
||||||
|
" else:\n", |
||||||
|
" #for Gemini, the content is to be replaced with parts.\n", |
||||||
|
" for chatbot, image in call_and_process_model_responses(chat_gemini, chatbot):\n", |
||||||
|
" yield chatbot, image\n", |
||||||
|
" " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "627f6d49-5376-4f1d-8071-f2e96fd6e78b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# More involved Gradio code as we're not using the preset Chat interface!\n", |
||||||
|
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n", |
||||||
|
"\n", |
||||||
|
"with gr.Blocks(css=\"\"\"\n", |
||||||
|
" select.gr-box { \n", |
||||||
|
" appearance: auto !important; \n", |
||||||
|
" -webkit-appearance: auto !important; \n", |
||||||
|
" }\n", |
||||||
|
"\"\"\") as ui:\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio! An Event Management Assistant</h1>\") # Added title\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" # with gr.Column(scale=3): #Acts as a spacer on the left\n", |
||||||
|
" # pass\n", |
||||||
|
" \n", |
||||||
|
" with gr.Column(scale=0):\n", |
||||||
|
" model = gr.Dropdown(\n", |
||||||
|
" choices=[\"GPT\", \"Claude\", \"Gemini\"], \n", |
||||||
|
" label=\"Select model\", \n", |
||||||
|
" value=\"GPT\",\n", |
||||||
|
" interactive=True,\n", |
||||||
|
" container=True # Applying the CSS class\n", |
||||||
|
" )\n", |
||||||
|
" # with gr.Column(scale=-54, min_width=200):\n", |
||||||
|
" # gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio</h1>\") # Added title\n", |
||||||
|
" # pass #Acts as a spacer on the right\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n", |
||||||
|
" image_output = gr.Image(height=500)\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" entry = gr.Textbox(label=\"Ask me \\\"when is pta meeting\\\", \\\"how about book fair\\\" and more... \")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" clear = gr.Button(\"Clear\", min_width=150)\n", |
||||||
|
" #message=None\n", |
||||||
|
"\n", |
||||||
|
" def do_entry(message, history):\n", |
||||||
|
" history += [{\"role\":\"user\", \"content\":message}]\n", |
||||||
|
" return \"\", history\n", |
||||||
|
" \n", |
||||||
|
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n", |
||||||
|
" process_chosen_model, inputs=[chatbot, model], outputs=[chatbot, image_output]\n", |
||||||
|
" )\n", |
||||||
|
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", |
||||||
|
"\n", |
||||||
|
"ui.launch(inbrowser=True)" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
Loading…
Reference in new issue