From the Udemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
444 lines
16 KiB
444 lines
16 KiB
{ |
|
"cells": [ |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "it1JLoxrSqO1", |
|
"metadata": { |
|
"id": "it1JLoxrSqO1" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
"!pip install openai python-docx python-dotenv gradio openpyxl" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "950a084a-7f92-4669-af62-f07cb121da56", |
|
"metadata": { |
|
"id": "950a084a-7f92-4669-af62-f07cb121da56" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
"import os\n", |
|
"import json\n", |
|
"from dotenv import load_dotenv\n", |
|
"#from IPython.display import Markdown, display, update_display\n", |
|
"from openai import OpenAI\n", |
|
"from docx import Document" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "d0548135-ef16-4102-a55a-cea888a51c29", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"import pandas as pd\n", |
|
"import re\n", |
|
"import gradio as gr" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d", |
|
"metadata": { |
|
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
class ReqDoc:
    """Thin wrapper around a requirements .docx file on disk."""

    def __init__(self, file_path):
        # Path to the .docx document to read
        self.file_path = file_path

    def extract(self):
        """
        Read the .docx file and return the text of all paragraphs joined by
        newlines.

        Returns None (after printing the error) when the file does not exist
        or cannot be opened/parsed.
        """
        try:
            # Fail fast with a clear message when the path is wrong
            if not os.path.exists(self.file_path):
                raise FileNotFoundError(f"The file {self.file_path} was not found.")
            document = Document(self.file_path)
            return "\n".join(paragraph.text for paragraph in document.paragraphs)
        except FileNotFoundError as fnf_error:
            print(fnf_error)
            return None
        except Exception as e:
            print(f"An error occurred: {e}")
            return None
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9", |
|
"metadata": { |
|
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
# Initialize and constants
# Load environment variables from a .env file; override=True replaces any
# stale values already present in the process environment.
load_dotenv(override=True)
api_key = os.getenv('OPENAI_API_KEY')

# Shape-only sanity check ('sk-proj' is the prefix of OpenAI project keys).
# NOTE(review): this does not verify the key actually works against the API.
if api_key and api_key.startswith('sk-proj') and len(api_key)>10:
    print("API key looks good!")
else:
    print("There might be a problem with your API key. Please check!")
    
MODEL = 'gpt-4o-mini'
# The OpenAI client picks up OPENAI_API_KEY from the environment loaded above.
openai = OpenAI()
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb", |
|
"metadata": { |
|
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
#Set up system prompt for extracting just the requirements from the document
#
# BUG FIX: the JSON example below is shown to the model as the schema it must
# follow, so it has to be valid JSON itself. Three of the four entries used a
# colon instead of a comma between the two key/value pairs
# ({"RequirementNo": "FR-02": "Requirement Description": ...}); fixed to commas.
req_doc_system_prompt = "You are provided with a complete requirements specifications document. \
You are able to decide which content from that document are related to actual requirements, identify each requirement as \
functional or non-functional and list them all.\n"
req_doc_system_prompt += "If the document is empty or do not contain requirements or if you cannot extract them, please respond as such.\
Do not make up your own requirements. \n"
req_doc_system_prompt += "You should respond in JSON as in this example:"
req_doc_system_prompt += """
{
    "requirements": [
        {"RequirementNo": "FR-01", "Requirement Description": "description of this functional requirement goes here"},
        {"RequirementNo": "FR-02", "Requirement Description": "description of this functional requirement goes here"},
        {"RequirementNo": "NFR-01", "Requirement Description": "description of this non-functional requirement goes here"},
        {"RequirementNo": "NFR-02", "Requirement Description": "description of this non-functional requirement goes here"}
    ]
}
"""
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "20460e45-c1b7-4dc4-ab07-932235c19895", |
|
"metadata": { |
|
"id": "20460e45-c1b7-4dc4-ab07-932235c19895" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
# Build the user prompt: inject the document text (via ReqDoc.extract) along
# with explicit instructions to respond in JSON format.
def req_doc_user_prompt(doc):
    """Assemble the requirement-extraction user prompt for a ReqDoc instance."""
    pieces = [
        "Here is the contents from a requirement document.\n",
        f"{doc.extract()} \n",
        "Please scan through the document and extract only the actual requirements. For example, ignore sections or "
        "paragraphs such as Approvers, table of contents and similar sections which are not really requirements."
        "You must respond in a JSON format",
        "If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\n",
    ]
    # Keep the prompt within a 25,000-character budget
    return "".join(pieces)[:25_000]
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891", |
|
"metadata": { |
|
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
#Function to call the gpt-4o-mini model with the user and system prompts set
#above, returning the parsed JSON result obtained from the model.
def get_requirements(doc):
    """
    Extract requirements from the .docx file at path `doc`.

    Wraps the path in a ReqDoc, sends its text to the chat model, and returns
    the model's JSON answer parsed into a Python dict.
    """
    reqdoc = ReqDoc(doc)
    response = openai.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": req_doc_system_prompt},
            {"role": "user", "content": req_doc_user_prompt(reqdoc)}
        ],
        # Ask the API to guarantee a well-formed JSON object response
        response_format={"type": "json_object"}
    )
    result = response.choices[0].message.content
    return json.loads(result)
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e", |
|
"metadata": { |
|
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
#Uncomment and run this if you want to see the extracted requirements in JSON format.
#get_requirements("reqdoc.docx")
|
] |
|
}, |
|
{ |
|
"cell_type": "markdown", |
|
"id": "1fe8618c-1dfe-4030-bad8-405731294c93", |
|
"metadata": { |
|
"id": "1fe8618c-1dfe-4030-bad8-405731294c93" |
|
}, |
|
"source": [ |
|
"### Next, we will make another call to gpt-4o-mini" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b", |
|
"metadata": { |
|
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
#Set up system prompt to ask for test cases in table format
#
# BUG FIX: the original joined the column names with bare "\" line
# continuations inside a plain string, which concatenated them into
# "1.S No2.Requirement No3...."; each column name now ends with an explicit
# "\n" so the model sees a readable list. Also fixed the "assitant" typo.
system_prompt = "You are an assistant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \
You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \
The table will contain the following columns. \n\
1.S No\n\
2.Requirement No\n\
3.Requirement Description\n\
4.Test Case ID\n\
5.Test case summary\n\
6.Test case description\n\
7.Success criteria \n"
system_prompt += "If you are provided with an empty list, ask for a proper requirement doc\n"
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5", |
|
"metadata": { |
|
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5" |
|
}, |
|
"outputs": [], |
|
"source": [ |
|
# Set up user prompt passing in the req doc file. This in turn calls the
# get_requirements function, which makes the first model call.
def get_testcase_user_prompt(reqdoc):
    """
    Build the test-case-generation user prompt from the requirements that
    get_requirements() extracts from the file at path `reqdoc`.
    """
    user_prompt = "You are looking at the following list of requirements. \n"
    user_prompt += f"{get_requirements(reqdoc)}\n"
    user_prompt += "Prepare unit test cases for each of these requirements in a table and send that table as response. \n"
    # BUG FIX: this line used `+=`, which appended a full copy of the prompt
    # to itself instead of truncating it. `=` truncates to 25,000 characters.
    user_prompt = user_prompt[:25000]
    return user_prompt
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "5b2a2b46-9d9c-416c-b189-3007b4d26d76", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
#This is the 2nd call to the model: stream the generated test-case table back
#to the Gradio Markdown component as a growing string.
def create_testcase_doc_gradio(response, is_response_ready, is_cleared, file_input):
    """
    Generator wired into Gradio's event chain.

    Yields (markdown_so_far, is_response_ready) tuples as chunks stream in.
    Skips the OpenAI call entirely when "Clear" was clicked or no file is
    uploaded.
    """
    if is_cleared or file_input is None:  # Prevent OpenAI call if "Clear" was clicked
        # BUG FIX: this function is a generator, so the original bare
        # `return "", False` never delivered a value to Gradio; the
        # cleared/empty state must be yielded before returning.
        yield "", False
        return
    stream = openai.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": get_testcase_user_prompt(file_input)}
        ],
        stream=True
    )
    # Accumulate streamed deltas and re-yield the full text each time so the
    # Markdown component updates incrementally.
    result = ""
    for chunk in stream:
        result += chunk.choices[0].delta.content or ""
        yield result, False
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "2bb96a11-063e-4b20-9880-71fa9ea4d3f7", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
# Define this variable and then pass js=force_dark_mode when creating the Interface
# JavaScript snippet Gradio runs on page load: forces the dark theme by adding
# ?__theme=dark to the URL and reloading when it is not already set.
force_dark_mode = """
function refresh() {
    const url = new URL(window.location);
    if (url.searchParams.get('__theme') !== 'dark') {
        url.searchParams.set('__theme', 'dark');
        window.location.href = url.href;
    }
}
"""
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "5c81c766-9613-4614-b88d-410654672b89", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
def show_or_hide_save_button(response, is_response_ready, is_cleared):
    """
    Decide whether the "Download Table as Excel" button should be shown.

    Returns (response, True) when the streamed response contains a Markdown
    table, (response, False) when it does not, and ("", False) when the UI
    was cleared or there is no response yet.
    """
    # Idiom fix: `is None` instead of `== None` for the None check.
    if is_cleared or response is None:
        return "", False
    # Same table pattern as extract_table_from_markdown: one or more
    # consecutive |-delimited lines.
    table_pattern = r"(\|.+\|[\r\n]+)+"
    table_match = re.search(table_pattern, response)
    if table_match:
        return response, True
    else:
        return response, False
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "a5f5d8e7-d29c-4f40-8d57-a9911bb7c47e", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
def extract_table_from_markdown(response):
    """
    Pull the first Markdown table out of `response` and save it as an Excel
    file ("test_cases.xlsx").

    Returns the output filename, or None when the text contains no table.
    """
    # One or more consecutive |-delimited lines form the table
    match = re.search(r"(\|.+\|[\r\n]+)+", response)
    if not match:
        return None

    # Split the matched text into rows, then split each row on '|' and drop
    # the empty edge cells produced by the leading/trailing pipes.
    rows = match.group(0).strip().split("\n")
    cells = [row.split("|")[1:-1] for row in rows]

    # First row is the header; the rest are data.
    # NOTE(review): the Markdown separator row (|---|---|) is not filtered
    # out, so it shows up as the first data row in the sheet.
    table = pd.DataFrame(cells[1:], columns=cells[0])

    output_file = "test_cases.xlsx"
    table.to_excel(output_file, index=False)
    return output_file
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "c1380b11-3e28-40de-ab1a-93a5fd73cf81", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
def extract_and_save_button(response, is_cleared):
    """
    Handler for the "Save as Excel" button: export the table in `response`
    to an Excel file and hand the filename to Gradio.
    """
    if is_cleared:
        # The UI was cleared — nothing to export.
        return None
    output_file = extract_table_from_markdown(response)
    return output_file if output_file else "No table found in the provided input."
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "3a532b42-9f81-4c75-8be4-e40d621a6b35", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
# Gradio interface
# Layout: file upload -> streamed Markdown test-case table -> optional Excel
# download, plus a Clear button. is_cleared/is_response_ready are gr.State
# flags threaded through the event chain so no work happens after "Clear".
with gr.Blocks(js=force_dark_mode) as demo:
    gr.HTML("<h2 style='text-align: center; color: white;'>📄 Test case automation</h2>")
    with gr.Row():
        file_input = gr.File(label="Upload your requirements docx file", file_types=[".docx"])
    with gr.Row():
        response = gr.Markdown()
        # Button to save the table as Excel file (optional); hidden until a table is detected
        save_button = gr.Button("Download Table as Excel", visible=False)
        file_output = gr.File(label="Download Excel File", visible=False) 
    # State variable to track if response is ready
    is_response_ready = gr.State(False)
    with gr.Row():
        clear_button = gr.Button("Clear")
    # State variable to track if clear button is clicked
    is_cleared = gr.State(False)

    # Function to show "Processing..." message while the model calls run
    # NOTE(review): `file_input==None` works, but `is None` is the idiomatic check.
    def show_processing(is_cleared, file_input):
        if is_cleared or file_input==None:
            return None, False, is_cleared, file_input # Do nothing if the file was cleared
        #return gr.HTML("<h6 style='text-align: left; color: #ffffffff;'>⌛ Processing your file... Please wait!</h6>"), False, is_cleared, file_input
        return "⌛ Processing your file... Please wait!", False, is_cleared, file_input
    
    # Trigger response only if the file was uploaded and not cleared.
    # Chain: reset is_cleared -> show placeholder -> stream model output ->
    # detect table -> toggle save button/file slot visibility.
    file_input.change(
        lambda _: False, # Directly set is_cleared to False
        inputs=[file_input],
        outputs=[is_cleared]
    ).then(
        show_processing, inputs=[is_cleared, file_input], outputs=[response, is_response_ready, is_cleared, file_input]
    ).then(
        create_testcase_doc_gradio, inputs=[response, is_response_ready, is_cleared, file_input], outputs=[response, is_response_ready]
    ).then(
        show_or_hide_save_button, inputs=[response, is_response_ready, is_cleared], outputs=[response, is_response_ready]
    ).then(
        # Reveal the save button and download slot only once a table exists
        lambda _, ready: (gr.update(visible=ready), gr.update(visible=ready)), inputs=[response, is_response_ready], outputs=[save_button,file_output])

    #.then() passes the previous function outputs as inputs to the next function

    # Button action to extract and save table as an Excel file
    save_button.click(extract_and_save_button, inputs=[response, is_cleared], outputs=file_output)
    
    # Clear button resets both file and output while setting is_cleared to True
    clear_button.click(lambda: (None, None, None, True), inputs=None, outputs=[file_input, file_output, response, is_cleared]) 

# Launch Gradio app
demo.launch(share=True)
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "cd5314b2-ee91-49bd-9d40-558775d44382", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [] |
|
} |
|
], |
|
"metadata": { |
|
"colab": { |
|
"provenance": [] |
|
}, |
|
"kernelspec": { |
|
"display_name": "Python 3 (ipykernel)", |
|
"language": "python", |
|
"name": "python3" |
|
}, |
|
"language_info": { |
|
"codemirror_mode": { |
|
"name": "ipython", |
|
"version": 3 |
|
}, |
|
"file_extension": ".py", |
|
"mimetype": "text/x-python", |
|
"name": "python", |
|
"nbconvert_exporter": "python", |
|
"pygments_lexer": "ipython3", |
|
"version": "3.11.11" |
|
} |
|
}, |
|
"nbformat": 4, |
|
"nbformat_minor": 5 |
|
}
|
|
|