Maksym Solomyanov 3 months ago
parent
commit
ece322a5f7
  1. 2
      extras/trading/prototype_trader.ipynb
  2. 194
      week1/community-contributions/day1-wiki-summary.ipynb
  3. 356
      week1/community-contributions/day1_analyze_CV_Write_cover_letter.ipynb
  4. 8
      week1/troubleshooting.ipynb
  5. 616
      week2/community-contributions/day1-conversation-with-gemini.ipynb
  6. 30
      week2/community-contributions/oh_sheet_its_spark.ipynb
  7. 2
      week2/day3.ipynb
  8. 565
      week4/community-contributions/day5-homework.ipynb
  9. 462
      week4/community-contributions/unit-tests-generator.ipynb
  10. 2
      week6/day4.ipynb
  11. 3
      week8/day5.ipynb
  12. 18
      week8/memory.json
  13. 1
      week8/price_is_right_final.py

2
extras/trading/prototype_trader.ipynb

@@ -346,7 +346,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.10" "version": "3.11.11"
} }
}, },
"nbformat": 4, "nbformat": 4,

194
week1/community-contributions/day1-wiki-summary.ipynb

@@ -0,0 +1,194 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "2112166e-3629-4167-a4cb-0a1a6e549e97",
"metadata": {},
"source": [
"# Hello everyone, \n",
"The community contributions folder is super motivating. Thanks to Ed for democratising learning with this great idea of sharing. The below small piece is my novice attempt in summarizing content from wikipedia page. It is pretty straightforward, but a good learning exercise for me nevertheless. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "947028c8-30c6-456a-8e0c-25e0de1ecbb6",
"metadata": {},
"outputs": [],
"source": [
"!pip install wikipedia"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa18a060-6dbe-42c9-bc11-c8b079397d6b",
"metadata": {},
"outputs": [],
"source": [
"# Import statements\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"import wikipedia\n",
"import warnings"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d9c128d-ed7d-4e58-8cd1-1468242c7967",
"metadata": {},
"outputs": [],
"source": [
"#To supress a warning from wikipedia module when there are multiple options.\n",
"warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"wikipedia\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5371f405-e628-4b6a-a5ab-5774c1431749",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e6610504-bd7b-459f-9722-0044b3101e05",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:\n",
"# openai = OpenAI(api_key=\"your-key-here-starting-sk-proj-\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac37741a-2608-4760-8ba8-163fb9155f0f",
"metadata": {},
"outputs": [],
"source": [
"class Wikipedia:\n",
" def __init__(self, searchText):\n",
" \"\"\"\n",
" Create this object to extract the summary of wikipedia page for a text entered by user\n",
" \"\"\"\n",
" self.searchText = searchText\n",
" self.summary_text = None\n",
" self.user_prompt = None\n",
" \n",
" self._fetch_summary()\n",
"\n",
" def _fetch_summary(self):\n",
" \"\"\"\n",
" Fetches the summary from wikipedia page based on user entered search text and sets user prompt accordingly\n",
" \"\"\"\n",
" try:\n",
" # Try to get the summary of the text from Wikipedia based on user entered text. Using starightforward summary module in wikipedia.\n",
" self.summary_text = wikipedia.summary(self.searchText)\n",
" self.user_prompt = f\"You are looking a summary extract from a wikipedia page. The content is as follows\\n {self.summary_text}.\\nProvide \\\n",
" a summary taking key points from each sections listed on the page\"\n",
" except wikipedia.DisambiguationError as e:\n",
" #Modify user and system prompts if there are multiple options for a user search text\n",
" self.user_prompt = f\"You have received quite a few options {e.options} for the keyword {self.searchText}. Please request user to choose one of them\"\n",
" except wikipedia.PageError:\n",
" #To handle when there is no page\n",
" self.user_prompt = f\"There is no wiki page for {self.searchText}. Apparently it is not your fault!\"\n",
" except Exception as e:\n",
" # To handle any other exceptions\n",
" self.user_prompt = f\"Sorry, something seems to be wrong on my end. Please try again later\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "143c203e-bb99-49c6-89a2-2a32ea429719",
"metadata": {},
"outputs": [],
"source": [
"# Our by-now familiar sumamrize function\n",
"def summarize(searchText):\n",
" wiki = Wikipedia(searchText)\n",
" system_prompt = f\"You are an assitant trying to summarize content from Wikipedia. You will have three scenarios to handle your responses \\\n",
" 1. You will have the summary text content and you will just show that to user\\\n",
" 2. You will have multiple options for the user entered keyword, and you will respond by asking user to choose from that and request again \\\n",
" 3. You will not have the content due to a page not found error. Respond accordingly.\\\n",
" Respond all of these in Markdown format.\"\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": wiki.user_prompt}\n",
" ]\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
" )\n",
" return response.choices[0].message.content\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b61532fc-189c-4cd8-9402-93d8d8fa8c59",
"metadata": {},
"outputs": [],
"source": [
"summary = summarize(\"mukhari\")\n",
"display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5c3f05f6-acb5-41e4-a521-8d8b8ace0192",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

356
week1/community-contributions/day1_analyze_CV_Write_cover_letter.ipynb

@@ -0,0 +1,356 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "31d3c4a4-5442-4074-b812-42d60e0a0c04",
"metadata": {},
"outputs": [],
"source": [
"#In this example we will fetch the job description by pasting the URL,then we upload CV. Only then ChatGPT will\n",
"#analyze CV against the fetched job description. If the CV is a good match then it will write a cover letter.\n",
"\n",
"#If \n",
" ##job posting url is fake/random text or \n",
" ##job posting is fake/random tex or \n",
" ##CV is fake/random text\n",
"#then ChatGPT will not analyze CV, it will give a generic response to enter the info correctly."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc2eafe6-5255-4317-8ddd-a93695296043",
"metadata": {},
"outputs": [],
"source": [
"pip install PyPDF2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cf45e9d5-4913-416c-9880-5be60a96c0e6",
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"import os\n",
"import io\n",
"import time\n",
"import requests\n",
"import PyPDF2\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from bs4 import BeautifulSoup\n",
"from openai import OpenAI\n",
"from ipywidgets import Textarea, FileUpload, Button, VBox, HTML"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af8fea69-60aa-430c-a16c-8757b487e07a",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "daee94d2-f82b-43f0-95d1-15370eda1bc7",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0712dd1d-b6bc-41c6-84ec-d965f696f7aa",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"You are an assistant who analyzes user's CV against the job description \\\n",
" and provide a short summary if the user is fit for this job. If the user is fit for the job \\\n",
" write a cover letter for the user to apply for the job. Keep the cover letter professional, short, \\\n",
" and formal. \\\n",
" Important things to notice before analyzing CV:\\\n",
" 1. Always check if the CV is actually a CV or just random text\\\n",
" 2. Check if the job description fetched from the website is the job description or not\\\n",
" and ignore text related to navigation\\\n",
" 3. Also check the link of the job posting, if it actually resembles a job posting or is just random \\\n",
" fake website\\\n",
" 4. if any one of these two checks fails, do not analyze the CV against the Job description and give an\\\n",
" appropriate response as you think\\\n",
" 5. Always respond in Markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70c972a6-8af6-4ff2-a338-6d7ba90e2045",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "426dfd9b-3446-4543-9819-63040abd9644",
"metadata": {},
"outputs": [],
"source": [
"for_user_prompt = {\n",
" 'job_posting_url':'',\n",
" 'job_posting': '',\n",
" 'cv_text': ''\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "79d9ccd6-f5fe-4ce8-982c-7235d2cf6a9f",
"metadata": {},
"outputs": [],
"source": [
"# Create widgets - to create a box for the job posting text\n",
"job_posting_url_area = Textarea(\n",
" placeholder='Paste the URL of the job posting here, ONLY URL PLEASE',\n",
" description='Fetching job:',\n",
" disabled=False,\n",
" layout={'width': '800px', 'height': '50px'}\n",
")\n",
"\n",
"status_job_posting = HTML(value=\"<b>Status:</b> Waiting for inputs...\")\n",
"\n",
"# Create Submit Buttons\n",
"fetch_job_posting_button = Button(description='Fetch Job Posting', button_style='primary')\n",
"\n",
"def fetch_job_posting_action(b):\n",
" for_user_prompt['job_posting_url'] = job_posting_url_area.value\n",
" if for_user_prompt['job_posting_url']:\n",
" ed = Website(for_user_prompt['job_posting_url'])\n",
" status_job_posting.value = \"<b>Status:</b> Job posting fetched successfully!\"\n",
" fetch_job_posting_button.button_style='success'\n",
" for_user_prompt['job_posting']=ed.text\n",
" else:\n",
" status_job_posting.value = \"<b>Status:</b> Please enter a job posting url before submitting.\"\n",
"\n",
"# Attach actions to buttons\n",
"fetch_job_posting_button.on_click(fetch_job_posting_action)\n",
"\n",
"# Layout\n",
"job_posting_box = VBox([job_posting_url_area, fetch_job_posting_button])\n",
"\n",
"# Display all widgets\n",
"display(VBox([\n",
" HTML(value=\"<h2>Input Job Posting Url</h2>\"),\n",
" job_posting_box,\n",
" status_job_posting\n",
"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "58d42786-1580-4d3f-b44f-5c52250c2935",
"metadata": {},
"outputs": [],
"source": [
"# Print fetched job description\n",
"\n",
"#print(for_user_prompt['job_posting'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd258dec-9b57-40ce-b37c-2627acbcb5af",
"metadata": {},
"outputs": [],
"source": [
"# Define file upload for CV\n",
"cv_upload = FileUpload(\n",
" accept='.pdf', # Only accept PDF files\n",
" multiple=False, # Only allow single file selection\n",
" description='Upload CV (PDF)'\n",
")\n",
"\n",
"status = HTML(value=\"<b>Status:</b> Waiting for inputs...\")\n",
"\n",
"# Create Submit Buttons\n",
"submit_cv_button = Button(description='Submit CV', button_style='success')\n",
"\n",
"# Functions\n",
"def submit_cv_action(change):\n",
"\n",
" if not for_user_prompt['cv_text']:\n",
" status.value = \"<b>Status:</b> Please upload a CV before submitting.\"\n",
" \n",
" if cv_upload.value:\n",
" # Get the uploaded file\n",
" uploaded_file = cv_upload.value[0]\n",
" content = io.BytesIO(uploaded_file['content'])\n",
" \n",
" try:\n",
" pdf_reader = PyPDF2.PdfReader(content) \n",
" cv_text = \"\"\n",
" for page in pdf_reader.pages: \n",
" cv_text += page.extract_text() \n",
" \n",
" # Store CV text in for_user_prompt\n",
" for_user_prompt['cv_text'] = cv_text\n",
" status.value = \"<b>Status:</b> CV uploaded and processed successfully!\"\n",
" except Exception as e:\n",
" status.value = f\"<b>Status:</b> Error processing PDF: {str(e)}\"\n",
"\n",
" time.sleep(0.5) # Short pause between upload and submit messages to display both\n",
" \n",
" if for_user_prompt['cv_text']:\n",
" #print(\"CV Submitted:\")\n",
" #print(for_user_prompt['cv_text'])\n",
" status.value = \"<b>Status:</b> CV submitted successfully!\"\n",
" \n",
"\n",
"# Attach actions to buttons\n",
"submit_cv_button.on_click(submit_cv_action)\n",
"\n",
"# Layout\n",
"cv_buttons = VBox([submit_cv_button])\n",
"\n",
"# Display all widgets\n",
"display(VBox([\n",
" HTML(value=\"<h2>Import CV and submit</h2>\"),\n",
" cv_upload,\n",
" cv_buttons,\n",
" status\n",
"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7dd22a4-ca7b-4b8c-a328-6205cec689cb",
"metadata": {},
"outputs": [],
"source": [
"# Prepare the user prompt that we will send to open ai (added URL for the context)\n",
"user_prompt = f\"\"\"\n",
"Job Posting: \n",
"{for_user_prompt['job_posting']}\n",
"\n",
"CV: \n",
"{for_user_prompt['cv_text']}\n",
"\n",
"Url:\n",
"{for_user_prompt['job_posting_url']}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82b71c1a-895a-48e7-a945-13e615bb0096",
"metadata": {},
"outputs": [],
"source": [
"# Define messages with system_prompt and user_prompt\n",
"def messages_for(system_prompt_input, user_prompt_input):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt_input},\n",
" {\"role\": \"user\", \"content\": user_prompt_input}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "854dc42e-2bbd-493b-958f-c20484908300",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the OpenAI API. \n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(system_prompt, user_prompt)\n",
")\n",
"\n",
"# Response is provided in Markdown and displayed accordingly\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "758d2cbe-0f80-4572-8724-7cba77f701dd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

8
week1/troubleshooting.ipynb

@@ -405,6 +405,14 @@
"from diagnostics import Diagnostics\n", "from diagnostics import Diagnostics\n",
"Diagnostics().run()" "Diagnostics().run()"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e1955b9a-d344-4782-b448-2770d0edd90c",
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {

616
week2/community-contributions/day1-conversation-with-gemini.ipynb

@@ -0,0 +1,616 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
"metadata": {},
"source": [
"# Welcome to Week 2!\n",
"\n",
"## Frontier Model APIs\n",
"\n",
"In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n",
"\n",
"Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
]
},
{
"cell_type": "markdown",
"id": "2b268b6e-0ba4-461e-af86-74a41f4d681f",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Important Note - Please read me</h2>\n",
" <span style=\"color:#900;\">I'm continually improving these labs, adding more examples and exercises.\n",
" At the start of each week, it's worth checking you have the latest code.<br/>\n",
" First do a <a href=\"https://chatgpt.com/share/6734e705-3270-8012-a074-421661af6ba9\">git pull and merge your changes as needed</a>. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!<br/><br/>\n",
" After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:<br/>\n",
" <code>conda env update --f environment.yml</code><br/>\n",
" Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):<br/>\n",
" <code>pip install -r requirements.txt</code>\n",
" <br/>Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n",
" </span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#f71;\">Reminder about the resources page</h2>\n",
" <span style=\"color:#f71;\">Here's a link to resources for the course. This includes links to all the slides.<br/>\n",
" <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">https://edwarddonner.com/2024/11/13/llm-engineering-resources/</a><br/>\n",
" Please keep this bookmarked, and I'll continue to add more useful links there over time.\n",
" </span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "85cfe275-4705-4d30-abea-643fbddf1db0",
"metadata": {},
"source": [
"## Setting up your keys\n",
"\n",
"If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n",
"\n",
"**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthopic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n",
"\n",
"For OpenAI, visit https://openai.com/api/ \n",
"For Anthropic, visit https://console.anthropic.com/ \n",
"For Google, visit https://ai.google.dev/gemini-api \n",
"\n",
"When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n",
"\n",
"```\n",
"OPENAI_API_KEY=xxxx\n",
"ANTHROPIC_API_KEY=xxxx\n",
"GOOGLE_API_KEY=xxxx\n",
"```\n",
"\n",
"Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
"metadata": {},
"outputs": [],
"source": [
"# import for google\n",
"# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n",
"# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n",
"\n",
"import google.generativeai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
"metadata": {},
"outputs": [],
"source": [
"# This is the set up code for Gemini\n",
"# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n",
"\n",
"google.generativeai.configure()"
]
},
{
"cell_type": "markdown",
"id": "42f77b59-2fb1-462a-b90d-78994e4cef33",
"metadata": {},
"source": [
"## Asking LLMs to tell a joke\n",
"\n",
"It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n",
"Later we will be putting LLMs to better use!\n",
"\n",
"### What information is included in the API\n",
"\n",
"Typically we'll pass to the API:\n",
"- The name of the model that should be used\n",
"- A system message that gives overall context for the role the LLM is playing\n",
"- A user message that provides the actual prompt\n",
"\n",
"There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "378a0296-59a2-45c6-82eb-941344d3eeff",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that is great at telling jokes\"\n",
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
"metadata": {},
"outputs": [],
"source": [
"prompts = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
"metadata": {},
"outputs": [],
"source": [
"# GPT-3.5-Turbo\n",
"\n",
"completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n",
"print(completion.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
"metadata": {},
"outputs": [],
"source": [
"# GPT-4o-mini\n",
"# Temperature setting controls creativity\n",
"\n",
"completion = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=prompts,\n",
" temperature=0.7\n",
")\n",
"print(completion.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
"metadata": {},
"outputs": [],
"source": [
"# GPT-4o\n",
"\n",
"completion = openai.chat.completions.create(\n",
" model='gpt-4o',\n",
" messages=prompts,\n",
" temperature=0.4\n",
")\n",
"print(completion.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
"metadata": {},
"outputs": [],
"source": [
"# Claude 3.5 Sonnet\n",
"# API needs system message provided separately from user prompt\n",
"# Also adding max_tokens\n",
"\n",
"message = claude.messages.create(\n",
" model=\"claude-3-5-sonnet-20241022\",\n",
" max_tokens=200,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": user_prompt},\n",
" ],\n",
")\n",
"\n",
"print(message.content[0].text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
"metadata": {},
"outputs": [],
"source": [
"# Claude 3.5 Sonnet again\n",
"# Now let's add in streaming back results\n",
"\n",
"result = claude.messages.stream(\n",
" model=\"claude-3-5-sonnet-20241022\",\n",
" max_tokens=200,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": user_prompt},\n",
" ],\n",
")\n",
"\n",
"with result as stream:\n",
" for text in stream.text_stream:\n",
" print(text, end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
"metadata": {},
"outputs": [],
"source": [
"# The API for Gemini has a slightly different structure.\n",
"# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
"# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
"\n",
"gemini_client = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=system_message\n",
")\n",
"response = gemini_client.generate_content(user_prompt)\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "49009a30-037d-41c8-b874-127f61c4aa3a",
"metadata": {},
"outputs": [],
"source": [
"# As an alternative way to use Gemini that bypasses Google's python API library,\n",
"# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n",
"\n",
"gemini_via_openai_client = OpenAI(\n",
" api_key=google_api_key, \n",
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
")\n",
"\n",
"response = gemini_via_openai_client.chat.completions.create(\n",
" model=\"gemini-1.5-flash\",\n",
" messages=prompts\n",
")\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
"metadata": {},
"outputs": [],
"source": [
"# To be serious! GPT-4o-mini with the original question\n",
"\n",
"prompts = [\n",
" {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n",
" {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? Please respond in Markdown.\"}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
"metadata": {},
"outputs": [],
"source": [
"# Have it stream back results in markdown\n",
"\n",
"stream = openai.chat.completions.create(\n",
" model='gpt-4o',\n",
" messages=prompts,\n",
" temperature=0.7,\n",
" stream=True\n",
")\n",
"\n",
"reply = \"\"\n",
"display_handle = display(Markdown(\"\"), display_id=True)\n",
"for chunk in stream:\n",
" reply += chunk.choices[0].delta.content or ''\n",
" reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
" update_display(Markdown(reply), display_id=display_handle.display_id)"
]
},
{
"cell_type": "markdown",
"id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
"metadata": {},
"source": [
"## And now for some fun - an adversarial conversation between Chatbots..\n",
"\n",
"You're already familar with prompts being organized into lists like:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
" {\"role\": \"user\", \"content\": \"user prompt here\"}\n",
"]\n",
"```\n",
"\n",
"In fact this structure can be used to reflect a longer conversation history:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
" {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
" {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
" {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
"]\n",
"```\n",
"\n",
"And we can use this approach to engage in a longer interaction with history."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and gemini-1.5-flash\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"gemini_model = \"gemini-1.5-flash\"\n",
"\n",
"gpt_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"gemini_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"gemini_messages = [\"Hi\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
"metadata": {},
"outputs": [],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "302586ca-645d-41f1-9738-efd8e7581bcf",
"metadata": {},
"outputs": [],
"source": [
"def call_gemini():\n",
" client = google.generativeai.GenerativeModel(\n",
" model_name=gemini_model,\n",
" system_instruction=gemini_system\n",
" )\n",
" messages = []\n",
" for gpt, gemini in zip(gpt_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"parts\": gpt})\n",
" messages.append({\"role\": \"model\", \"parts\": gemini})\n",
" last_message = messages.pop() \n",
" chat = client.start_chat(\n",
" history=messages\n",
" )\n",
" response = chat.send_message(last_message[\"parts\"])\n",
" return response.text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e322e1e-9a99-4488-a3bf-6d5562163553",
"metadata": {},
"outputs": [],
"source": [
"call_gemini()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"gemini_messages = [\"Hi\"]\n",
"\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" gemini_next = call_gemini()\n",
" print(f\"Gemini:\\n{gemini_next}\\n\")\n",
" gemini_messages.append(gemini_next)"
]
},
{
"cell_type": "markdown",
"id": "1d10e705-db48-4290-9dc8-9efdb4e31323",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Before you continue</h2>\n",
" <span style=\"color:#900;\">\n",
" Be sure you understand how the conversation above is working, and in particular how the <code>messages</code> list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?<br/>\n",
" </span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
"metadata": {},
"source": [
"# More advanced exercises\n",
"\n",
"Try creating a 3-way, perhaps bringing Claude into the conversation!\n",
"\n",
"Try doing this yourself before you look at the solutions.\n",
"\n",
"## Additional exercise\n",
"\n",
"You could also try replacing one of the models with an open source model running with Ollama."
]
},
{
"cell_type": "markdown",
"id": "446c81e3-b67e-4cd9-8113-bc3092b93063",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business relevance</h2>\n",
" <span style=\"color:#181;\">This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c23224f6-7008-44ed-a57f-718975f4e291",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

30
week2/community-contributions/oh_sheet_its_spark.ipynb

@@ -0,0 +1,30 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Repo link to a LLM App that can help you convert any Excel Spreadsheet with formulas into Pyspark equivalent transformations in a matter of few clicks "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"https://github.com/jasjyotsinghjaswal/llm_custom_apps"
]
},
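{
"cell_type": "markdown",
"metadata": {},
"source": [
"To give a feel for the kind of conversion the app performs, below is a minimal hand-written sketch (not taken from the repo) of an Excel `SUMIF` formula expressed as a PySpark transformation. The column names and data are made up for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Excel: =SUMIF(A:A, \"East\", B:B) -- sum column B where column A equals \"East\"\n",
"from pyspark.sql import SparkSession, functions as F\n",
"\n",
"spark = SparkSession.builder.getOrCreate()\n",
"df = spark.createDataFrame(\n",
"    [(\"East\", 100), (\"West\", 80), (\"East\", 50)],\n",
"    [\"region\", \"sales\"],\n",
")\n",
"\n",
"# PySpark equivalent of the SUMIF above\n",
"df.filter(F.col(\"region\") == \"East\").agg(F.sum(\"sales\").alias(\"east_sales\")).show()"
]
},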
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

2
week2/day3.ipynb

@ -184,7 +184,7 @@
"system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n", "system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n",
"the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n", "the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n",
"For example, if the customer says 'I'm looking to buy a hat', \\\n", "For example, if the customer says 'I'm looking to buy a hat', \\\n",
"you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales evemt.'\\\n", "you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales event.'\\\n",
"Encourage the customer to buy hats if they are unsure what to get.\"" "Encourage the customer to buy hats if they are unsure what to get.\""
] ]
}, },

565
week4/community-contributions/day5-homework.ipynb

@@ -0,0 +1,565 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ff022957-2e81-4ea9-84d3-e52d5753e133",
"metadata": {},
"source": [
"### Comment and Unit Test Generater \n",
"\n",
"The requirement: \n",
"* use an LLM to generate docstring and comments for Python code\n",
"* use an LLM to generate unit test\n",
"\n",
"This is my week 4 day 5 project."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ea1841f6-4afc-4d29-ace8-5ca5a3915c8c",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import io\n",
"import sys\n",
"import json\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import subprocess\n",
"from huggingface_hub import login, InferenceClient\n",
"from transformers import AutoTokenizer"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "11957fd3-6c61-4496-aef1-8223cb9ec4ce",
"metadata": {},
"outputs": [],
"source": [
"# environment\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee7b08fd-e678-4234-895e-4e3a925e60f0",
"metadata": {},
"outputs": [],
"source": [
"# initialize\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c8023255-9c98-4fbc-92e4-c553bed3b605",
"metadata": {},
"outputs": [],
"source": [
"hf_token = os.environ['HF_TOKEN']\n",
"login(hf_token, add_to_git_credential=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f8ce3f5e-74c4-4d35-bfbc-91c5be85e094",
"metadata": {},
"outputs": [],
"source": [
"code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n",
"CODE_QWEN_URL = \"https://g39mbjooiiwkbgyz.us-east-1.aws.endpoints.huggingface.cloud\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1bbc66b6-52ae-465e-a368-edc8f097fe9d",
"metadata": {},
"outputs": [],
"source": [
"def system_prompt_for_comment():\n",
" system=\"\"\"\n",
" You are a Python documentation expert. When writing documentation:\n",
" - Follow PEP 257 and Google docstring style guidelines\n",
" - Write clear, concise explanations\n",
" - Include practical examples\n",
" - Highlight edge cases and limitations\n",
" - Use type hints in docstrings\n",
" - Add inline comments only for complex logic\n",
" - Never skip documenting parameters or return values\n",
" - Validate that all documentation is accurate and complete\n",
" \"\"\"\n",
" return system"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b089f87b-53ae-40ad-8d06-b9924bb998a0",
"metadata": {},
"outputs": [],
"source": [
"def system_prompt_for_unit_test():\n",
" system=\"\"\"\n",
" You are an expert Python testing engineer who specializes in creating comprehensive unit tests. Follow these principles:\n",
" - Use pytest as the testing framework\n",
" - Follow the Arrange-Act-Assert pattern\n",
" - Test both valid and invalid inputs\n",
" - Include edge cases and boundary conditions\n",
" - Write descriptive test names that explain the scenario being tested\n",
" - Create independent tests that don't rely on each other\n",
" - Use appropriate fixtures and parametrize when needed\n",
" - Add clear comments explaining complex test logic\n",
" - Cover error cases and exceptions\n",
" - Achieve high code coverage while maintaining meaningful tests\n",
" \"\"\"\n",
" return system"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22193622-f3a0-4894-a6c4-eb6d88097861",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for_comment(code):\n",
" user = f\"\"\"\n",
" Please document this Python code with:\n",
" \n",
" 1. A docstring containing:\n",
" - A clear description of purpose and functionality\n",
" - All parameters with types and descriptions\n",
" - Return values with types\n",
" - Exceptions that may be raised\n",
" - Any important notes or limitations\n",
" \n",
" 2. Strategic inline comments for:\n",
" - Complex algorithms or business logic\n",
" - Non-obvious implementation choices\n",
" - Performance considerations\n",
" - Edge cases\n",
" \n",
" Here's the code to document:\n",
" \\n{code}\n",
" \"\"\"\n",
" return user;"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81e61752-ec2f-44c1-86a2-ff3234a0358c",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for_unit_test(code):\n",
" user = f\"\"\"\n",
" Please generate unit tests for the following Python code. Include:\n",
" \n",
" 1. Test cases for:\n",
" - Normal/expected inputs\n",
" - Edge cases and boundary values\n",
" - Invalid inputs and error conditions\n",
" - Different combinations of parameters\n",
" - All public methods and functions\n",
" \n",
" 2. For each test:\n",
" - Clear test function names describing the scenario\n",
" - Setup code (fixtures if needed)\n",
" - Test data preparation\n",
" - Expected outcomes\n",
" - Assertions checking results\n",
" - Comments explaining complex test logic\n",
" \n",
" 3. Include any necessary:\n",
" - Imports\n",
" - Fixtures\n",
" - Mock objects\n",
" - Helper functions\n",
" - Test data generators\n",
" \n",
" Here's the code to test:\n",
" \\n{code}\n",
" \"\"\"\n",
" return user"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f31ceed3-0eb2-4962-ab86-2d0302185560",
"metadata": {},
"outputs": [],
"source": [
"pi = \"\"\"\n",
"import time\n",
"\n",
"def calculate(iterations, param1, param2):\n",
" result = 1.0\n",
" for i in range(1, iterations+1):\n",
" j = i * param1 - param2\n",
" result -= (1/j)\n",
" j = i * param1 + param2\n",
" result += (1/j)\n",
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "192c30f5-4be6-49b7-a054-11bfcffa91e0",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4e920dc-4094-42d8-9255-18f2919df2d4",
"metadata": {},
"outputs": [],
"source": [
"def messages_for_comment(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt_for_comment()},\n",
" {\"role\": \"user\", \"content\": user_prompt_for_comment(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77500cae-bf84-405c-8b03-2f984108951b",
"metadata": {},
"outputs": [],
"source": [
"def messages_for_unit_test(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt_for_unit_test()},\n",
" {\"role\": \"user\", \"content\": user_prompt_for_unit_test(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5ec58bf1-4a44-4c21-a71a-2cac359884e5",
"metadata": {},
"outputs": [],
"source": [
"def stream_comment_gpt(code):\n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for_comment(code), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" #print(fragment, end='', flush=True)\n",
" yield reply.replace('```','') \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "47c615e2-4eb6-4ce1-ad09-7f2e6dbc3934",
"metadata": {},
"outputs": [],
"source": [
"stream_comment_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b990875-31fd-40e5-bc8c-f6099d362249",
"metadata": {},
"outputs": [],
"source": [
"def stream_unit_test_gpt(code):\n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for_unit_test(code), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" #print(fragment, end='', flush=True)\n",
" yield reply.replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3dc90578-4f5e-47f1-b30f-c21b5795e82f",
"metadata": {},
"outputs": [],
"source": [
"stream_unit_test_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "17380c0f-b851-472b-a234-d86f5c219e50",
"metadata": {},
"outputs": [],
"source": [
"def stream_comment_claude(code):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_prompt_for_comment(),\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for_comment(code)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" #print(text, end=\"\", flush=True)\n",
" yield reply.replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a2d016d-76a2-4752-bd4d-6f93ddec46be",
"metadata": {},
"outputs": [],
"source": [
"def stream_unit_test_claude(code):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_prompt_for_unit_test(),\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for_unit_test(code)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" #print(text, end=\"\", flush=True)\n",
" yield reply.replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee43428e-b577-4e95-944d-399f2f3b89ff",
"metadata": {},
"outputs": [],
"source": [
"stream_comment_claude(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0565e33b-9f14-48b7-ae8d-d22dc03b93c9",
"metadata": {},
"outputs": [],
"source": [
"stream_unit_test_claude(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f13b3a5b-366d-4b28-adda-977a313e6b4d",
"metadata": {},
"outputs": [],
"source": [
"def stream_comment_model(model, model_url, code):\n",
" tokenizer = AutoTokenizer.from_pretrained(model)\n",
" messages = messages_for_comment(code)\n",
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
" client = InferenceClient(model_url, token=hf_token)\n",
" stream = client.text_generation(text, stream=True, details=True, max_new_tokens=5000)\n",
" result = \"\"\n",
" for r in stream:\n",
" #print(r.token.text, end = \"\")\n",
" result += r.token.text\n",
" yield result \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e2efdb92-fc7a-4952-ab46-ae942cb996bf",
"metadata": {},
"outputs": [],
"source": [
"def stream_unit_test_model(model, model_url, code):\n",
" tokenizer = AutoTokenizer.from_pretrained(model)\n",
" messages = messages_for_unit_test(code)\n",
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
" client = InferenceClient(model_url, token=hf_token)\n",
" stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
" result = \"\"\n",
" for r in stream:\n",
" #print(r.token.text, end = \"\")\n",
" result += r.token.text\n",
" yield result \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a756193-fcba-43da-a981-203c10d36488",
"metadata": {},
"outputs": [],
"source": [
"stream_comment_model(code_qwen, CODE_QWEN_URL, pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "12ddcbf4-6286-47a8-847b-5be78e7aa995",
"metadata": {},
"outputs": [],
"source": [
"stream_unit_test_model(code_qwen, CODE_QWEN_URL, pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "321609ee-b64a-44fc-9090-39f87e1f8e0e",
"metadata": {},
"outputs": [],
"source": [
"def comment_code(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_comment_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_comment_claude(python)\n",
" elif model==\"CodeQwen\":\n",
" result = stream_comment_model(code_qwen, CODE_QWEN_URL, python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4c560c9-922d-4893-941f-42893373b1be",
"metadata": {},
"outputs": [],
"source": [
"def get_unit_test(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_unit_test_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_unit_test_claude(python)\n",
" elif model==\"CodeQwen\":\n",
" result = stream_unit_test_model(code_qwen, CODE_QWEN_URL, python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f85bc777-bebe-436b-88cc-b9ecdb6306c0",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
".python {background-color: #306998;}\n",
".cpp {background-color: #050;}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee27cc91-81e6-42c8-ae3c-c04161229d8c",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=pi, lines=10)\n",
" result = gr.Textbox(label=\"Result code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\",\"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" comment_button = gr.Button(\"Comment code\")\n",
" with gr.Row():\n",
" unit_test_button = gr.Button(\"Unit Test code\")\n",
" \n",
" comment_button.click(comment_code, inputs=[python, model], outputs=[result])\n",
" unit_test_button.click(get_unit_test, inputs=[python, model], outputs=[result])\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "06e8279c-b488-4807-9bed-9d26be11c057",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

462
week4/community-contributions/unit-tests-generator.ipynb

@@ -0,0 +1,462 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Requirements\n",
"\n",
"1. Install the pytest and pytest-cov libraries\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"#!pipenv install pytest pytest-cov"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Current flow:\n",
"\n",
"1. For a python code it generates the unit tests using `pytest` library. The dashboard supports tests execution along with a coverage report. If the unit tests are fine, there is an option to save them for future use. It can happen, especially with Ollama , the tests are having a typing error. In this case the code can be edited in the right window and executed afterwards. \n",
"\n",
"2. Supports 3 models: \n",
"\n",
"- gpt-4o-mini\n",
"- claude-3-5-sonnet-20240620\n",
"- llama3.2\n",
"\n",
"It is recommended though to use other models except Ollama, my tests showed the code returned from ollama required more supervision and editing. Some generated unit tests from ollama don't provide full coverage, but still it is a good starting point for building such a tool."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"import re\n",
"import os\n",
"import sys\n",
"import textwrap\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"import gradio as gr\n",
"from pathlib import Path\n",
"import subprocess\n",
"from IPython.display import Markdown"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv()\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"OPENAI_MODEL = \"gpt-4o-mini\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"OLLAMA_MODEL = \"llama3.2\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Code execution"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"\n",
"def extract_code(text):\n",
" # Regular expression to find text between ``python and ``\n",
" match = re.search(r\"```python(.*?)```\", text, re.DOTALL)\n",
"\n",
" if match:\n",
" code = match.group(0).strip() # Extract and strip extra spaces\n",
" else:\n",
" code = \"\"\n",
" print(\"No matching substring found.\")\n",
"\n",
" return code.replace(\"```python\\n\", \"\").replace(\"```\", \"\")\n",
"\n",
"\n",
"def execute_coverage_report(python_interpreter=sys.executable):\n",
" if not python_interpreter:\n",
" raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n",
" \n",
" command = [\"coverage\", \"run\", \"-m\", \"pytest\"]\n",
"\n",
" try:\n",
" result = subprocess.run(command, check=True, capture_output=True, text=True)\n",
" print(\"Tests ran successfully!\")\n",
" print(result.stdout)\n",
" return result.stdout\n",
" except subprocess.CalledProcessError as e:\n",
" print(\"Some tests failed!\")\n",
" print(\"Output:\\n\", e.stdout)\n",
" print(\"Errors:\\n\", e.stderr)\n",
" # Extracting failed test information\n",
" return e.stdout\n",
"\n",
"def save_unit_tests(code):\n",
"\n",
" match = re.search(r\"def\\s+(\\w+)\\(\", code, re.DOTALL)\n",
"\n",
" if match:\n",
" function_name = match.group(1).strip() # Extract and strip extra spaces\n",
" else:\n",
" function_name = \"\"\n",
" print(\"No matching substring found.\")\n",
"\n",
" test_code_path = Path(\"tests\")\n",
" (test_code_path / f\"test_{function_name}.py\").write_text(extract_code(code))\n",
" Path(\"tests\", \"test_code.py\").unlink()\n",
" \n",
"\n",
"def execute_tests_in_venv(code_to_test, tests, python_interpreter=sys.executable):\n",
" \"\"\"\n",
" Execute the given Python code string within the specified virtual environment.\n",
" \n",
" Args:\n",
" - code_str: str, the Python code to execute.\n",
" - venv_dir: str, the directory path to the virtual environment created by pipenv.\n",
" \"\"\"\n",
" \n",
" if not python_interpreter:\n",
" raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n",
"\n",
" # Prepare the command to execute the code\n",
" code_str = textwrap.dedent(code_to_test) + \"\\n\" + extract_code(tests)\n",
" test_code_path = Path(\"tests\")\n",
" test_code_path.mkdir(parents=True, exist_ok=True)\n",
" (test_code_path / f\"test_code.py\").write_text(code_str)\n",
" command = [\"pytest\", str(test_code_path)]\n",
"\n",
" try:\n",
" result = subprocess.run(command, check=True, capture_output=True, text=True)\n",
" print(\"Tests ran successfully!\")\n",
" print(result.stderr)\n",
" return result.stdout\n",
" except subprocess.CalledProcessError as e:\n",
" print(\"Some tests failed!\")\n",
" print(\"Output:\\n\", e.stdout)\n",
" print(\"Errors:\\n\", e.stderr)\n",
" # Extracting failed test information\n",
" failed_tests = []\n",
" for line in e.stdout.splitlines():\n",
" if \"FAILED\" in line and \"::\" in line:\n",
" failed_tests.append(line.strip())\n",
" if failed_tests:\n",
" print(\"Failed Tests:\")\n",
" for test in failed_tests:\n",
" print(test)\n",
" \n",
" return e.stdout\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prompts and calls to the models"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"system_message = \"\"\"You are a helpful assistant which helps developers to write unit test cases for their code.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def get_user_prompt(code):\n",
"\n",
" user_prompt = \"\"\"Test include:\n",
"\n",
" - Valid inputs with expected results.\n",
" - Inputs that test the boundaries or limits of the function's behavior.\n",
" - Invalid inputs or scenarios where the function is expected to raise exceptions.\n",
"\n",
" Structure:\n",
"\n",
" - Begin with all necessary imports. \n",
" - Do not create custom imports. \n",
" - Do not insert in the response the function for the tests.\n",
" - Ensure proper error handling for tests that expect exceptions.\n",
" - Clearly name the test functions to indicate their purpose (e.g., test_function_name).\n",
"\n",
" Example Structure:\n",
"\n",
" - Use pytest.raises to validate exceptions.\n",
" - Use assertions to verify correct outputs for successful and edge cases.\n",
"\n",
" Documentation:\n",
"\n",
" - Add docstrings explaining what each test verifies.\"\"\"\n",
" user_prompt += code\n",
"\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(code):\n",
"\n",
" user_prompt = get_user_prompt(code)\n",
" stream = openai.chat.completions.create(\n",
" model=OPENAI_MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": user_prompt,\n",
" },\n",
" ],\n",
" stream=True,\n",
" )\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or \"\"\n",
" yield response\n",
" \n",
" return response\n",
"\n",
"def stream_ollama(code):\n",
"\n",
" user_prompt = get_user_prompt(code)\n",
" ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
" stream = ollama_via_openai.chat.completions.create(\n",
" model=OLLAMA_MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": user_prompt,\n",
" },\n",
" ],\n",
" stream=True,\n",
" )\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or \"\"\n",
" yield response\n",
" \n",
" return response\n",
"\n",
"\n",
"def stream_claude(code):\n",
" user_prompt = get_user_prompt(code)\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": user_prompt,\n",
" }\n",
" ],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply\n",
" print(text, end=\"\", flush=True)\n",
" return reply"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Code examples to test the inteface"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"function_to_test = \"\"\"\n",
" def lengthOfLongestSubstring(s):\n",
" if not isinstance(s, str):\n",
" raise TypeError(\"Input must be a string\")\n",
" max_length = 0\n",
" substring = \"\"\n",
" start_idx = 0\n",
" while start_idx < len(s):\n",
" string = s[start_idx:]\n",
" for i, x in enumerate(string):\n",
" substring += x\n",
" if len(substring) == len(set((list(substring)))):\n",
" \n",
" if len(set((list(substring)))) > max_length:\n",
" \n",
" max_length = len(substring)\n",
"\n",
" start_idx += 1\n",
" substring = \"\"\n",
" \n",
" \n",
" return max_length\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"test_code = \"\"\"```python\n",
"import pytest\n",
"\n",
"# Unit tests using pytest\n",
"def test_lengthOfLongestSubstring():\n",
" assert lengthOfLongestSubstring(\"abcabcbb\") == 3 # Case with repeating characters\n",
" assert lengthOfLongestSubstring(\"bbbbb\") == 1 # Case with all same characters\n",
" assert lengthOfLongestSubstring(\"pwwkew\") == 3 # Case with mixed characters\n",
" assert lengthOfLongestSubstring(\"\") == 0 # Empty string case\n",
" assert lengthOfLongestSubstring(\"abcdef\") == 6 # All unique characters\n",
" assert lengthOfLongestSubstring(\"abca\") == 3 # Case with pattern and repeat\n",
" assert lengthOfLongestSubstring(\"dvdf\") == 3 # Case with repeated characters separated\n",
" assert lengthOfLongestSubstring(\"a\") == 1 # Case with single character\n",
" assert lengthOfLongestSubstring(\"au\") == 2 # Case with unique two characters\n",
"```\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def optimize(code, model):\n",
" if model == \"GPT\":\n",
" result = stream_gpt(code)\n",
" elif model == \"Claude\":\n",
" result = stream_claude(code)\n",
" elif model == \"Ollama\":\n",
" result = stream_ollama(code)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far\n",
" return result"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Gradio interface"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks() as ui:\n",
" gr.Markdown(\"## Write unit tests for Python code\")\n",
" with gr.Row():\n",
" with gr.Column(scale=1, min_width=300):\n",
" python = gr.Textbox(label=\"Python code:\", value=function_to_test, lines=10)\n",
" model = gr.Dropdown([\"GPT\", \"Claude\", \"Ollama\"], label=\"Select model\", value=\"GPT\")\n",
" unit_tests = gr.Button(\"Write unit tests\")\n",
" with gr.Column(scale=1, min_width=300):\n",
" unit_tests_out = gr.TextArea(label=\"Unit tests\", value=test_code, elem_classes=[\"python\"])\n",
" unit_tests_run = gr.Button(\"Run unit tests\")\n",
" coverage_run = gr.Button(\"Coverage report\")\n",
" save_test_run = gr.Button(\"Save unit tests\")\n",
" with gr.Row():\n",
" \n",
" python_out = gr.TextArea(label=\"Unit tests result\", elem_classes=[\"python\"])\n",
" coverage_out = gr.TextArea(label=\"Coverage report\", elem_classes=[\"python\"])\n",
" \n",
"\n",
" unit_tests.click(optimize, inputs=[python, model], outputs=[unit_tests_out])\n",
" unit_tests_run.click(execute_tests_in_venv, inputs=[python, unit_tests_out], outputs=[python_out])\n",
" coverage_run.click(execute_coverage_report, outputs=[coverage_out])\n",
" save_test_run.click(save_unit_tests, inputs=[unit_tests_out])\n",
"\n",
"\n",
"ui.launch(inbrowser=True)\n",
"# ui.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llm_engineering-yg2xCEUG",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

2
week6/day4.ipynb

@ -398,7 +398,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.10" "version": "3.11.11"
} }
}, },
"nbformat": 4, "nbformat": 4,

3
week8/day5.ipynb

@ -88,6 +88,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"agent_framework = DealAgentFramework()\n", "agent_framework = DealAgentFramework()\n",
"agent_framework.init_agents_as_needed()\n",
"\n", "\n",
"with gr.Blocks(title=\"The Price is Right\", fill_width=True) as ui:\n", "with gr.Blocks(title=\"The Price is Right\", fill_width=True) as ui:\n",
"\n", "\n",
@ -176,7 +177,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "096397f9-1215-4814-ab4b-e32002ff4ceb", "id": "f9dd0a27-7d46-4c9e-bbe4-a61c9c899c99",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": []

18
week8/memory.json

@ -16,23 +16,5 @@
}, },
"estimate": 930.8824204895075, "estimate": 930.8824204895075,
"discount": 225.88242048950747 "discount": 225.88242048950747
},
{
"deal": {
"product_description": "The Insignia Class F30 Series NS-55F301NA25 is a 55\" 4K HDR UHD Smart TV with a native resolution of 3840x2160. Featuring HDR support, it enhances color and contrast for a more dynamic viewing experience. The TV integrates seamlessly with Amazon Fire TV, working with both Amazon Alexa and Google Home for voice control. It offers three HDMI ports for multiple device connections, making it a perfect entertainment hub for your living space.",
"price": 200.0,
"url": "https://www.dealnews.com/products/Insignia/Insignia-Class-F30-Series-NS-55-F301-NA25-55-4-K-HDR-LED-UHD-Smart-TV/467523.html?iref=rss-f1912"
},
"estimate": 669.1921927283588,
"discount": 469.1921927283588
},
{
"deal": {
"product_description": "The Samsung 27-Cu. Ft. Mega Capacity 3-Door French Door Counter Depth Refrigerator combines style with spacious organization. This model features a dual auto ice maker, which ensures you always have ice on hand, and adjustable shelves that provide versatile storage options for your groceries. Designed with a sleek, fingerprint resistant finish, it not only looks modern but also simplifies cleaning. With its generous capacity, this refrigerator is perfect for large households or those who love to entertain.",
"price": 1299.0,
"url": "https://www.dealnews.com/products/Samsung/Samsung-27-Cu-Ft-Mega-Capacity-3-Door-French-Door-Counter-Depth-Refrigerator/454702.html?iref=rss-c196"
},
"estimate": 2081.647127763905,
"discount": 782.6471277639048
} }
] ]

1
week8/price_is_right_final.py

@ -45,6 +45,7 @@ class App:
def get_agent_framework(self): def get_agent_framework(self):
if not self.agent_framework: if not self.agent_framework:
self.agent_framework = DealAgentFramework() self.agent_framework = DealAgentFramework()
self.agent_framework.init_agents_as_needed()
return self.agent_framework return self.agent_framework
def run(self): def run(self):
