From 6b7cac0fa331b7517c3bf1d6bad7ce855e103b89 Mon Sep 17 00:00:00 2001
From: jasjyotsinghjaswal
Date: Wed, 8 Jan 2025 13:21:18 -0400
Subject: [PATCH 01/16] Added notebook with a link to the repo that has the
 LLM app OhSheet!!!ItsSpark to convert formula-driven Excel spreadsheets to
 PySpark formulas

---
 week2/oh_sheet_its_spark!!!!.ipynb | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 week2/oh_sheet_its_spark!!!!.ipynb

diff --git a/week2/oh_sheet_its_spark!!!!.ipynb b/week2/oh_sheet_its_spark!!!!.ipynb
new file mode 100644
index 0000000..4187c73
--- /dev/null
+++ b/week2/oh_sheet_its_spark!!!!.ipynb
@@ -0,0 +1,30 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Repo link to an LLM app that can help you convert any Excel spreadsheet with formulas into equivalent PySpark transformations in a few clicks"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "https://github.com/jasjyotsinghjaswal/llm_custom_apps"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From 0e285830ae1178b66ab4d3c91cc49b8e09a17309 Mon Sep 17 00:00:00 2001
From: jasjyotsinghjaswal
Date: Wed, 8 Jan 2025 13:21:45 -0400
Subject: [PATCH 02/16] Delete week2/oh_sheet_its_spark!!!!.ipynb

---
 week2/oh_sheet_its_spark!!!!.ipynb | 30 ------------------------------
 1 file changed, 30 deletions(-)
 delete mode 100644 week2/oh_sheet_its_spark!!!!.ipynb

diff --git a/week2/oh_sheet_its_spark!!!!.ipynb b/week2/oh_sheet_its_spark!!!!.ipynb
deleted file mode 100644
index 4187c73..0000000
--- a/week2/oh_sheet_its_spark!!!!.ipynb
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Repo link to an LLM app that can help you convert any Excel spreadsheet with formulas into equivalent PySpark transformations in a few clicks"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "https://github.com/jasjyotsinghjaswal/llm_custom_apps"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
-  }
- ],
- "metadata": {
-  "language_info": {
-   "name": "python"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}

From 85afe7d95a330995cd2214a9f3c6badf03ba1cbc Mon Sep 17 00:00:00 2001
From: jasjyotsinghjaswal
Date: Wed, 8 Jan 2025 13:23:07 -0400
Subject: [PATCH 03/16] Added notebook with a link to the repo that has the
 LLM app OhSheet!!!ItsSpark to convert formula-driven Excel spreadsheets to
 PySpark transformations

---
 .../oh_sheet_its_spark!!!!.ipynb | 30 +++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 week2/community-contributions/oh_sheet_its_spark!!!!.ipynb

diff --git a/week2/community-contributions/oh_sheet_its_spark!!!!.ipynb b/week2/community-contributions/oh_sheet_its_spark!!!!.ipynb
new file mode 100644
index 0000000..4187c73
--- /dev/null
+++ b/week2/community-contributions/oh_sheet_its_spark!!!!.ipynb
@@ -0,0 +1,30 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Repo link to an LLM app that can help you convert any Excel spreadsheet with formulas into equivalent PySpark transformations in a few clicks"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "https://github.com/jasjyotsinghjaswal/llm_custom_apps"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From 84c8aded5e4b1aec2ffa0a012b986b76777b4c89 Mon Sep 17 00:00:00 2001
From: Elena Shirokova
Date: Sat, 18 Jan 2025 14:39:53 +0100
Subject: [PATCH 04/16] adding the notebook for unit tests generation
 assignment

---
 .../unit-tests-generator.ipynb | 432 ++++++++++++++++++
 1 file changed, 432 insertions(+)
 create mode 100644 week4/community-contributions/unit-tests-generator.ipynb

diff --git a/week4/community-contributions/unit-tests-generator.ipynb b/week4/community-contributions/unit-tests-generator.ipynb
new file mode 100644
index 0000000..4825544
--- /dev/null
+++ b/week4/community-contributions/unit-tests-generator.ipynb
@@ -0,0 +1,432 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Requirements\n",
+    "\n",
+    "1. Install the pytest and pytest-cov libraries\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pipenv install pytest pytest-cov"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "import re\n",
+    "import os\n",
+    "import sys\n",
+    "import textwrap\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "import anthropic\n",
+    "import gradio as gr\n",
+    "from pathlib import Path\n",
+    "import subprocess\n",
+    "from IPython.display import Markdown"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Initialization\n",
+    "\n",
+    "load_dotenv()\n",
+    "\n",
+    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+    "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
+    "if openai_api_key:\n",
+    "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+    "else:\n",
+    "    print(\"OpenAI API Key not set\")\n",
+    "    \n",
+    "OPENAI_MODEL = \"gpt-4o-mini\"\n",
+    "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n",
+    "openai = OpenAI()\n",
+    "claude = anthropic.Anthropic()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
+    "HEADERS = {\"Content-Type\": \"application/json\"}\n",
+    "OLLAMA_MODEL = \"llama3.2\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Code execution"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "def extract_code(text):\n",
+    "    # Regular expression to find the code between ```python and ``` fences\n",
+    "    match = re.search(r\"```python(.*?)```\", text, re.DOTALL)\n",
+    "\n",
+    "    if match:\n",
+    "        code = match.group(0).strip() # Extract and strip extra spaces\n",
+    "    else:\n",
+    "        code = \"\"\n",
+    "        print(\"No matching substring found.\")\n",
+    "\n",
+    "    return code.replace(\"```python\\n\", \"\").replace(\"```\", \"\")\n",
+    "\n",
+    "\n",
+    "def execute_coverage_report(python_interpreter=sys.executable):\n",
+    "    if not python_interpreter:\n",
+    "        raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n",
+    "    # test_code_path = Path(\"tests\")\n",
+    "    # command = [\"pytest\", \"-cov\",\"--capture=no\"]\n",
+    "    command = [\"coverage\", \"run\", \"-m\", \"pytest\"]\n",
+    "    # command =[\"pytest\", \"--cov=your_package\", \"--cov-report=term-missing\"]\n",
+    "\n",
+    "    try:\n",
+    "        result = subprocess.run(command, check=True, capture_output=True, text=True)\n",
+    "        print(\"Tests ran successfully!\")\n",
+    "        print(result.stdout)\n",
+    "        return result.stdout\n",
+    "    except subprocess.CalledProcessError as e:\n",
+    "        print(\"Some tests failed!\")\n",
+    "        print(\"Output:\\n\", e.stdout)\n",
+    "        print(\"Errors:\\n\", e.stderr)\n",
+    "        # Extracting failed test information\n",
+    "        failed_tests = []\n",
+    "        for line in e.stdout.splitlines():\n",
+    "            if \"FAILED\" in line and \"::\" in line:\n",
+    "                failed_tests.append(line.strip())\n",
+    "        if failed_tests:\n",
+    "            print(\"Failed Tests:\")\n",
+    "            for test in failed_tests:\n",
+    "                print(test)\n",
+    "        return failed_tests\n",
+    "\n",
+    "def save_unit_tests(code):\n",
+    "\n",
+    "    match = re.search(r\"def\\s+(\\w+)\\(\", code, re.DOTALL)\n",
+    "\n",
+    "    if match:\n",
+    "        function_name = match.group(1).strip() # Extract and strip extra spaces\n",
+    "    else:\n",
+    "        function_name = \"\"\n",
+    "        print(\"No matching substring found.\")\n",
+    "\n",
+    "    test_code_path = Path(\"tests\")\n",
+    "    (test_code_path / f\"test_{function_name}.py\").write_text(extract_code(code))\n",
+    "    Path(\"tests\", \"test_code.py\").unlink()\n",
+    "    \n",
+    "\n",
+    "def execute_tests_in_venv(code_to_test, tests, python_interpreter=sys.executable):\n",
+    "    \"\"\"\n",
+    "    Execute the given Python code string within the specified virtual environment.\n",
+    "    \n",
+    "    Args:\n",
+    "    - code_str: str, the Python code to execute.\n",
+    "    - venv_dir: str, the directory path to the virtual environment created by pipenv.\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    if not python_interpreter:\n",
+    "        raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n",
+    "\n",
+    "    # Prepare the command to execute the code\n",
+    "    code_str = textwrap.dedent(code_to_test) + \"\\n\" + extract_code(tests)\n",
+    "    test_code_path = Path(\"tests\")\n",
+    "    test_code_path.mkdir(parents=True, exist_ok=True)\n",
+    "    (test_code_path / f\"test_code.py\").write_text(code_str)\n",
+    "    command = [\"pytest\", str(test_code_path)]\n",
+    "\n",
+    "    try:\n",
+    "        result = subprocess.run(command, check=True, capture_output=True, text=True)\n",
+    "        print(\"Tests ran successfully!\")\n",
+    "        print(result.stderr)\n",
+    "        return result.stdout\n",
+    "    except subprocess.CalledProcessError as e:\n",
+    "        print(\"Some tests failed!\")\n",
+    "        print(\"Output:\\n\", e.stdout)\n",
+    "        print(\"Errors:\\n\", e.stderr)\n",
+    "        # Extracting failed test information\n",
+    "        failed_tests = []\n",
+    "        for line in e.stdout.splitlines():\n",
+    "            if \"FAILED\" in line and \"::\" in line:\n",
+    "                failed_tests.append(line.strip())\n",
+    "        if failed_tests:\n",
+    "            print(\"Failed Tests:\")\n",
+    "            for test in failed_tests:\n",
+    "                print(test)\n",
+    "        return e.stderr\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Prompts and calls to the models"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "system_message = \"\"\"You are a helpful assistant that helps developers write unit test cases for their code.\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_user_prompt(code):\n",
+    "\n",
+    "    user_prompt = \"Write unit test cases for the following Python code.\"\n",
+    "    user_prompt += \"Return unit test cases using the pytest library; do not create any custom imports, and do not explain your work other than a few comments.\"\n",
+    "    user_prompt += \"Do not insert the function to be tested in the output before the tests. Validate both the case where the function is executed successfully and where it is expected to fail.\"\n",
+    "    user_prompt += code\n",
+    "\n",
+    "    return user_prompt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def stream_gpt(code):\n",
+    "\n",
+    "    user_prompt = get_user_prompt(code)\n",
+    "    stream = openai.chat.completions.create(\n",
+    "        model=OPENAI_MODEL,\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": system_message},\n",
+    "            {\n",
+    "                \"role\": \"user\",\n",
+    "                \"content\": user_prompt,\n",
+    "            },\n",
+    "        ],\n",
+    "        stream=True,\n",
+    "    )\n",
+    "\n",
+    "    response = \"\"\n",
+    "    for chunk in stream:\n",
+    "        response += chunk.choices[0].delta.content or \"\"\n",
+    "        yield response\n",
+    "    \n",
+    "    return response\n",
+    "\n",
+    "def stream_ollama(code):\n",
+    "\n",
+    "    user_prompt = get_user_prompt(code)\n",
+    "    ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
+    "    stream = ollama_via_openai.chat.completions.create(\n",
+    "        model=OLLAMA_MODEL,\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": system_message},\n",
+    "            {\n",
+    "                \"role\": \"user\",\n",
+    "                \"content\": user_prompt,\n",
+    "            },\n",
+    "        ],\n",
+    "        stream=True,\n",
+    "    )\n",
+    "\n",
+    "    response = \"\"\n",
+    "    for chunk in stream:\n",
+    "        response += chunk.choices[0].delta.content or \"\"\n",
+    "        yield response\n",
+    "    \n",
+    "    return response\n",
+    "\n",
+    "\n",
+    "def stream_claude(code):\n",
+    "    user_prompt = get_user_prompt(code)\n",
+    "    result = claude.messages.stream(\n",
+    "        model=CLAUDE_MODEL,\n",
+    "        max_tokens=2000,\n",
+    "        system=system_message,\n",
+    "        messages=[\n",
+    "            {\n",
+    "                \"role\": \"user\",\n",
+    "                \"content\": user_prompt,\n",
+    "            }\n",
+    "        ],\n",
+    "    )\n",
+    "    reply = \"\"\n",
+    "    with result as stream:\n",
+    "        for text in stream.text_stream:\n",
+    "            reply += text\n",
+    "            yield reply\n",
+    "            print(text, end=\"\", flush=True)\n",
+    "    return reply"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Code examples to test the interface"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "function_to_test = \"\"\"\n",
+    "    def lengthOfLongestSubstring(s):\n",
+    "        max_length = 0\n",
+    "        substring = \"\"\n",
+    "        start_idx = 0\n",
+    "        while start_idx < len(s):\n",
+    "            string = s[start_idx:]\n",
+    "            for i, x in enumerate(string):\n",
+    "                substring += x\n",
+    "                if len(substring) == len(set((list(substring)))):\n",
+    "                    \n",
+    "                    if len(set((list(substring)))) > max_length:\n",
+    "                        \n",
+    "                        max_length = len(substring)\n",
+    "\n",
+    "            start_idx += 1\n",
+    "            substring = \"\"\n",
+    "    \n",
+    "    \n",
+    "        return max_length\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "test_code = \"\"\"```python\n",
+    "import pytest\n",
+    "\n",
+    "# Unit tests using pytest\n",
+    "def test_lengthOfLongestSubstring():\n",
+    "    assert lengthOfLongestSubstring(\"abcabcbb\") == 3 # Case with repeating characters\n",
+    "    assert lengthOfLongestSubstring(\"bbbbb\") == 1 # Case with all same characters\n",
+    "    assert lengthOfLongestSubstring(\"pwwkew\") == 3 # Case with mixed characters\n",
+    "    assert lengthOfLongestSubstring(\"\") == 0 # Empty string case\n",
+    "    assert lengthOfLongestSubstring(\"abcdef\") == 6 # All unique characters\n",
+    "    assert lengthOfLongestSubstring(\"abca\") == 3 # Case with pattern and repeat\n",
+ " assert lengthOfLongestSubstring(\"dvdf\") == 3 # Case with repeated characters separated\n", + " assert lengthOfLongestSubstring(\"a\") == 1 # Case with single character\n", + " assert lengthOfLongestSubstring(\"au\") == 2 # Case with unique two characters\n", + "```\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def optimize(code, model):\n", + " if model == \"GPT\":\n", + " result = stream_gpt(code)\n", + " elif model == \"Claude\":\n", + " result = stream_claude(code)\n", + " elif model == \"Ollama\":\n", + " result = stream_ollama(code)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + " for stream_so_far in result:\n", + " yield stream_so_far\n", + " return result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Gradio interface" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with gr.Blocks() as ui:\n", + " gr.Markdown(\"## Write unit tests for Python code\")\n", + " with gr.Row():\n", + " with gr.Column(scale=1, min_width=300):\n", + " python = gr.Textbox(label=\"Python code:\", value=function_to_test, lines=10)\n", + " model = gr.Dropdown([\"GPT\", \"Claude\", \"Ollama\"], label=\"Select model\", value=\"GPT\")\n", + " unit_tests = gr.Button(\"Write unit tests\")\n", + " with gr.Column(scale=1, min_width=300):\n", + " unit_tests_out = gr.TextArea(label=\"Unit tests\", value=test_code, elem_classes=[\"python\"])\n", + " unit_tests_run = gr.Button(\"Run unit tests\")\n", + " coverage_run = gr.Button(\"Coverage report\")\n", + " save_test_run = gr.Button(\"Save unit tests\")\n", + " with gr.Row():\n", + " \n", + " python_out = gr.TextArea(label=\"Unit tests result\", elem_classes=[\"python\"])\n", + " coverage_out = gr.TextArea(label=\"Coverage report\", elem_classes=[\"python\"])\n", + " \n", + "\n", + " unit_tests.click(optimize, inputs=[python, model], outputs=[unit_tests_out])\n", + " unit_tests_run.click(execute_tests_in_venv, inputs=[python, unit_tests_out], outputs=[python_out])\n", + " coverage_run.click(execute_coverage_report, outputs=[coverage_out])\n", + " save_test_run.click(save_unit_tests, inputs=[unit_tests_out])\n", + "\n", + "\n", + "ui.launch(inbrowser=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llm_engineering-yg2xCEUG", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 94f47af388fc5911e77663c105ba13efed4ee904 Mon Sep 17 00:00:00 2001 From: samt07 Date: Sat, 18 Jan 2025 16:00:19 -0500 Subject: [PATCH 05/16] Added Wiki page summary notebook to community-contributions --- .../day1-wiki-summary.ipynb | 194 ++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 week1/community-contributions/day1-wiki-summary.ipynb diff --git a/week1/community-contributions/day1-wiki-summary.ipynb b/week1/community-contributions/day1-wiki-summary.ipynb new file mode 100644 index 0000000..dfd8f68 --- /dev/null +++ b/week1/community-contributions/day1-wiki-summary.ipynb @@ -0,0 +1,194 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "2112166e-3629-4167-a4cb-0a1a6e549e97", + "metadata": {}, + "source": [ + "# Hello everyone, \n", + "The community 
contributions folder is super motivating. Thanks to Ed for democratising learning with this great idea of sharing. The below small piece is my novice attempt in summarizing content from wikipedia page. It is pretty straightforward, but a good learning exercise for me nevertheless. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "947028c8-30c6-456a-8e0c-25e0de1ecbb6", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install wikipedia" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aa18a060-6dbe-42c9-bc11-c8b079397d6b", + "metadata": {}, + "outputs": [], + "source": [ + "# Import statements\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "import wikipedia\n", + "import warnings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d9c128d-ed7d-4e58-8cd1-1468242c7967", + "metadata": {}, + "outputs": [], + "source": [ + "#To supress a warning from wikipedia module when there are multiple options.\n", + "warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"wikipedia\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5371f405-e628-4b6a-a5ab-5774c1431749", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6610504-bd7b-459f-9722-0044b3101e05", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:\n", + "# openai = OpenAI(api_key=\"your-key-here-starting-sk-proj-\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ac37741a-2608-4760-8ba8-163fb9155f0f", + "metadata": {}, + "outputs": [], + "source": [ + "class Wikipedia:\n", + " def __init__(self, searchText):\n", + " \"\"\"\n", + " Create this object to extract the summary of wikipedia page for a text entered by user\n", + " \"\"\"\n", + " self.searchText = searchText\n", + " self.summary_text = None\n", + " self.user_prompt = None\n", + " \n", + " self._fetch_summary()\n", + "\n", + " def _fetch_summary(self):\n", + " \"\"\"\n", + " Fetches the summary from wikipedia page based on user entered search text and sets user prompt accordingly\n", + " \"\"\"\n", + " try:\n", + " # Try to get the summary of the text from Wikipedia based on user entered text. 
Using starightforward summary module in wikipedia.\n", + " self.summary_text = wikipedia.summary(self.searchText)\n", + " self.user_prompt = f\"You are looking a summary extract from a wikipedia page. The content is as follows\\n {self.summary_text}.\\nProvide \\\n", + " a summary taking key points from each sections listed on the page\"\n", + " except wikipedia.DisambiguationError as e:\n", + " #Modify user and system prompts if there are multiple options for a user search text\n", + " self.user_prompt = f\"You have received quite a few options {e.options} for the keyword {self.searchText}. Please request user to choose one of them\"\n", + " except wikipedia.PageError:\n", + " #To handle when there is no page\n", + " self.user_prompt = f\"There is no wiki page for {self.searchText}. Apparently it is not your fault!\"\n", + " except Exception as e:\n", + " # To handle any other exceptions\n", + " self.user_prompt = f\"Sorry, something seems to be wrong on my end. Please try again later\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "143c203e-bb99-49c6-89a2-2a32ea429719", + "metadata": {}, + "outputs": [], + "source": [ + "# Our by-now familiar sumamrize function\n", + "def summarize(searchText):\n", + " wiki = Wikipedia(searchText)\n", + " system_prompt = f\"You are an assitant trying to summarize content from Wikipedia. You will have three scenarios to handle your responses \\\n", + " 1. You will have the summary text content and you will just show that to user\\\n", + " 2. You will have multiple options for the user entered keyword, and you will respond by asking user to choose from that and request again \\\n", + " 3. You will not have the content due to a page not found error. Respond accordingly.\\\n", + " Respond all of these in Markdown format.\"\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": wiki.user_prompt}\n", + " ]\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", + " return response.choices[0].message.content\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b61532fc-189c-4cd8-9402-93d8d8fa8c59", + "metadata": {}, + "outputs": [], + "source": [ + "summary = summarize(\"mukhari\")\n", + "display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5c3f05f6-acb5-41e4-a521-8d8b8ace0192", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 7a942cdf95a4b270540b9838f2f99447e92e7b10 Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 19 Jan 2025 18:52:13 +0100 Subject: [PATCH 06/16] my homework --- .../day5-homework.ipynb | 1276 +++++++++++++++++ 1 file changed, 1276 insertions(+) create mode 100644 week4/community-contributions/day5-homework.ipynb diff --git a/week4/community-contributions/day5-homework.ipynb b/week4/community-contributions/day5-homework.ipynb new file mode 100644 index 0000000..3d6bded --- /dev/null +++ b/week4/community-contributions/day5-homework.ipynb @@ -0,0 +1,1276 @@ +{ + "cells": [ 
+ { + "cell_type": "code", + "execution_count": null, + "id": "6d67dba5-38ec-459a-9132-4a56c6a814cd", + "metadata": {}, + "outputs": [], + "source": [ + "Comment and Unit Test Generater \n", + "\n", + "The requirement: \n", + "* use an LLM to generate docstring and comments for Python code\n", + "* use an LLM to generate unit test\n", + "\n", + "This is my week 4 day 5 project." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "ea1841f6-4afc-4d29-ace8-5ca5a3915c8c", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import io\n", + "import sys\n", + "import json\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import google.generativeai\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display\n", + "import gradio as gr\n", + "import subprocess\n", + "from huggingface_hub import login, InferenceClient\n", + "from transformers import AutoTokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "11957fd3-6c61-4496-aef1-8223cb9ec4ce", + "metadata": {}, + "outputs": [], + "source": [ + "# environment\n", + "\n", + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ee7b08fd-e678-4234-895e-4e3a925e60f0", + "metadata": {}, + "outputs": [], + "source": [ + "# initialize\n", + "\n", + "openai = OpenAI()\n", + "claude = anthropic.Anthropic()\n", + "OPENAI_MODEL = \"gpt-4o\"\n", + "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "c8023255-9c98-4fbc-92e4-c553bed3b605", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured.\n" + ] + } + ], + "source": [ + "hf_token = os.environ['HF_TOKEN']\n", + "login(hf_token, add_to_git_credential=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "f8ce3f5e-74c4-4d35-bfbc-91c5be85e094", + "metadata": {}, + "outputs": [], + "source": [ + "code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n", + "CODE_QWEN_URL = \"https://g39mbjooiiwkbgyz.us-east-1.aws.endpoints.huggingface.cloud\"" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "id": "1bbc66b6-52ae-465e-a368-edc8f097fe9d", + "metadata": {}, + "outputs": [], + "source": [ + "def system_prompt_for_comment():\n", + " system=\"\"\"\n", + " You are a Python documentation expert. 
When writing documentation:\n", + " - Follow PEP 257 and Google docstring style guidelines\n", + " - Write clear, concise explanations\n", + " - Include practical examples\n", + " - Highlight edge cases and limitations\n", + " - Use type hints in docstrings\n", + " - Add inline comments only for complex logic\n", + " - Never skip documenting parameters or return values\n", + " - Validate that all documentation is accurate and complete\n", + " \"\"\"\n", + " return system" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "id": "b089f87b-53ae-40ad-8d06-b9924bb998a0", + "metadata": {}, + "outputs": [], + "source": [ + "def system_prompt_for_unit_test():\n", + " system=\"\"\"\n", + " You are an expert Python testing engineer who specializes in creating comprehensive unit tests. Follow these principles:\n", + " - Use pytest as the testing framework\n", + " - Follow the Arrange-Act-Assert pattern\n", + " - Test both valid and invalid inputs\n", + " - Include edge cases and boundary conditions\n", + " - Write descriptive test names that explain the scenario being tested\n", + " - Create independent tests that don't rely on each other\n", + " - Use appropriate fixtures and parametrize when needed\n", + " - Add clear comments explaining complex test logic\n", + " - Cover error cases and exceptions\n", + " - Achieve high code coverage while maintaining meaningful tests\n", + " \"\"\"\n", + " return system" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "22193622-f3a0-4894-a6c4-eb6d88097861", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for_comment(code):\n", + " user = f\"\"\"\n", + " Please document this Python code with:\n", + " \n", + " 1. A docstring containing:\n", + " - A clear description of purpose and functionality\n", + " - All parameters with types and descriptions\n", + " - Return values with types\n", + " - Exceptions that may be raised\n", + " - At least one usage example\n", + " - Any important notes or limitations\n", + " \n", + " 2. Strategic inline comments for:\n", + " - Complex algorithms or business logic\n", + " - Non-obvious implementation choices\n", + " - Performance considerations\n", + " - Edge cases\n", + " \n", + " Here's the code to document:\n", + " \\n{code}\n", + " \"\"\"\n", + " return user;" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "id": "81e61752-ec2f-44c1-86a2-ff3234a0358c", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for_unit_test(code):\n", + " user = f\"\"\"\n", + " Please generate unit tests for the following Python code. Include:\n", + " \n", + " 1. Test cases for:\n", + " - Normal/expected inputs\n", + " - Edge cases and boundary values\n", + " - Invalid inputs and error conditions\n", + " - Different combinations of parameters\n", + " - All public methods and functions\n", + " \n", + " 2. For each test:\n", + " - Clear test function names describing the scenario\n", + " - Setup code (fixtures if needed)\n", + " - Test data preparation\n", + " - Expected outcomes\n", + " - Assertions checking results\n", + " - Comments explaining complex test logic\n", + " \n", + " 3. 
Include any necessary:\n", + " - Imports\n", + " - Fixtures\n", + " - Mock objects\n", + " - Helper functions\n", + " - Test data generators\n", + " \n", + " Here's the code to test:\n", + " \\n{code}\n", + " \"\"\"\n", + " return user" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f31ceed3-0eb2-4962-ab86-2d0302185560", + "metadata": {}, + "outputs": [], + "source": [ + "pi = \"\"\"\n", + "import time\n", + "\n", + "def calculate(iterations, param1, param2):\n", + " result = 1.0\n", + " for i in range(1, iterations+1):\n", + " j = i * param1 - param2\n", + " result -= (1/j)\n", + " j = i * param1 + param2\n", + " result += (1/j)\n", + " return result\n", + "\n", + "start_time = time.time()\n", + "result = calculate(100_000_000, 4, 1) * 4\n", + "end_time = time.time()\n", + "\n", + "print(f\"Result: {result:.12f}\")\n", + "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "192c30f5-4be6-49b7-a054-11bfcffa91e0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Result: 3.141592658589\n", + "Execution Time: 58.228012 seconds\n" + ] + } + ], + "source": [ + "exec(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "id": "d4e920dc-4094-42d8-9255-18f2919df2d4", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for_comment(python):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt_for_comment()},\n", + " {\"role\": \"user\", \"content\": user_prompt_for_comment(python)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "id": "77500cae-bf84-405c-8b03-2f984108951b", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for_unit_test(python):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt_for_unit_test()},\n", + " {\"role\": \"user\", \"content\": user_prompt_for_unit_test(python)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "id": "5ec58bf1-4a44-4c21-a71a-2cac359884e5", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_comment_gpt(code):\n", + " stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for_comment(code), stream=True)\n", + " reply = \"\"\n", + " for chunk in stream:\n", + " fragment = chunk.choices[0].delta.content or \"\"\n", + " reply += fragment\n", + " #print(fragment, end='', flush=True)\n", + " yield reply.replace('```','') \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "47c615e2-4eb6-4ce1-ad09-7f2e6dbc3934", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "```python\n", + "import time\n", + "\n", + "def calculate(iterations: int, param1: float, param2: float) -> float:\n", + " \"\"\"\n", + " Performs a series of mathematical operations in a loop to calculate a result.\n", + "\n", + " This function iteratively modifies a result variable through a series of arithmetic\n", + " operations. Essentially, it calculates the sum of alternating series adjustments,\n", + " simulating a specific numerical approximation process.\n", + "\n", + " Args:\n", + " iterations (int): The number of iterations to perform. 
Must be a positive integer.\n", + " param1 (float): The factor applied for multiplication inside the iteration.\n", + " param2 (float): The factor subtracted and added inside the iteration for denominator adjustment.\n", + "\n", + " Returns:\n", + " float: The calculated result after completing all iterations.\n", + "\n", + " Raises:\n", + " ZeroDivisionError: If any calculated denominator becomes zero during execution,\n", + " which may happen if `i * param1 - param2` or `i * param1 + param2` evaluates to zero.\n", + "\n", + " Usage Example:\n", + " result = calculate(100_000_000, 4, 1)\n", + " print(f\"Calculated Result: {result * 4}\")\n", + "\n", + " Notes:\n", + " - The function can be computationally intensive depending on the number of iterations.\n", + " - Ensure that `param1` and `param2` are chosen to avoid division by zero.\n", + " - Floating-point precision issues might arise due to large iterations count.\n", + " \"\"\"\n", + " \n", + " result = 1.0\n", + " for i in range(1, iterations + 1):\n", + " # Calculate modified denominator by subtracting param2\n", + " j = i * param1 - param2\n", + " \n", + " # Subtract reciprocal from the result\n", + " # Potential ZeroDivisionError if (i * param1 - param2) == 0\n", + " result -= (1 / j)\n", + " \n", + " # Calculate modified denominator by adding param2\n", + " j = i * param1 + param2\n", + " \n", + " # Add reciprocal to the result\n", + " # Potential ZeroDivisionError if (i * param1 + param2) == 0\n", + " result += (1 / j)\n", + " \n", + " return result\n", + "\n", + "\n", + "start_time = time.time()\n", + "result = calculate(100_000_000, 4, 1) * 4 # Scaling final result by 4 for specific use case\n", + "end_time = time.time()\n", + "\n", + "# Output result with high precision and execution time for measurement\n", + "print(f\"Result: {result:.12f}\")\n", + "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", + "```\n", + "\n", + "### Explanation of Changes:\n", + "- **Docstring**: The docstring provides a comprehensive explanation of the function's purpose and the calculations it performs, specifying parameter types and behavior.\n", + "- **Exceptions**: A note about `ZeroDivisionError` is included, as the calculation might lead to division by zero with certain inputs.\n", + "- **Usage Example**: Demonstrates how to call the function with a specific configuration.\n", + "- **Notes**: Provides guidance on potential performance concerns and precision limitations.\n", + "- **Inline Comments**: Added to clarify key lines where logical computations occur and where division by zero might be a risk." 
+     ]
+    }
+   ],
+   "source": [
+    "stream_comment_gpt(pi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 59,
+   "id": "0b990875-31fd-40e5-bc8c-f6099d362249",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def stream_unit_test_gpt(code):\n",
+    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for_unit_test(code), stream=True)\n",
+    "    reply = \"\"\n",
+    "    for chunk in stream:\n",
+    "        fragment = chunk.choices[0].delta.content or \"\"\n",
+    "        reply += fragment\n",
+    "        #print(fragment, end='', flush=True)\n",
+    "        yield reply.replace('```','')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 73,
+   "id": "3dc90578-4f5e-47f1-b30f-c21b5795e82f",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 73,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "stream_unit_test_gpt(pi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 60,
+   "id": "17380c0f-b851-472b-a234-d86f5c219e50",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def stream_comment_claude(code):\n",
+    "    result = claude.messages.stream(\n",
+    "        model=CLAUDE_MODEL,\n",
+    "        max_tokens=2000,\n",
+    "        system=system_prompt_for_comment(),\n",
+    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for_comment(code)}],\n",
+    "    )\n",
+    "    reply = \"\"\n",
+    "    with result as stream:\n",
+    "        for text in stream.text_stream:\n",
+    "            reply += text\n",
+    "            #print(text, end=\"\", flush=True)\n",
+    "            yield reply.replace('```','')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 64,
+   "id": "0a2d016d-76a2-4752-bd4d-6f93ddec46be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def stream_unit_test_claude(code):\n",
+    "    result = claude.messages.stream(\n",
+    "        model=CLAUDE_MODEL,\n",
+    "        max_tokens=2000,\n",
+    "        system=system_prompt_for_unit_test(),\n",
+    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for_unit_test(code)}],\n",
+    "    )\n",
+    "    reply = \"\"\n",
+    "    with result as stream:\n",
+    "        for text in stream.text_stream:\n",
+    "            reply += text\n",
+    "            #print(text, end=\"\", flush=True)\n",
+    "            yield reply.replace('```','')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "id": "ee43428e-b577-4e95-944d-399f2f3b89ff",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Here's the documented version of your Python code:\n",
+      "\n",
+      "[Streamed output garbled in the source. Recoverable gist: calculate() documented with type hints and a Google-style docstring covering Args, Returns, Raises (ValueError for non-positive iterations, ZeroDivisionError), a usage example, notes on computational cost and floating-point precision, and inline comments on the two reciprocal terms.]"
+     ]
+    }
+   ],
+   "source": [
+    "stream_comment_claude(pi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 63,
+   "id": "0565e33b-9f14-48b7-ae8d-d22dc03b93c9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Here's a comprehensive set of unit tests for the given Python code using pytest:\n",
+      "\n",
+      "[Streamed output garbled in the source. Recoverable gist: a pytest suite with parametrized normal cases, edge cases and boundary values, invalid inputs (ValueError, ZeroDivisionError, TypeError), parameter combinations, execution-time and precision checks, a mocked time.time, a monotonicity helper, and extreme-parameter tests, plus instructions to save it as test_your_module.py and run pytest.]"
+     ]
+    }
+   ],
+   "source": [
+    "stream_unit_test_claude(pi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 40,
+   "id": "f13b3a5b-366d-4b28-adda-977a313e6b4d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def stream_comment_model(model, model_url, code):\n",
+    "    tokenizer = AutoTokenizer.from_pretrained(model)\n",
+    "    messages = messages_for_comment(code)\n",
+    "    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+    "    client = InferenceClient(model_url, token=hf_token)\n",
+    "    stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
+    "    result = \"\"\n",
+    "    for r in stream:\n",
+    "        #print(r.token.text, end = \"\")\n",
+    "        result += r.token.text\n",
+    "        yield result    \n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 67,
+   "id": "e2efdb92-fc7a-4952-ab46-ae942cb996bf",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def stream_unit_test_model(model, model_url, code):\n",
+    "    tokenizer = AutoTokenizer.from_pretrained(model)\n",
+    "    messages = messages_for_unit_test(code)\n",
+    "    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+    "    client = InferenceClient(model_url, token=hf_token)\n",
+    "    stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
+    "    result = \"\"\n",
+    "    for r in stream:\n",
+    "        #print(r.token.text, end = \"\")\n",
+    "        result += r.token.text\n",
+    "        yield result    \n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 41,
+   "id": "0a756193-fcba-43da-a981-203c10d36488",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 41,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "stream_comment_model(code_qwen, CODE_QWEN_URL, pi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 70,
+   "id": "12ddcbf4-6286-47a8-847b-5be78e7aa995",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Here are the unit tests for the given Python code:\n",
+      "\n",
+      "[Streamed CodeQwen output garbled in the source. Recoverable gist: a pytest suite for calculate() with a mocked time.time fixture and tests for normal inputs, edge cases, invalid inputs (TypeError), parameter combinations, and execution time.]\n",
+      "\n",
+      "This test suite covers all the scenarios mentioned in the problem description. It tests the function with normal inputs, edge cases, invalid inputs, different combinations of parameters, and checks the execution time.<|im_end|>"
+     ]
+    }
+   ],
+   "source": [
+    "stream_unit_test_model(code_qwen, CODE_QWEN_URL, pi)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 46,
+   "id": "321609ee-b64a-44fc-9090-39f87e1f8e0e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def comment_code(python, model):\n",
+    "    if model==\"GPT\":\n",
+    "        result = stream_comment_gpt(python)\n",
+    "    elif model==\"Claude\":\n",
+    "        result = stream_comment_claude(python)\n",
+    "    elif model==\"CodeQwen\":\n",
+    "        result = stream_comment_model(code_qwen, CODE_QWEN_URL, python)\n",
+    "    else:\n",
+    "        raise ValueError(\"Unknown model\")\n",
+    "    for stream_so_far in result:\n",
+    "        yield stream_so_far        "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 69,
+   "id": "d4c560c9-922d-4893-941f-42893373b1be",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_unit_test(python, model):\n",
+    "    if model==\"GPT\":\n",
+    "        result = stream_unit_test_gpt(python)\n",
+    "    elif model==\"Claude\":\n",
+    "        result = stream_unit_test_claude(python)\n",
+    "    elif model==\"CodeQwen\":\n",
+    "        result = stream_unit_test_model(code_qwen, CODE_QWEN_URL, python)\n",
+    "    else:\n",
+    "        raise ValueError(\"Unknown model\")\n",
+    "    for stream_so_far in result:\n",
+    "        yield stream_so_far        "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "id": "f85bc777-bebe-436b-88cc-b9ecdb6306c0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "css = \"\"\"\n",
+    ".python {background-color: #306998;}\n",
+    ".cpp {background-color: #050;}\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 74,
+   "id": "ee27cc91-81e6-42c8-ae3c-c04161229d8c",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "* Running on local URL:  http://127.0.0.1:7881\n",
+      "\n",
+      "To create a public link, set `share=True` in `launch()`.\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       ""
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": []
+     },
+     "execution_count": 74,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Here are the unit tests for the given Python code:\n",
+      "\n",
+      "[Streamed CodeQwen output garbled in the source; same pytest suite for calculate() as above, with a mocked time.time fixture and tests for normal inputs, edge cases, invalid inputs, parameter combinations, and execution time.]\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Traceback (most recent call last):\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\queueing.py\", line 625, in process_events\n",
+      "    response = await route_utils.call_process_api(\n",
+      "               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\route_utils.py\", line 322, in call_process_api\n",
+      "    output = await app.get_blocks().process_api(\n",
+      "             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 2047, in process_api\n",
+      "    result = await self.call_function(\n",
+      "             ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 1606, in call_function\n",
+      "    prediction = await utils.async_iteration(iterator)\n",
+      "                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 714, in async_iteration\n",
+      "    return await anext(iterator)\n",
+      "           ^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 708, in __anext__\n",
+      "    return await anyio.to_thread.run_sync(\n",
+      "           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n",
+      "    return await get_async_backend().run_sync_in_worker_thread(\n",
+      "           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2505, in run_sync_in_worker_thread\n",
+      "    return await future\n",
+      "           ^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 1005, in run\n",
+      "    result = context.run(func, *args)\n",
+      "             ^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 691, in run_sync_iterator_async\n",
+      "    return next(iterator)\n",
+      "           ^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 852, in gen_wrapper\n",
+      "    response = next(iterator)\n",
+      "               ^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\AppData\\Local\\Temp\\ipykernel_27660\\2822054561.py\", line 10, in get_unit_test\n",
+      "    for stream_so_far in result:\n",
+      "TypeError: 'NoneType' object is not iterable\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[Second streamed CodeQwen output garbled in the source; same unit-test suite as above.]\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Traceback (most recent call last):\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\queueing.py\", line 625, in process_events\n",
+      "    response = await route_utils.call_process_api(\n",
+      "               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\route_utils.py\", line 322, in call_process_api\n",
+      "    output = await app.get_blocks().process_api(\n",
+      "             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 2047, in process_api\n",
+      "    result = await self.call_function(\n",
+      "             ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 1606, in call_function\n",
+      "    prediction = await utils.async_iteration(iterator)\n",
+      "                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 714, in async_iteration\n",
+      "    return await anext(iterator)\n",
+      "           ^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 708, in __anext__\n",
+      "    return await anyio.to_thread.run_sync(\n",
+      "           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n",
+      "    return await get_async_backend().run_sync_in_worker_thread(\n",
+      "           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2505, in run_sync_in_worker_thread\n",
+      "    return await future\n",
+      "           ^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 1005, in run\n",
+      "    result = context.run(func, *args)\n",
+      "             ^^^^^^^^^^^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 691, in run_sync_iterator_async\n",
+      "    return next(iterator)\n",
+      "           ^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 852, in gen_wrapper\n",
+      "    response = next(iterator)\n",
+      "               ^^^^^^^^^^^^^^\n",
+      "  File \"C:\\Users\\ebaba\\AppData\\Local\\Temp\\ipykernel_27660\\2822054561.py\", line 10, in get_unit_test\n",
+      "    for stream_so_far in result:\n",
+      "TypeError: 'NoneType' object is not iterable\n"
+     ]
+    }
+   ],
+   "source": [
+    "with gr.Blocks(css=css) as ui:\n",
+    "    gr.Markdown(\"## Generate comments and unit tests for Python code\")\n",
+    "    with gr.Row():\n",
+    "        python = gr.Textbox(label=\"Python code:\", value=pi, lines=10)\n",
+    "        result = gr.Textbox(label=\"Result code:\", lines=10)\n",
+    "    with gr.Row():\n",
+    "        model = gr.Dropdown([\"GPT\", \"Claude\",\"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n",
+    "    with gr.Row():\n",
+    "        comment_button = gr.Button(\"Comment code\")\n",
+    "    with gr.Row():\n",
+    "        unit_test_button = gr.Button(\"Unit Test code\")\n",
+    "    \n",
+    "    comment_button.click(comment_code, inputs=[python, model], outputs=[result])\n",
+    "    unit_test_button.click(get_unit_test, inputs=[python, model], outputs=[result])\n",
+    "ui.launch(inbrowser=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "06e8279c-b488-4807-9bed-9d26be11c057",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.11"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}

From 2eac15b4799da5c6fce37b363ebb7a4e88a0a2b7 Mon Sep 17 00:00:00 2001
From: emmanuel
Date: Sun, 19 Jan 2025 19:59:23 +0100
Subject: [PATCH 07/16] clean up

---
 .../day5-homework.ipynb | 801 ++----------
 1 file changed, 50 insertions(+), 751 deletions(-)

diff --git a/week4/community-contributions/day5-homework.ipynb b/week4/community-contributions/day5-homework.ipynb
index 3d6bded..c34be7b 100644
--- a/week4/community-contributions/day5-homework.ipynb
+++ b/week4/community-contributions/day5-homework.ipynb
@@ -2,10 +2,19 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "id": "6d67dba5-38ec-459a-9132-4a56c6a814cd",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "ename": "SyntaxError",
+     "evalue": "invalid syntax (2447672335.py, line 1)",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;36m  Cell \u001b[1;32mIn[1], line 1\u001b[1;36m\u001b[0m\n\u001b[1;33m    Comment and Unit Test Generater\u001b[0m\n\u001b[1;37m    ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n"
+     ]
+    }
+   ],
    "source": [
     "Comment and Unit Test Generater \n",
     "\n",
     "The requirement: \n",
     "* use an LLM to generate docstring and comments for Python code\n",
     "* use an LLM to generate unit test\n",
     "\n",
     "This is my week 4 day 5 project."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": null,
    "id": "ea1841f6-4afc-4d29-ace8-5ca5a3915c8c",
    "metadata": {},
    "outputs": [],
@@ -43,7 +52,7 @@
   },
   {
    "cell_type": 
"code", - "execution_count": 3, + "execution_count": null, "id": "11957fd3-6c61-4496-aef1-8223cb9ec4ce", "metadata": {}, "outputs": [], @@ -58,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "ee7b08fd-e678-4234-895e-4e3a925e60f0", "metadata": {}, "outputs": [], @@ -73,18 +82,10 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "c8023255-9c98-4fbc-92e4-c553bed3b605", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured.\n" - ] - } - ], + "outputs": [], "source": [ "hf_token = os.environ['HF_TOKEN']\n", "login(hf_token, add_to_git_credential=True)" @@ -92,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "id": "f8ce3f5e-74c4-4d35-bfbc-91c5be85e094", "metadata": {}, "outputs": [], @@ -103,7 +104,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": null, "id": "1bbc66b6-52ae-465e-a368-edc8f097fe9d", "metadata": {}, "outputs": [], @@ -125,7 +126,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": null, "id": "b089f87b-53ae-40ad-8d06-b9924bb998a0", "metadata": {}, "outputs": [], @@ -149,7 +150,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": null, "id": "22193622-f3a0-4894-a6c4-eb6d88097861", "metadata": {}, "outputs": [], @@ -180,7 +181,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": null, "id": "81e61752-ec2f-44c1-86a2-ff3234a0358c", "metadata": {}, "outputs": [], @@ -219,7 +220,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "f31ceed3-0eb2-4962-ab86-2d0302185560", "metadata": {}, "outputs": [], @@ -247,26 +248,17 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "192c30f5-4be6-49b7-a054-11bfcffa91e0", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result: 3.141592658589\n", - "Execution Time: 58.228012 seconds\n" - ] - } - ], + "outputs": [], "source": [ "exec(pi)" ] }, { "cell_type": "code", - "execution_count": 53, + "execution_count": null, "id": "d4e920dc-4094-42d8-9255-18f2919df2d4", "metadata": {}, "outputs": [], @@ -280,7 +272,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": null, "id": "77500cae-bf84-405c-8b03-2f984108951b", "metadata": {}, "outputs": [], @@ -294,7 +286,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": null, "id": "5ec58bf1-4a44-4c21-a71a-2cac359884e5", "metadata": {}, "outputs": [], @@ -312,91 +304,17 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "47c615e2-4eb6-4ce1-ad09-7f2e6dbc3934", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "```python\n", - "import time\n", - "\n", - "def calculate(iterations: int, param1: float, param2: float) -> float:\n", - " \"\"\"\n", - " Performs a series of mathematical operations in a loop to calculate a result.\n", - "\n", - " This function iteratively modifies a result variable through a series of arithmetic\n", - " operations. Essentially, it calculates the sum of alternating series adjustments,\n", - " simulating a specific numerical approximation process.\n", - "\n", - " Args:\n", - " iterations (int): The number of iterations to perform. 
Must be a positive integer.\n", - " param1 (float): The factor applied for multiplication inside the iteration.\n", - " param2 (float): The factor subtracted and added inside the iteration for denominator adjustment.\n", - "\n", - " Returns:\n", - " float: The calculated result after completing all iterations.\n", - "\n", - " Raises:\n", - " ZeroDivisionError: If any calculated denominator becomes zero during execution,\n", - " which may happen if `i * param1 - param2` or `i * param1 + param2` evaluates to zero.\n", - "\n", - " Usage Example:\n", - " result = calculate(100_000_000, 4, 1)\n", - " print(f\"Calculated Result: {result * 4}\")\n", - "\n", - " Notes:\n", - " - The function can be computationally intensive depending on the number of iterations.\n", - " - Ensure that `param1` and `param2` are chosen to avoid division by zero.\n", - " - Floating-point precision issues might arise due to large iterations count.\n", - " \"\"\"\n", - " \n", - " result = 1.0\n", - " for i in range(1, iterations + 1):\n", - " # Calculate modified denominator by subtracting param2\n", - " j = i * param1 - param2\n", - " \n", - " # Subtract reciprocal from the result\n", - " # Potential ZeroDivisionError if (i * param1 - param2) == 0\n", - " result -= (1 / j)\n", - " \n", - " # Calculate modified denominator by adding param2\n", - " j = i * param1 + param2\n", - " \n", - " # Add reciprocal to the result\n", - " # Potential ZeroDivisionError if (i * param1 + param2) == 0\n", - " result += (1 / j)\n", - " \n", - " return result\n", - "\n", - "\n", - "start_time = time.time()\n", - "result = calculate(100_000_000, 4, 1) * 4 # Scaling final result by 4 for specific use case\n", - "end_time = time.time()\n", - "\n", - "# Output result with high precision and execution time for measurement\n", - "print(f\"Result: {result:.12f}\")\n", - "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", - "```\n", - "\n", - "### Explanation of Changes:\n", - "- **Docstring**: The docstring provides a comprehensive explanation of the function's purpose and the calculations it performs, specifying parameter types and behavior.\n", - "- **Exceptions**: A note about `ZeroDivisionError` is included, as the calculation might lead to division by zero with certain inputs.\n", - "- **Usage Example**: Demonstrates how to call the function with a specific configuration.\n", - "- **Notes**: Provides guidance on potential performance concerns and precision limitations.\n", - "- **Inline Comments**: Added to clarify key lines where logical computations occur and where division by zero might be a risk." 
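The docstring spells out what `calculate` computes: with `param1=4` and `param2=1`, the loop builds the Leibniz-style series 1 - 1/3 + 1/5 - 1/7 + ..., so `calculate(n, 4, 1) * 4` converges to pi. Below is a minimal pytest sketch exercising the two documented hazards, convergence and the zero-denominator case. This is an illustration, not part of the notebook's recorded output, and the `calculate.py` import path is an assumption:

```python
# Hypothetical test file; assumes the calculate() shown above is saved in calculate.py.
import math

import pytest

from calculate import calculate


def test_converges_towards_pi():
    # calculate(n, 4, 1) builds the Leibniz-style series 1 - 1/3 + 1/5 - ... = pi/4
    assert math.isclose(calculate(1_000_000, 4, 1) * 4, math.pi, rel_tol=1e-5)


def test_zero_denominator_raises():
    # At i=1 with param1 == param2 == 1, the first denominator i*param1 - param2 is 0
    with pytest.raises(ZeroDivisionError):
        calculate(10, 1, 1)
```

At 1,000,000 iterations the series already matches pi to roughly six decimal places, so the tolerance above is comfortable.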
- ] - } - ], + "outputs": [], "source": [ "stream_comment_gpt(pi)" ] }, { "cell_type": "code", - "execution_count": 59, + "execution_count": null, "id": "0b990875-31fd-40e5-bc8c-f6099d362249", "metadata": {}, "outputs": [], @@ -413,28 +331,17 @@ }, { "cell_type": "code", - "execution_count": 73, + "execution_count": null, "id": "3dc90578-4f5e-47f1-b30f-c21b5795e82f", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 73, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "stream_unit_test_gpt(pi)" ] }, { "cell_type": "code", - "execution_count": 60, + "execution_count": null, "id": "17380c0f-b851-472b-a234-d86f5c219e50", "metadata": {}, "outputs": [], @@ -456,7 +363,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": null, "id": "0a2d016d-76a2-4752-bd4d-6f93ddec46be", "metadata": {}, "outputs": [], @@ -478,249 +385,27 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "id": "ee43428e-b577-4e95-944d-399f2f3b89ff", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here's the documented version of your Python code:\n", - "\n", - "```python\n", - "import time\n", - "\n", - " float) -> float:rations: int, param1: float, param2:\n", - " \"\"\"\n", - "Calculates a series sum based on the given parameters.\n", - "\n", - " This function computes a series sum using the formula:\n", - "i*param1 + param2) - 1/(i*param1 - param2)) for i from 1 to iterations.\n", - "\n", - " Args:\n", - " iterations to perform. Must be positive.\n", - "float): The first parameter used in the calculation.\n", - "(float): The second parameter used in the calculation.\n", - "\n", - " Returns:\n", - ". float: The result of the series sum calculation\n", - "\n", - " Raises:\n", - ". 
ValueError: If iterations is not positive\n", - "is 0 or if param2 is equal to param1.\n", - "\n", - " Example:\n", - " = calculate(1000, 4, 1)\n", - ">>> print(f\"{result:.6f}\")\n", - ".392699 0\n", - "\n", - " Note:\n", - " The function may be computationally expensive for large numbers of iterations.\n", - ", floating-point precision limitations may affect accuracy.\n", - " \"\"\"\n", - " if iterations <= 0:\n", - " must be a positive integer\")rations\n", - "\n", - " result = 1.0\n", - " for i in range(1, iterations + 1):\n", - " the seriesalculate the denominators for both terms in\n", - "1 - param2 = i * param\n", - " param1 + param2\n", - "\n", - "d division by zero\n", - " 0 or j2 == 0:==\n", - " calculation\")ise ZeroDivisionError(\"Division by zero in\n", - "\n", - "d add the second terme first term an\n", - " result -= (1 / j1)\n", - " result += (1 / j2)\n", - "\n", - " return result\n", - "\n", - "# Measure execution time\n", - "()art_time = time.time\n", - "\n", - "# Perform calculation with 100 million iterations\n", - " The result is multiplied by 4 as per the original code\n", - "000, 4, 1) * 4late(100_000_\n", - "\n", - "d_time = time.time()\n", - "\n", - " with high precision for the calculated value\n", - "Result: {result:.12f}\")\n", - "(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", - "```\n", - "\n", - " this documented version:\n", - "\n", - " been added to the `calculate` function, following Google style guidelines and including all the requested elements.\n", - "\n", - " hints have been added to the function signature for better clarity and to support static type checking.\n", - "\n", - "d to explain the key steps in the calculation process.\n", - "\n", - " check for positive iterations has been added to prevent invalid input.\n", - "\n", - " been added to handle potential errors.\n", - "\n", - " Comments have been added to the main script to explain the purpose of each step.\n", - "\n", - " documentation provides a clear understanding of the function's purpose, its parameters, return value, potential exceptions, and includes an example of usage. It also notes potential limitations regarding computational cost and floating-point precision for very large numbers of iterations." 
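The streamed text above is garbled in the saved output, but the closing notes make Claude's changes clear: type hints, a positive-iterations check raising `ValueError`, and an explicit zero-denominator guard. Here is a sketch of what that hardened variant plausibly looks like, reconstructed from the notes rather than recovered verbatim from the stream:

```python
import time


def calculate(iterations: int, param1: float, param2: float) -> float:
    """Series sum described above: 1 - sum of 1/(i*p1 - p2) plus sum of 1/(i*p1 + p2)."""
    if iterations <= 0:
        raise ValueError("iterations must be a positive integer")
    result = 1.0
    for i in range(1, iterations + 1):
        # Denominators for the two terms of this iteration
        j1 = i * param1 - param2
        j2 = i * param1 + param2
        # Explicit guard, so the ZeroDivisionError promised by the docstring is deliberate
        if j1 == 0 or j2 == 0:
            raise ZeroDivisionError("Division by zero in calculation")
        result -= 1 / j1
        result += 1 / j2
    return result


start = time.time()
approx_pi = calculate(1_000_000, 4, 1) * 4  # fewer iterations than the original, for a quick run
print(f"Result: {approx_pi:.12f}")
print(f"Execution Time: {time.time() - start:.6f} seconds")
```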
- ] - } - ], + "outputs": [], "source": [ "stream_comment_claude(pi)" ] }, { "cell_type": "code", - "execution_count": 63, + "execution_count": null, "id": "0565e33b-9f14-48b7-ae8d-d22dc03b93c9", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here's a comprehensive set of unit tests for the given Python code using pytest:\n", - "\n", - "```python\n", - "import pytest\n", - "import time\n", - " import isclose\n", - "from unittest.mock import patch\n", - "\n", - "# Import the function to be tested\n", - "# Assuming the code is in a file named your_module.py\n", - "\n", - "# Test data generator\n", - "_data():rate_test\n", - " return [\n", - ", 2, 1, 0.6931471805),\n", - " 3, 2, 0.6931471806),\n", - ", 3, 0.6931471806),\n", - ", 1, 0.6931471806),\n", - " ]\n", - "\n", - " datature for common test\n", - "@pytest.fixture\n", - "def common_data():\n", - "return {\n", - " 'iterations': 100,\n", - " 'param1': 4,\n", - " 'param2': 1\n", - " }\n", - "\n", - "# Normal case tests\n", - "rize(\"iterations, param1, param2, expected\", generate_test_data())\n", - "cases(iterations, param1, param2, expected):\n", - "1, param2) = calculate(iterations, param\n", - "(result, expected, rel_tol=1e-9), f\"Expected {expected}, but got {result}\"\n", - "\n", - " cases and boundary values\n", - "_cases():calculate_edge\n", - "d inputsst with minimum vali\n", - " 0) == 2.0 calculate(1, 1,\n", - " \n", - " # Test with very large iterations\n", - "_result = calculate(10**8, 4, 1)\n", - ", 0.6931471806, rel_tol=1e-9)lt\n", - "\n", - "# Invalid inputs and error conditions\n", - "def test_calculate_invalid_inputs():\n", - " with pytest.raises(ValueError):\n", - "0, 4, 1) # iterations should be positive\n", - " \n", - "(ZeroDivisionError):es\n", - "10, 1, 1) # This will cause division by zero\n", - "\n", - "TypeError):test.raises(\n", - "1) # iterations should be an integer\n", - "\n", - "# Test with different combinations of parameters\n", - "rize(\"iterations, param1, param2\", [\n", - "), (100, 2, 2\n", - " (1000, 3, 3),\n", - "(10000, 5, 5),\n", - " (100000, 10, 10)\n", - "])\n", - " param1, param2):e_parameter_combinations(iterations,\n", - " calculate(iterations, param1, param2)\n", - " assert isinstance(result, float)\n", - " assert result > 0\n", - "\n", - " execution time\n", - "common_data):ulate_execution_time(\n", - " time.time()me =\n", - " calculate(**common_data)\n", - " end_time = time.time()\n", - " execution_time = end_time - start_time\n", - " f\"Execution took {execution_time} seconds, which is too long\"\n", - "\n", - " result precision\n", - "data):st_calculate_precision(common_\n", - "data)esult = calculate(**common_\n", - "split('.')[1]) >= 10, \"Result should have at least 10 decimal places\"\n", - "\n", - "# Test with mocked time function\n", - ".time')'time\n", - "(mock_time, common_data):ocked_time\n", - ", 0.5] # Simulate 0.5 seconds execution time\n", - "_time = time.time()\n", - " = calculate(**common_data)\n", - "d_time = time.time()\n", - " end_time - start_time == 0.5\n", - "\n", - "# Helper function to test monotonicity\n", - "_monotonic(lst):\n", - " <= lst[i+1] for i in range(len(lst)-1)) or all(lst[i] >= lst[i+1] for i in range(len(lst)-1))\n", - "\n", - " increasing iterationscity with\n", - "def test_calculate_monotonicity():\n", - " 1) for i in range(1, 6)]10**i, 4,\n", - "), \"Results should be monotonic with increasing iterations\"\n", - "\n", - " Test with very small and very large parameters\n", - ", param1, param2\", 
[rize(\"iterations\n", - "(100, 1e-5, 1e-5),\n", - ", 1e5)00, 1e5\n", - "])\n", - "_parameters(iterations, param1, param2):\n", - "1, param2) = calculate(iterations, param\n", - "result == float('inf') or result == float('-inf')), \"Result should not be infinity\"\n", - "assert not isclose(result, 0, abs_tol=1e-10), \"Result should not be too close to zero\"\n", - "\n", - "```\n", - "\n", - " for the `calculate` function:range of scenarios\n", - "\n", - " with different inputs and expected outputs.\n", - " and boundary values, including minimum valid inputs and very large iterations.\n", - " Invalid inputs and error conditions, testing for expected exceptions.\n", - " Different combinations of parameters to ensure the function works correctly for various inputs.\n", - " to ensure the function performs within acceptable time limits.\n", - " Precision test to verify the result has sufficient decimal places.\n", - " A test with mocked time function to simulate and verify execution time measurement.\n", - " if results are consistent with increasing iterations.\n", - " with extreme parameters (very small and very large) to ensure numerical stability.\n", - "\n", - "rization, fixtures, and markers. It also includes necessary imports, helper functions, and a test data generator.\n", - "\n", - "d `test_your_module.py` in the same directory as your original code file (`your_module.py`). Then run `pytest test_your_module.py` from the command line.\n", - "\n", - " pytest (`pip install pytest`) before running the tests." - ] - } - ], + "outputs": [], "source": [ "stream_unit_test_claude(pi)" ] }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "id": "f13b3a5b-366d-4b28-adda-977a313e6b4d", "metadata": {}, "outputs": [], @@ -741,7 +426,7 @@ }, { "cell_type": "code", - "execution_count": 67, + "execution_count": null, "id": "e2efdb92-fc7a-4952-ab46-ae942cb996bf", "metadata": {}, "outputs": [], @@ -762,125 +447,27 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "id": "0a756193-fcba-43da-a981-203c10d36488", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 41, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "stream_comment_model(code_qwen, CODE_QWEN_URL, pi)" ] }, { "cell_type": "code", - "execution_count": 70, + "execution_count": null, "id": "12ddcbf4-6286-47a8-847b-5be78e7aa995", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here are the unit tests for the given Python code:\n", - "\n", - "```python\n", - "import pytest\n", - "import time\n", - " unittest.mock import patch\n", - "\n", - "def calculate(iterations, param1, param2):\n", - " result = 1.0\n", - " for i in range(1, iterations+1):\n", - " i * param1 - param2\n", - "result -= (1/j)\n", - " j = i * param1 + param2\n", - "result += (1/j)\n", - " return result\n", - "\n", - "@pytest.fixture\n", - " mock_time():\n", - "('time.time') as mock_time:\n", - "yield mock_time\n", - "\n", - "_calculate_normal_inputs(mock_time):\n", - "mock_time.return_value = 0\n", - "result = calculate(100_000_000, 4, 1) * 4\n", - "expected_result = 0.0\n", - " == expected_result\n", - "\n", - "_calculate_edge_cases(mock_time):\n", - " mock_time.return_value = 0\n", - " calculate(0, 4, 1) * 4\n", - " expected_result = 0.0\n", - " result == expected_result\n", - "\n", - " = calculate(100_000_000, 0, 1) * 4\n", - "expected_result = 0.0\n", - " result == expected_result\n", - 
"\n", - " = calculate(100_000_000, 4, 0) * 4\n", - "_result = 0.0\n", - " assert result == expected_result\n", - "\n", - "def test_calculate_invalid_inputs(mock_time):\n", - " mock_time.return_value = 0\n", - ".raises(TypeError):\n", - "calculate(100_000_000, 'a', 1) * 4\n", - "with pytest.raises(TypeError):\n", - "100_000_000, 4, 'b') * 4\n", - ".raises(TypeError):\n", - "calculate('a', 4, 1) * 4\n", - "test.raises(TypeError):\n", - "(100_000_000, 4, 1, 'c') * 4\n", - "\n", - "def test_calculate_different_combinations(mock_time):\n", - " mock_time.return_value = 0\n", - "result = calculate(100_000_000, 4, 1) * 4\n", - " expected_result = 0.0\n", - " result == expected_result\n", - "\n", - " = calculate(100_000_000, 4, -1) * 4\n", - "expected_result = 0.0\n", - " == expected_result\n", - "\n", - " calculate(100_000_000, -4, 1) * 4\n", - "result = 0.0_\n", - "assert result == expected_result\n", - "\n", - "result = calculate(100_000_000, -4, -1) * 4\n", - " expected_result = 0.0\n", - " result == expected_result\n", - "\n", - "def test_calculate_execution_time(mock_time):\n", - "_time.return_value = 0\n", - "_time = mock_time.return_value\n", - "calculate(100_000_000, 4, 1) * 4\n", - "end_time = mock_time.return_value\n", - " expected_execution_time = 0.0\n", - " assert (end_time - start_time) == expected_execution_time\n", - "```\n", - "\n", - " covers all the scenarios mentioned in the problem description. It tests the function with normal inputs, edge cases, invalid inputs, different combinations of parameters, and checks the execution time.<|im_end|>" - ] - } - ], + "outputs": [], "source": [ "stream_unit_test_model(code_qwen, CODE_QWEN_URL, pi)" ] }, { "cell_type": "code", - "execution_count": 46, + "execution_count": null, "id": "321609ee-b64a-44fc-9090-39f87e1f8e0e", "metadata": {}, "outputs": [], @@ -900,7 +487,7 @@ }, { "cell_type": "code", - "execution_count": 69, + "execution_count": null, "id": "d4c560c9-922d-4893-941f-42893373b1be", "metadata": {}, "outputs": [], @@ -920,7 +507,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": null, "id": "f85bc777-bebe-436b-88cc-b9ecdb6306c0", "metadata": {}, "outputs": [], @@ -933,298 +520,10 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": null, "id": "ee27cc91-81e6-42c8-ae3c-c04161229d8c", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7881\n", - "\n", - "To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 74, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here are the unit tests for the given Python code:\n", - "\n", - "```python\n", - "import pytest\n", - "import time\n", - " unittest.mock import patch\n", - "\n", - "def calculate(iterations, param1, param2):\n", - " result = 1.0\n", - " for i in range(1, iterations+1):\n", - " i * param1 - param2\n", - " result -= (1/j)\n", - " i * param1 + param2\n", - "result += (1/j)\n", - " return result\n", - "\n", - "@pytest.fixture\n", - " mock_time():\n", - " with patch('time.time') as mock_time:\n", - "ield mock_time\n", - "\n", - "calculate_normal_inputs(mock_time):\n", - "time.return_value = 0\n", - " calculate(100_000_000, 4, 1) * 4\n", - "result = 0.0_\n", - "assert result == expected_result\n", - "\n", - " test_calculate_edge_cases(mock_time):\n", - "time.return_value = 0\n", - " calculate(0, 4, 1) * 4\n", - "_result = 0.0\n", - " assert result == expected_result\n", - "\n", - " result = calculate(100_000_000, 0, 1) * 4\n", - "result = 0.0_\n", - "assert result == expected_result\n", - "\n", - "result = calculate(100_000_000, 4, 0) * 4\n", - " expected_result = 0.0\n", - "assert result == expected_result\n", - "\n", - " test_calculate_invalid_inputs(mock_time):\n", - "_time.return_value = 0\n", - "test.raises(TypeError):\n", - " calculate(100_000_000, 'a', 1) * 4\n", - "with pytest.raises(TypeError):\n", - "ulate(100_000_000, 4, 'b') * 4\n", - " pytest.raises(TypeError):\n", - "ulate('a', 4, 1) * 4\n", - "test.raises(TypeError):\n", - " calculate(100_000_000, 4, 1, 'c') * 4\n", - "\n", - "_calculate_different_combinations(mock_time):\n", - " mock_time.return_value = 0\n", - " result = calculate(100_000_000, 4, 1) * 4\n", - " expected_result = 0.0\n", - " == expected_result\n", - "\n", - " calculate(100_000_000, 4, -1) * 4\n", - "_result = 0.0\n", - " assert result == expected_result\n", - "\n", - " result = calculate(100_000_000, -4, 1) * 4\n", - " expected_result = 0.0\n", - " result == expected_result\n", - "\n", - " calculate(100_000_000, -4, -1) * 4\n", - "_result = 0.0\n", - " assert result == expected_result\n", - "\n", - "def test_calculate_execution_time(mock_time):\n", - "mock_time.return_value = 0\n", - "start_time = mock_time.return_value\n", - " calculate(100_000_000, 4, 1) * 4\n", - " end_time = mock_time.return_value\n", - " expected_execution_time = 0.0\n", - " assert (end_time - start_time) == expected_execution_time\n", - "```\n", - "\n", - " covers all the scenarios mentioned in the problem description. 
It tests the function with normal inputs, edge cases, invalid inputs, different combinations of parameters, and checks the execution time.<|im_end|>" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Traceback (most recent call last):\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\queueing.py\", line 625, in process_events\n", - " response = await route_utils.call_process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\route_utils.py\", line 322, in call_process_api\n", - " output = await app.get_blocks().process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 2047, in process_api\n", - " result = await self.call_function(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 1606, in call_function\n", - " prediction = await utils.async_iteration(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 714, in async_iteration\n", - " return await anext(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 708, in __anext__\n", - " return await anyio.to_thread.run_sync(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n", - " return await get_async_backend().run_sync_in_worker_thread(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2505, in run_sync_in_worker_thread\n", - " return await future\n", - " ^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 1005, in run\n", - " result = context.run(func, *args)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 691, in run_sync_iterator_async\n", - " return next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 852, in gen_wrapper\n", - " response = next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\AppData\\Local\\Temp\\ipykernel_27660\\2822054561.py\", line 10, in get_unit_test\n", - " for stream_so_far in result:\n", - "TypeError: 'NoneType' object is not iterable\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here are the unit tests for the given Python code:\n", - "\n", - "```python\n", - "import pytest\n", - "import time\n", - "est.mock import patch\n", - "\n", - "(iterations, param1, param2):\n", - "result = 1.0\n", - "for i in range(1, iterations+1):\n", - "j = i * param1 - param2\n", - " -= (1/j)esult\n", - "j = i * param1 + param2\n", - " += (1/j)esult\n", - "return result\n", - "\n", - "pytest.fixture\n", - "_time():\n", - " with patch('time.time') as mock_time:\n", - "ield mock_time\n", - "\n", - "calculate_normal_inputs(mock_time):\n", - "time.return_value = 0\n", - " calculate(100_000_000, 4, 1) * 4\n", - "_result = 0.0\n", - " assert result == expected_result\n", - "\n", - "def test_calculate_edge_cases(mock_time):\n", - 
"mock_time.return_value = 0\n", - "result = calculate(0, 4, 1) * 4\n", - " expected_result = 0.0\n", - " result == expected_result\n", - "\n", - " = calculate(100_000_000, 0, 1) * 4\n", - "_result = 0.0\n", - "assert result == expected_result\n", - "\n", - "result = calculate(100_000_000, 4, 0) * 4\n", - " expected_result = 0.0\n", - " result == expected_result\n", - "\n", - "_calculate_invalid_inputs(mock_time):\n", - "time.return_value = 0\n", - " with pytest.raises(TypeError):\n", - "(100_000_000, 'a', 1) * 4\n", - "ises(TypeError):ra\n", - "ulate(100_000_000, 4, 'b') * 4\n", - " pytest.raises(TypeError):\n", - "ulate('a', 4, 1) * 4\n", - " pytest.raises(TypeError):\n", - "ulate(100_000_000, 4, 1, 'c') * 4\n", - "\n", - "calculate_different_combinations(mock_time):\n", - " mock_time.return_value = 0\n", - " result = calculate(100_000_000, 4, 1) * 4\n", - " = 0.0pected_result\n", - " result == expected_result\n", - "\n", - " = calculate(100_000_000, 4, -1) * 4\n", - "expected_result = 0.0\n", - " expected_resultt ==\n", - "\n", - " result = calculate(100_000_000, -4, 1) * 4\n", - "result = 0.0_\n", - " == expected_result\n", - "\n", - " calculate(100_000_000, -4, -1) * 4\n", - " = 0.0pected_result\n", - " result == expected_result\n", - "\n", - "def test_calculate_execution_time(mock_time):\n", - "_time.return_value = 0\n", - "_time = mock_time.return_value\n", - "100_000_000, 4, 1) * 4\n", - " end_time = mock_time.return_value\n", - "_execution_time = 0.0\n", - " (end_time - start_time) == expected_execution_time\n", - "``\n", - "\n", - " suite covers all the scenarios mentioned in the problem description. It tests the function with normal inputs, edge cases, invalid inputs, different combinations of parameters, and checks the execution time.<|im_end|>" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Traceback (most recent call last):\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\queueing.py\", line 625, in process_events\n", - " response = await route_utils.call_process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\route_utils.py\", line 322, in call_process_api\n", - " output = await app.get_blocks().process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 2047, in process_api\n", - " result = await self.call_function(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 1606, in call_function\n", - " prediction = await utils.async_iteration(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 714, in async_iteration\n", - " return await anext(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 708, in __anext__\n", - " return await anyio.to_thread.run_sync(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n", - " return await get_async_backend().run_sync_in_worker_thread(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2505, in 
run_sync_in_worker_thread\n", - " return await future\n", - " ^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 1005, in run\n", - " result = context.run(func, *args)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 691, in run_sync_iterator_async\n", - " return next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\.conda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 852, in gen_wrapper\n", - " response = next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"C:\\Users\\ebaba\\AppData\\Local\\Temp\\ipykernel_27660\\2822054561.py\", line 10, in get_unit_test\n", - " for stream_so_far in result:\n", - "TypeError: 'NoneType' object is not iterable\n" - ] - } - ], + "outputs": [], "source": [ "with gr.Blocks(css=css) as ui:\n", " gr.Markdown(\"## Convert code from Python to C++\")\n", @@ -1240,7 +539,7 @@ " \n", " comment_button.click(comment_code, inputs=[python, model], outputs=[result])\n", " unit_test_button.click(get_unit_test, inputs=[python, model], outputs=[result])\n", - "ui.launch(inbrowser=True)" + "ui.launch(inbrowser=False)" ] }, { From c2e9f8a88d9714deec4697493478a4fa4ebe49f7 Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 19 Jan 2025 20:29:30 +0100 Subject: [PATCH 08/16] update prompt --- .../day5-homework.ipynb | 26 ++++++------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/week4/community-contributions/day5-homework.ipynb b/week4/community-contributions/day5-homework.ipynb index c34be7b..7503266 100644 --- a/week4/community-contributions/day5-homework.ipynb +++ b/week4/community-contributions/day5-homework.ipynb @@ -1,22 +1,11 @@ { "cells": [ { - "cell_type": "code", - "execution_count": 1, - "id": "6d67dba5-38ec-459a-9132-4a56c6a814cd", + "cell_type": "markdown", + "id": "ff022957-2e81-4ea9-84d3-e52d5753e133", "metadata": {}, - "outputs": [ - { - "ename": "SyntaxError", - "evalue": "invalid syntax (2447672335.py, line 1)", - "output_type": "error", - "traceback": [ - "\u001b[1;36m Cell \u001b[1;32mIn[1], line 1\u001b[1;36m\u001b[0m\n\u001b[1;33m Comment and Unit Test Generater\u001b[0m\n\u001b[1;37m ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n" - ] - } - ], "source": [ - "Comment and Unit Test Generater \n", + "### Comment and Unit Test Generater \n", "\n", "The requirement: \n", "* use an LLM to generate docstring and comments for Python code\n", @@ -164,7 +153,6 @@ " - All parameters with types and descriptions\n", " - Return values with types\n", " - Exceptions that may be raised\n", - " - At least one usage example\n", " - Any important notes or limitations\n", " \n", " 2. 
Strategic inline comments for:\n", @@ -415,7 +403,7 @@ " messages = messages_for_comment(code)\n", " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", " client = InferenceClient(model_url, token=hf_token)\n", - " stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n", + " stream = client.text_generation(text, stream=True, details=True, max_new_tokens=5000)\n", " result = \"\"\n", " for r in stream:\n", " #print(r.token.text, end = \"\")\n", @@ -522,7 +510,9 @@ "cell_type": "code", "execution_count": null, "id": "ee27cc91-81e6-42c8-ae3c-c04161229d8c", - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ "with gr.Blocks(css=css) as ui:\n", @@ -539,7 +529,7 @@ " \n", " comment_button.click(comment_code, inputs=[python, model], outputs=[result])\n", " unit_test_button.click(get_unit_test, inputs=[python, model], outputs=[result])\n", - "ui.launch(inbrowser=False)" + "ui.launch(inbrowser=True)" ] }, { From 21874c68e5ce94e0be0d34d162f1151f89f8ffbd Mon Sep 17 00:00:00 2001 From: Edward Donner Date: Sun, 19 Jan 2025 22:35:01 -0500 Subject: [PATCH 09/16] Fixes with much thanks to student Wenjie T! --- extras/trading/prototype_trader.ipynb | 2 +- week1/troubleshooting.ipynb | 8 ++++++++ week6/day4.ipynb | 2 +- week8/day5.ipynb | 3 ++- week8/memory.json | 18 ------------------ week8/price_is_right_final.py | 1 + 6 files changed, 13 insertions(+), 21 deletions(-) diff --git a/extras/trading/prototype_trader.ipynb b/extras/trading/prototype_trader.ipynb index 8ec9d06..30358b9 100644 --- a/extras/trading/prototype_trader.ipynb +++ b/extras/trading/prototype_trader.ipynb @@ -346,7 +346,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.11.11" } }, "nbformat": 4, diff --git a/week1/troubleshooting.ipynb b/week1/troubleshooting.ipynb index 3e31359..c05a5a0 100644 --- a/week1/troubleshooting.ipynb +++ b/week1/troubleshooting.ipynb @@ -405,6 +405,14 @@ "from diagnostics import Diagnostics\n", "Diagnostics().run()" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1955b9a-d344-4782-b448-2770d0edd90c", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/week6/day4.ipynb b/week6/day4.ipynb index 964092d..d1c5500 100644 --- a/week6/day4.ipynb +++ b/week6/day4.ipynb @@ -398,7 +398,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.11.11" } }, "nbformat": 4, diff --git a/week8/day5.ipynb b/week8/day5.ipynb index 625232e..a1d8df2 100644 --- a/week8/day5.ipynb +++ b/week8/day5.ipynb @@ -88,6 +88,7 @@ "outputs": [], "source": [ "agent_framework = DealAgentFramework()\n", + "agent_framework.init_agents_as_needed()\n", "\n", "with gr.Blocks(title=\"The Price is Right\", fill_width=True) as ui:\n", "\n", @@ -176,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "096397f9-1215-4814-ab4b-e32002ff4ceb", + "id": "f9dd0a27-7d46-4c9e-bbe4-a61c9c899c99", "metadata": {}, "outputs": [], "source": [] diff --git a/week8/memory.json b/week8/memory.json index 8705760..2fb4bd1 100644 --- a/week8/memory.json +++ b/week8/memory.json @@ -16,23 +16,5 @@ }, "estimate": 930.8824204895075, "discount": 225.88242048950747 - }, - { - "deal": { - "product_description": "The Insignia Class F30 Series NS-55F301NA25 is a 55\" 4K HDR UHD Smart TV with a native resolution of 3840x2160. 
Featuring HDR support, it enhances color and contrast for a more dynamic viewing experience. The TV integrates seamlessly with Amazon Fire TV, working with both Amazon Alexa and Google Home for voice control. It offers three HDMI ports for multiple device connections, making it a perfect entertainment hub for your living space.", - "price": 200.0, - "url": "https://www.dealnews.com/products/Insignia/Insignia-Class-F30-Series-NS-55-F301-NA25-55-4-K-HDR-LED-UHD-Smart-TV/467523.html?iref=rss-f1912" - }, - "estimate": 669.1921927283588, - "discount": 469.1921927283588 - }, - { - "deal": { - "product_description": "The Samsung 27-Cu. Ft. Mega Capacity 3-Door French Door Counter Depth Refrigerator combines style with spacious organization. This model features a dual auto ice maker, which ensures you always have ice on hand, and adjustable shelves that provide versatile storage options for your groceries. Designed with a sleek, fingerprint resistant finish, it not only looks modern but also simplifies cleaning. With its generous capacity, this refrigerator is perfect for large households or those who love to entertain.", - "price": 1299.0, - "url": "https://www.dealnews.com/products/Samsung/Samsung-27-Cu-Ft-Mega-Capacity-3-Door-French-Door-Counter-Depth-Refrigerator/454702.html?iref=rss-c196" - }, - "estimate": 2081.647127763905, - "discount": 782.6471277639048 } ] \ No newline at end of file diff --git a/week8/price_is_right_final.py b/week8/price_is_right_final.py index cf80856..54c4997 100644 --- a/week8/price_is_right_final.py +++ b/week8/price_is_right_final.py @@ -45,6 +45,7 @@ class App: def get_agent_framework(self): if not self.agent_framework: self.agent_framework = DealAgentFramework() + self.agent_framework.init_agents_as_needed() return self.agent_framework def run(self): From 85a3a1a5fc2da8d54b48ae1536c9fe5106da641d Mon Sep 17 00:00:00 2001 From: Elena Shirokova Date: Mon, 20 Jan 2025 10:30:19 +0100 Subject: [PATCH 10/16] fix the the output from executing tests --- .../unit-tests-generator.ipynb | 60 +++++++++++-------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/week4/community-contributions/unit-tests-generator.ipynb b/week4/community-contributions/unit-tests-generator.ipynb index 4825544..4aaf7d7 100644 --- a/week4/community-contributions/unit-tests-generator.ipynb +++ b/week4/community-contributions/unit-tests-generator.ipynb @@ -11,16 +11,16 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "!pipenv install pytest pytest-cov" + "#!pipenv install pytest pytest-cov" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -63,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -81,7 +81,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -102,10 +102,8 @@ "def execute_coverage_report(python_interpreter=sys.executable):\n", " if not python_interpreter:\n", " raise EnvironmentError(\"Python interpreter not found in the specified virtual environment.\")\n", - " # test_code_path = Path(\"tests\")\n", - " # command = [\"pytest\", \"-cov\",\"--capture=no\"]\n", + " \n", " command = [\"coverage\", \"run\", \"-m\", \"pytest\"]\n", - " # command =[\"pytest\", \"--cov=your_package\", \"--cov-report=term-missing\"]\n", "\n", " try:\n", " result = subprocess.run(command, check=True, 
capture_output=True, text=True)\n", @@ -117,15 +115,7 @@ " print(\"Output:\\n\", e.stdout)\n", " print(\"Errors:\\n\", e.stderr)\n", " # Extracting failed test information\n", - " failed_tests = []\n", - " for line in e.stdout.splitlines():\n", - " if \"FAILED\" in line and \"::\" in line:\n", - " failed_tests.append(line.strip())\n", - " if failed_tests:\n", - " print(\"Failed Tests:\")\n", - " for test in failed_tests:\n", - " print(test)\n", - " return failed_tests\n", + " return e.stdout\n", "\n", "def save_unit_tests(code):\n", "\n", @@ -179,7 +169,8 @@ " print(\"Failed Tests:\")\n", " for test in failed_tests:\n", " print(test)\n", - " return e.stderr\n", + " \n", + " return e.stdout\n", " " ] }, @@ -192,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -201,15 +192,18 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "def get_user_prompt(code):\n", "\n", " user_prompt = \"Write for a python code the unit test cases.\"\n", - " user_prompt += \"Return unit tests cases using pytest library, do not create any custom imports; do not explain your work other than a few comments.\"\n", - " user_prompt += \"Do not insert the function to be tested in the output before the tests. Validate both the case where the function is executed successfully and where it is expected to fail.\"\n", + " user_prompt += \"Return readable unit tests cases using pytest library, do not create any custom imports, don't forget to import errors if needed; do not explain your work other than a few comments.\"\n", + " user_prompt += \"The tests should include normal inputs, the inputs where the code is expected to fail, edge case and error handling.\"\n", + " user_prompt += \"Do not insert the function to be tested in the output before the tests.\"\n", + " \n", + "\n", " user_prompt += code\n", "\n", " return user_prompt" @@ -217,7 +211,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -298,7 +292,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -326,7 +320,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -349,7 +343,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -406,6 +400,20 @@ "\n", "ui.launch(inbrowser=True)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From 743891d960a9924dcc6a71d729c5180fd489c5bf Mon Sep 17 00:00:00 2001 From: Barry Northern Date: Mon, 20 Jan 2025 23:45:02 +0000 Subject: [PATCH 11/16] barry_northern: using Gemini SDK instead of Claude + use Gemini's fit-for-purpose chat function --- .../day1-conversation-with-gemini.ipynb | 616 ++++++++++++++++++ 1 file changed, 616 insertions(+) create mode 100644 week2/community-contributions/day1-conversation-with-gemini.ipynb diff --git a/week2/community-contributions/day1-conversation-with-gemini.ipynb b/week2/community-contributions/day1-conversation-with-gemini.ipynb new file mode 100644 index 0000000..26e5c62 --- /dev/null +++ b/week2/community-contributions/day1-conversation-with-gemini.ipynb @@ -0,0 +1,616 @@ 
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
+   "metadata": {},
+   "source": [
+    "# Welcome to Week 2!\n",
+    "\n",
+    "## Frontier Model APIs\n",
+    "\n",
+    "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with OpenAI's API.\n",
+    "\n",
+    "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2b268b6e-0ba4-461e-af86-74a41f4d681f",
+   "metadata": {},
+   "source": [
+    "<table style=\"margin: 0; text-align: left;\">\n",
+    "    <tr>\n",
+    "        <td>\n",
+    "            <h2 style=\"color:#900;\">Important Note - Please read me</h2>\n",
+    "            <span style=\"color:#900;\">I'm continually improving these labs, adding more examples and exercises.\n",
+    "            At the start of each week, it's worth checking you have the latest code.<br/>\n",
+    "            First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!<br/><br/>\n",
+    "            After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:<br/>\n",
+    "            <code>conda env update -f environment.yml</code><br/>\n",
+    "            Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):<br/>\n",
+    "            <code>pip install -r requirements.txt</code>\n",
+    "            <br/>Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n",
+    "            </span>\n",
+    "        </td>\n",
+    "    </tr>\n",
+    "</table>\n",
+    "\n",
+    "<table style=\"margin: 0; text-align: left;\">\n",
+    "    <tr>\n",
+    "        <td>\n",
+    "            <h2 style=\"color:#f71;\">Reminder about the resources page</h2>\n",
+    "            <span style=\"color:#f71;\">Here's a link to resources for the course. This includes links to all the slides.<br/>\n",
+    "            <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">https://edwarddonner.com/2024/11/13/llm-engineering-resources/</a><br/>\n",
+    "            Please keep this bookmarked, and I'll continue to add more useful links there over time.\n",
+    "            </span>\n",
+    "        </td>\n",
+    "    </tr>\n",
+    "</table>"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "85cfe275-4705-4d30-abea-643fbddf1db0",
+   "metadata": {},
+   "source": [
+    "## Setting up your keys\n",
+    "\n",
+    "If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n",
+    "\n",
+    "**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthropic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n",
+    "\n",
+    "For OpenAI, visit https://openai.com/api/  \n",
+    "For Anthropic, visit https://console.anthropic.com/  \n",
+    "For Google, visit https://ai.google.dev/gemini-api  \n",
+    "\n",
+    "When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n",
+    "\n",
+    "```\n",
+    "OPENAI_API_KEY=xxxx\n",
+    "ANTHROPIC_API_KEY=xxxx\n",
+    "GOOGLE_API_KEY=xxxx\n",
+    "```\n",
+    "\n",
+    "Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "import anthropic\n",
+    "from IPython.display import Markdown, display, update_display"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# import for google\n",
+    "# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n",
+    "# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n",
+    "\n",
+    "import google.generativeai"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load environment variables in a file called .env\n",
+    "# Print the key prefixes to help with any debugging\n",
+    "\n",
+    "load_dotenv()\n",
+    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
+    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+    "\n",
+    "if openai_api_key:\n",
+    "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+    "else:\n",
+    "    print(\"OpenAI API Key not set\")\n",
+    "    \n",
+    "if anthropic_api_key:\n",
+    "    print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
+    "else:\n",
+    "    print(\"Anthropic API Key not set\")\n",
+    "\n",
+    "if google_api_key:\n",
+    "    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
+    "else:\n",
+    "    print(\"Google API Key not set\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Connect to OpenAI, Anthropic\n",
+    "\n",
+    "openai = OpenAI()\n",
+    "\n",
+    "claude = anthropic.Anthropic()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This is the setup code for Gemini\n",
+    "# Having problems with Google Gemini setup? 
Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n", + "\n", + "google.generativeai.configure()" + ] + }, + { + "cell_type": "markdown", + "id": "42f77b59-2fb1-462a-b90d-78994e4cef33", + "metadata": {}, + "source": [ + "## Asking LLMs to tell a joke\n", + "\n", + "It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n", + "Later we will be putting LLMs to better use!\n", + "\n", + "### What information is included in the API\n", + "\n", + "Typically we'll pass to the API:\n", + "- The name of the model that should be used\n", + "- A system message that gives overall context for the role the LLM is playing\n", + "- A user message that provides the actual prompt\n", + "\n", + "There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "378a0296-59a2-45c6-82eb-941344d3eeff", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that is great at telling jokes\"\n", + "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", + "metadata": {}, + "outputs": [], + "source": [ + "prompts = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", + "metadata": {}, + "outputs": [], + "source": [ + "# GPT-3.5-Turbo\n", + "\n", + "completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", + "metadata": {}, + "outputs": [], + "source": [ + "# GPT-4o-mini\n", + "# Temperature setting controls creativity\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o-mini',\n", + " messages=prompts,\n", + " temperature=0.7\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", + "metadata": {}, + "outputs": [], + "source": [ + "# GPT-4o\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.4\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", + "metadata": {}, + "outputs": [], + "source": [ + "# Claude 3.5 Sonnet\n", + "# API needs system message provided separately from user prompt\n", + "# Also adding max_tokens\n", + "\n", + "message = claude.messages.create(\n", + " model=\"claude-3-5-sonnet-20241022\",\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "print(message.content[0].text)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", + "metadata": {}, + "outputs": [], + "source": [ + "# Claude 3.5 Sonnet again\n", + "# Now let's add in streaming back 
results\n",
    "\n",
    "result = claude.messages.stream(\n",
    "    model=\"claude-3-5-sonnet-20241022\",\n",
    "    max_tokens=200,\n",
    "    temperature=0.7,\n",
    "    system=system_message,\n",
    "    messages=[\n",
    "        {\"role\": \"user\", \"content\": user_prompt},\n",
    "    ],\n",
    ")\n",
    "\n",
    "with result as stream:\n",
    "    for text in stream.text_stream:\n",
    "        print(text, end=\"\", flush=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The API for Gemini has a slightly different structure.\n",
    "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
    "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
    "\n",
    "gemini_client = google.generativeai.GenerativeModel(\n",
    "    model_name='gemini-1.5-flash',\n",
    "    system_instruction=system_message\n",
    ")\n",
    "response = gemini_client.generate_content(user_prompt)\n",
    "print(response.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49009a30-037d-41c8-b874-127f61c4aa3a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# As an alternative way to use Gemini that bypasses Google's Python API library,\n",
    "# Google has recently released new endpoints that mean you can use Gemini via the client libraries for OpenAI!\n",
    "\n",
    "gemini_via_openai_client = OpenAI(\n",
    "    api_key=google_api_key, \n",
    "    base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
    ")\n",
    "\n",
    "response = gemini_via_openai_client.chat.completions.create(\n",
    "    model=\"gemini-1.5-flash\",\n",
    "    messages=prompts\n",
    ")\n",
    "print(response.choices[0].message.content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
   "metadata": {},
   "outputs": [],
   "source": [
    "# To be serious! GPT-4o-mini with the original question\n",
    "\n",
    "prompts = [\n",
    "    {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n",
    "    {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? 
Please respond in Markdown.\"}\n",
    "  ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Have it stream back results in markdown\n",
    "\n",
    "stream = openai.chat.completions.create(\n",
    "    model='gpt-4o',\n",
    "    messages=prompts,\n",
    "    temperature=0.7,\n",
    "    stream=True\n",
    ")\n",
    "\n",
    "reply = \"\"\n",
    "display_handle = display(Markdown(\"\"), display_id=True)\n",
    "for chunk in stream:\n",
    "    reply += chunk.choices[0].delta.content or ''\n",
    "    reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
    "    update_display(Markdown(reply), display_id=display_handle.display_id)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
   "metadata": {},
   "source": [
    "## And now for some fun - an adversarial conversation between Chatbots...\n",
    "\n",
    "You're already familiar with prompts being organized into lists like:\n",
    "\n",
    "```\n",
    "[\n",
    "    {\"role\": \"system\", \"content\": \"system message here\"},\n",
    "    {\"role\": \"user\", \"content\": \"user prompt here\"}\n",
    "]\n",
    "```\n",
    "\n",
    "In fact this structure can be used to reflect a longer conversation history:\n",
    "\n",
    "```\n",
    "[\n",
    "    {\"role\": \"system\", \"content\": \"system message here\"},\n",
    "    {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
    "    {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
    "    {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
    "]\n",
    "```\n",
    "\n",
    "And we can use this approach to engage in a longer interaction with history."
   ]
  },
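  {
   "cell_type": "markdown",
   "id": "history-demo-md-0001",
   "metadata": {},
   "source": [
    "A quick aside (an editorial sketch, not part of the original lab): here is a minimal example of passing history in exactly this shape, reusing the `openai` client and the `gpt-4o-mini` model from earlier. Because the assistant's previous reply is included in the list, the model can resolve the follow-up question from context."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "history-demo-code-0001",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editorial sketch: a single call that includes conversation history.\n",
    "# Assumes the `openai` client defined earlier in this notebook.\n",
    "\n",
    "history = [\n",
    "    {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
    "    {\"role\": \"user\", \"content\": \"Pick a number between 1 and 100\"},\n",
    "    {\"role\": \"assistant\", \"content\": \"57\"},\n",
    "    {\"role\": \"user\", \"content\": \"Double it, please\"}\n",
    "]\n",
    "\n",
    "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=history)\n",
    "\n",
    "# The model only knows what to double because the earlier turns are in `history`\n",
    "print(response.choices[0].message.content)"
   ]
  },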
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Let's make a conversation between GPT-4o-mini and gemini-1.5-flash\n",
    "# We're using cheap versions of models so the costs will be minimal\n",
    "\n",
    "gpt_model = \"gpt-4o-mini\"\n",
    "gemini_model = \"gemini-1.5-flash\"\n",
    "\n",
    "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
    "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
    "\n",
    "gemini_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
    "everything the other person says, or find common ground. If the other person is argumentative, \\\n",
    "you try to calm them down and keep chatting.\"\n",
    "\n",
    "gpt_messages = [\"Hi there\"]\n",
    "gemini_messages = [\"Hi\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_gpt():\n",
    "    # Build GPT's view of the conversation: its own turns are 'assistant', Gemini's are 'user'\n",
    "    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
    "    for gpt, gemini in zip(gpt_messages, gemini_messages):\n",
    "        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
    "        messages.append({\"role\": \"user\", \"content\": gemini})\n",
    "    completion = openai.chat.completions.create(\n",
    "        model=gpt_model,\n",
    "        messages=messages\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gpt()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "302586ca-645d-41f1-9738-efd8e7581bcf",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_gemini():\n",
    "    client = google.generativeai.GenerativeModel(\n",
    "        model_name=gemini_model,\n",
    "        system_instruction=gemini_system\n",
    "    )\n",
    "    messages = []\n",
    "    for gpt, gemini in zip(gpt_messages, gemini_messages):\n",
    "        messages.append({\"role\": \"user\", \"parts\": gpt})\n",
    "        messages.append({\"role\": \"model\", \"parts\": gemini})\n",
    "    # The newest GPT message isn't paired with a Gemini reply yet -\n",
    "    # send it now, with all earlier turns as history\n",
    "    chat = client.start_chat(\n",
    "        history=messages\n",
    "    )\n",
    "    response = chat.send_message(gpt_messages[-1])\n",
    "    return response.text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e322e1e-9a99-4488-a3bf-6d5562163553",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gemini()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
   "metadata": {},
   "outputs": [],
   "source": [
    "gpt_messages = [\"Hi there\"]\n",
    "gemini_messages = [\"Hi\"]\n",
    "\n",
    "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
    "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
    "\n",
    "for i in range(5):\n",
    "    gpt_next = call_gpt()\n",
    "    print(f\"GPT:\\n{gpt_next}\\n\")\n",
    "    gpt_messages.append(gpt_next)\n",
    "    \n",
    "    gemini_next = call_gemini()\n",
    "    print(f\"Gemini:\\n{gemini_next}\\n\")\n",
    "    gemini_messages.append(gemini_next)"
   ]
  },
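  {
   "cell_type": "markdown",
   "id": "inspect-msgs-md-0001",
   "metadata": {},
   "source": [
    "An editorial sketch (not part of the original lab): if you want to see exactly what `call_gpt()` sends on each turn, you can rebuild its messages list the same way it does and pretty-print it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "inspect-msgs-code-0001",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Editorial sketch: reconstruct and inspect the exact payload call_gpt() builds.\n",
    "# Assumes gpt_system, gpt_messages and gemini_messages from the cells above.\n",
    "\n",
    "import json\n",
    "\n",
    "messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
    "for gpt, gemini in zip(gpt_messages, gemini_messages):\n",
    "    messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
    "    messages.append({\"role\": \"user\", \"content\": gemini})\n",
    "\n",
    "print(json.dumps(messages, indent=2))"
   ]
  },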
  {
   "cell_type": "markdown",
   "id": "1d10e705-db48-4290-9dc8-9efdb4e31323",
   "metadata": {},
   "source": [
    "## Before you continue\n",
    "\n",
    "Be sure you understand how the conversation above is working, and in particular how the messages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
   "metadata": {},
   "source": [
    "# More advanced exercises\n",
    "\n",
    "Try creating a 3-way, perhaps bringing Claude into the conversation!\n",
    "\n",
    "Try doing this yourself before you look at the solutions.\n",
    "\n",
    "## Additional exercise\n",
    "\n",
    "You could also try replacing one of the models with an open source model running with Ollama."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "446c81e3-b67e-4cd9-8113-bc3092b93063",
   "metadata": {},
   "source": [
    "## Business relevance\n",
    "\n",
    "This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c23224f6-7008-44ed-a57f-718975f4e291",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

From 2e196555340495422b3d05bd7b38ea45e1d169fb Mon Sep 17 00:00:00 2001
From: Elena Shirokova
Date: Tue, 21 Jan 2025 10:33:17 +0100
Subject: [PATCH 12/16] enhance the prompt for unit testing

---
 .../unit-tests-generator.ipynb               | 40 ++++++++++++------
 1 file changed, 26 insertions(+), 14 deletions(-)

diff --git a/week4/community-contributions/unit-tests-generator.ipynb b/week4/community-contributions/unit-tests-generator.ipynb
index 4aaf7d7..9ff116a 100644
--- a/week4/community-contributions/unit-tests-generator.ipynb
+++ b/week4/community-contributions/unit-tests-generator.ipynb
@@ -198,12 +198,28 @@
     "source": [
     "def get_user_prompt(code):\n",
     "\n",
-    "    user_prompt = \"Write for a python code the unit test cases.\"\n",
-    "    user_prompt += \"Return readable unit tests cases using pytest library, do not create any custom imports, don't forget to import errors if needed; do not explain your work other than a few comments.\"\n",
-    "    user_prompt += \"The tests should include normal inputs, the inputs where the code is expected to fail, edge case and error handling.\"\n",
-    "    user_prompt += \"Do not insert the function to be tested in the output before the tests.\"\n",
-    "    \n",
+    "    user_prompt = \"\"\"Tests should include:\n",
+    "\n",
+    "    - Valid inputs with expected results.\n",
+    "    - Inputs that test the boundaries or limits of the function's behavior.\n",
+    "    - Invalid inputs or scenarios where the function is expected to raise exceptions.\n",
+    "\n",
+    "    Structure:\n",
+    "\n",
+    "    - Begin with all necessary imports. \n",
+    "    - Do not create custom imports. \n",
+    "    - Do not include the function under test in the response.\n",
+    "    - Ensure proper error handling for tests that expect exceptions.\n",
+    "    - Clearly name the test functions to indicate their purpose (e.g., test_function_name).\n",
+    "\n",
+    "    Example Structure:\n",
+    "\n",
+    "    - Use pytest.raises to validate exceptions.\n",
+    "    - Use assertions to verify correct outputs for successful and edge cases.\n",
+    "\n",
+    "    Documentation:\n",
     "\n",
+    "    - Add docstrings explaining what each test verifies.\"\"\"\n",
     "    user_prompt += code\n",
     "\n",
     "    return user_prompt"
@@ -298,6 +314,8 @@
     "source": [
     "function_to_test = \"\"\"\n",
     "    def lengthOfLongestSubstring(s):\n",
+    "        if not isinstance(s, str):\n",
+    "            raise TypeError(\"Input must be a string\")\n",
     "        max_length = 0\n",
     "        substring = \"\"\n",
     "        start_idx = 0\n",
@@ -343,7 +361,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 12,
+    "execution_count": 11,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -398,16 +416,10 @@
     "    save_test_run.click(save_unit_tests, inputs=[unit_tests_out])\n",
     "\n",
     "\n",
-    "ui.launch(inbrowser=True)"
+    "ui.launch(inbrowser=True)\n",
+    "# ui.launch()"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
   {
    "cell_type": "code",
    "execution_count": null,

From 651e8f6eb06abbfbd513d1966a8f79fbc9fdf05c Mon Sep 17 00:00:00 2001
From: Elena Shirokova
Date: Tue, 21 Jan 2025 10:36:25 +0100
Subject: [PATCH 13/16] add the notebook description

---
 .../unit-tests-generator.ipynb               | 24 +++++++++++++------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/week4/community-contributions/unit-tests-generator.ipynb b/week4/community-contributions/unit-tests-generator.ipynb
index 9ff116a..4076149 100644
--- a/week4/community-contributions/unit-tests-generator.ipynb
+++ b/week4/community-contributions/unit-tests-generator.ipynb
@@ -18,6 +18,23 @@
     "#!pipenv install pytest pytest-cov"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Current flow:\n",
+    "\n",
+    "1. For a given piece of Python code, it generates unit tests using the `pytest` library. The dashboard supports test execution along with a coverage report. If the unit tests are fine, there is an option to save them for future use. It can happen, especially with Ollama, that the generated tests contain a typing error; in this case the code can be edited in the right window and executed afterwards. \n",
+    "\n",
+    "2. Supports 3 models: \n",
+    "\n",
+    "- gpt-4o-mini\n",
+    "- claude-3-5-sonnet-20240620\n",
+    "- llama3.2\n",
+    "\n",
+    "It is recommended, though, to use models other than Ollama: my tests showed the code returned from Ollama required more supervision and editing. Some unit tests generated by Ollama don't provide full coverage, but it is still a good starting point for building such a tool."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 2,
@@ -419,13 +436,6 @@
     "ui.launch(inbrowser=True)\n",
     "# ui.launch()"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {

From 4087552ccd3d2a1baec69738f62dee0b7a63bda5 Mon Sep 17 00:00:00 2001
From: jasjyotsinghjaswal
Date: Tue, 21 Jan 2025 10:36:29 -0400
Subject: [PATCH 14/16] Rename oh_sheet_its_spark!!!!.ipynb to oh_sheet_its_spark.ipynb

---
 .../{oh_sheet_its_spark!!!!.ipynb => oh_sheet_its_spark.ipynb} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename week2/community-contributions/{oh_sheet_its_spark!!!!.ipynb => oh_sheet_its_spark.ipynb} (100%)

diff --git a/week2/community-contributions/oh_sheet_its_spark!!!!.ipynb b/week2/community-contributions/oh_sheet_its_spark.ipynb
similarity index 100%
rename from week2/community-contributions/oh_sheet_its_spark!!!!.ipynb
rename to week2/community-contributions/oh_sheet_its_spark.ipynb

From d71a10e2acd43f9e5a4d7a8c5943575a24140732 Mon Sep 17 00:00:00 2001
From: Thomas Butman
Date: Tue, 21 Jan 2025 14:39:18 +0000
Subject: [PATCH 15/16] fix typo in day3.ipynb

---
 week2/day3.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/week2/day3.ipynb b/week2/day3.ipynb
index 28e6896..3d3b8f8 100644
--- a/week2/day3.ipynb
+++ b/week2/day3.ipynb
@@ -184,7 +184,7 @@
     "system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n",
     "the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n",
     "For example, if the customer says 'I'm looking to buy a hat', \\\n",
-    "you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales evemt.'\\\n",
+    "you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales event.'\\\n",
     "Encourage the customer to buy hats if they are unsure what to get.\""

From ba7851afd43df8b0d41208067161523eb10b9576 Mon Sep 17 00:00:00 2001
From: Junaid
Date: Thu, 23 Jan 2025 03:51:36 +0530
Subject: [PATCH 16/16] Added my day1 contribution to community-contributions

---
 .../day1_analyze_CV_Write_cover_letter.ipynb  | 356 ++++++++++++++++++
 1 file changed, 356 insertions(+)
 create mode 100644 week1/community-contributions/day1_analyze_CV_Write_cover_letter.ipynb

diff --git a/week1/community-contributions/day1_analyze_CV_Write_cover_letter.ipynb b/week1/community-contributions/day1_analyze_CV_Write_cover_letter.ipynb
new file mode 100644
index 0000000..242ea3c
--- /dev/null
+++ b/week1/community-contributions/day1_analyze_CV_Write_cover_letter.ipynb
@@ -0,0 +1,356 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "31d3c4a4-5442-4074-b812-42d60e0a0c04",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#In this example we will fetch the job description by pasting the URL, then we upload the CV. Only then will ChatGPT\n",
+    "#analyze the CV against the fetched job description. If the CV is a good match then it will write a cover letter.\n",
+    "\n",
+    "#If \n",
+    "  ##job posting url is fake/random text or \n",
+    "  ##job posting is fake/random text or \n",
+    "  ##CV is fake/random text\n",
+    "#then ChatGPT will not analyze the CV; it will give a generic response to enter the info correctly."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bc2eafe6-5255-4317-8ddd-a93695296043",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pip install PyPDF2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cf45e9d5-4913-416c-9880-5be60a96c0e6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Imports\n",
+    "import os\n",
+    "import io\n",
+    "import time\n",
+    "import requests\n",
+    "import PyPDF2\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import Markdown, display\n",
+    "from bs4 import BeautifulSoup\n",
+    "from openai import OpenAI\n",
+    "from ipywidgets import Textarea, FileUpload, Button, VBox, HTML"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "af8fea69-60aa-430c-a16c-8757b487e07a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "load_dotenv(override=True)\n",
+    "api_key = os.getenv('OPENAI_API_KEY')\n",
+    "\n",
+    "# Check the key\n",
+    "\n",
+    "if not api_key:\n",
+    "    print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
+    "elif not api_key.startswith(\"sk-proj-\"):\n",
+    "    print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
+    "elif api_key.strip() != api_key:\n",
+    "    print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
+    "else:\n",
+    "    print(\"API key found and looks good so far!\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "daee94d2-f82b-43f0-95d1-15370eda1bc7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "openai = OpenAI()\n",
+    "\n",
+    "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
+    "# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0712dd1d-b6bc-41c6-84ec-d965f696f7aa",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Step 1: Create your prompts\n",
+    "\n",
+    "system_prompt = \"You are an assistant who analyzes the user's CV against the job description \\\n",
+    "    and provides a short summary of whether the user is a fit for this job. If the user is a fit for the job, \\\n",
+    "    write a cover letter for the user to apply for the job. Keep the cover letter professional, short, \\\n",
+    "    and formal. \\\n",
+    "    Important things to check before analyzing the CV:\\\n",
+    "    1. Always check if the CV is actually a CV or just random text\\\n",
+    "    2. Check if the text fetched from the website is actually a job description or not, \\\n",
+    "    and ignore text related to navigation\\\n",
+    "    3. Also check the link of the job posting, whether it actually resembles a job posting or is just a random \\\n",
+    "    fake website\\\n",
+    "    4. If any of these checks fails, do not analyze the CV against the job description and give an\\\n",
+    "    appropriate response as you see fit\\\n",
+    "    5. 
Always respond in Markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70c972a6-8af6-4ff2-a338-6d7ba90e2045", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "426dfd9b-3446-4543-9819-63040abd9644", + "metadata": {}, + "outputs": [], + "source": [ + "for_user_prompt = {\n", + " 'job_posting_url':'',\n", + " 'job_posting': '',\n", + " 'cv_text': ''\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79d9ccd6-f5fe-4ce8-982c-7235d2cf6a9f", + "metadata": {}, + "outputs": [], + "source": [ + "# Create widgets - to create a box for the job posting text\n", + "job_posting_url_area = Textarea(\n", + " placeholder='Paste the URL of the job posting here, ONLY URL PLEASE',\n", + " description='Fetching job:',\n", + " disabled=False,\n", + " layout={'width': '800px', 'height': '50px'}\n", + ")\n", + "\n", + "status_job_posting = HTML(value=\"Status: Waiting for inputs...\")\n", + "\n", + "# Create Submit Buttons\n", + "fetch_job_posting_button = Button(description='Fetch Job Posting', button_style='primary')\n", + "\n", + "def fetch_job_posting_action(b):\n", + " for_user_prompt['job_posting_url'] = job_posting_url_area.value\n", + " if for_user_prompt['job_posting_url']:\n", + " ed = Website(for_user_prompt['job_posting_url'])\n", + " status_job_posting.value = \"Status: Job posting fetched successfully!\"\n", + " fetch_job_posting_button.button_style='success'\n", + " for_user_prompt['job_posting']=ed.text\n", + " else:\n", + " status_job_posting.value = \"Status: Please enter a job posting url before submitting.\"\n", + "\n", + "# Attach actions to buttons\n", + "fetch_job_posting_button.on_click(fetch_job_posting_action)\n", + "\n", + "# Layout\n", + "job_posting_box = VBox([job_posting_url_area, fetch_job_posting_button])\n", + "\n", + "# Display all widgets\n", + "display(VBox([\n", + " HTML(value=\"
<h3>Input Job Posting Url</h3>
\"),\n", + " job_posting_box,\n", + " status_job_posting\n", + "]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "58d42786-1580-4d3f-b44f-5c52250c2935", + "metadata": {}, + "outputs": [], + "source": [ + "# Print fetched job description\n", + "\n", + "#print(for_user_prompt['job_posting'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd258dec-9b57-40ce-b37c-2627acbcb5af", + "metadata": {}, + "outputs": [], + "source": [ + "# Define file upload for CV\n", + "cv_upload = FileUpload(\n", + " accept='.pdf', # Only accept PDF files\n", + " multiple=False, # Only allow single file selection\n", + " description='Upload CV (PDF)'\n", + ")\n", + "\n", + "status = HTML(value=\"Status: Waiting for inputs...\")\n", + "\n", + "# Create Submit Buttons\n", + "submit_cv_button = Button(description='Submit CV', button_style='success')\n", + "\n", + "# Functions\n", + "def submit_cv_action(change):\n", + "\n", + " if not for_user_prompt['cv_text']:\n", + " status.value = \"Status: Please upload a CV before submitting.\"\n", + " \n", + " if cv_upload.value:\n", + " # Get the uploaded file\n", + " uploaded_file = cv_upload.value[0]\n", + " content = io.BytesIO(uploaded_file['content'])\n", + " \n", + " try:\n", + " pdf_reader = PyPDF2.PdfReader(content) \n", + " cv_text = \"\"\n", + " for page in pdf_reader.pages: \n", + " cv_text += page.extract_text() \n", + " \n", + " # Store CV text in for_user_prompt\n", + " for_user_prompt['cv_text'] = cv_text\n", + " status.value = \"Status: CV uploaded and processed successfully!\"\n", + " except Exception as e:\n", + " status.value = f\"Status: Error processing PDF: {str(e)}\"\n", + "\n", + " time.sleep(0.5) # Short pause between upload and submit messages to display both\n", + " \n", + " if for_user_prompt['cv_text']:\n", + " #print(\"CV Submitted:\")\n", + " #print(for_user_prompt['cv_text'])\n", + " status.value = \"Status: CV submitted successfully!\"\n", + " \n", + "\n", + "# Attach actions to buttons\n", + "submit_cv_button.on_click(submit_cv_action)\n", + "\n", + "# Layout\n", + "cv_buttons = VBox([submit_cv_button])\n", + "\n", + "# Display all widgets\n", + "display(VBox([\n", + " HTML(value=\"
<h3>Import CV and submit</h3>
\"),\n", + " cv_upload,\n", + " cv_buttons,\n", + " status\n", + "]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7dd22a4-ca7b-4b8c-a328-6205cec689cb", + "metadata": {}, + "outputs": [], + "source": [ + "# Prepare the user prompt that we will send to open ai (added URL for the context)\n", + "user_prompt = f\"\"\"\n", + "Job Posting: \n", + "{for_user_prompt['job_posting']}\n", + "\n", + "CV: \n", + "{for_user_prompt['cv_text']}\n", + "\n", + "Url:\n", + "{for_user_prompt['job_posting_url']}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82b71c1a-895a-48e7-a945-13e615bb0096", + "metadata": {}, + "outputs": [], + "source": [ + "# Define messages with system_prompt and user_prompt\n", + "def messages_for(system_prompt_input, user_prompt_input):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt_input},\n", + " {\"role\": \"user\", \"content\": user_prompt_input}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "854dc42e-2bbd-493b-958f-c20484908300", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. \n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(system_prompt, user_prompt)\n", + ")\n", + "\n", + "# Response is provided in Markdown and displayed accordingly\n", + "display(Markdown(response.choices[0].message.content))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "758d2cbe-0f80-4572-8724-7cba77f701dd", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}