diff --git a/week4/community-contributions/day4-docstrings.ipynb b/week4/community-contributions/week4-day4-challenge.ipynb
similarity index 55%
rename from week4/community-contributions/day4-docstrings.ipynb
rename to week4/community-contributions/week4-day4-challenge.ipynb
index 007cfbc..00a21f3 100644
--- a/week4/community-contributions/day4-docstrings.ipynb
+++ b/week4/community-contributions/week4-day4-challenge.ipynb
@@ -39,7 +39,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 51,
    "id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
    "metadata": {},
    "outputs": [],
@@ -63,7 +63,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 52,
    "id": "4f672e1c-87e9-4865-b760-370fa605e614",
    "metadata": {},
    "outputs": [],
@@ -78,7 +78,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 53,
    "id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
    "metadata": {},
    "outputs": [],
@@ -93,28 +93,71 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 166,
    "id": "6896636f-923e-4a2c-9d6c-fac07828a201",
    "metadata": {},
    "outputs": [],
    "source": [
-    "system_message = \"You are an assistant that reimplements Python code in high performance C++. \"\n",
-    "system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
-    "system_message += \"The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\"\n",
+    "# Define the different actions available\n",
     "\n",
-    "user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
-    "user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
-    "user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\""
+    "prompt_options = [\"Convert to C\", \"Add comments\", \"Write unit tests\"]\n",
+    "\n",
+    "system_prompts = {\n",
+    "    prompt_options[0]: \"\"\"\n",
+    "You are an assistant that reimplements Python code in high performance C++.\n",
+    "Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments.\n",
+    "The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\n",
+    "\"\"\",\n",
+    "    \n",
+    "    prompt_options[1]: \"\"\"\n",
+    "You are an assistant that adds succinct comments and docstrings to Python code. Respond only with valid Python code.\n",
+    "\"\"\",\n",
+    "    \n",
+    "    prompt_options[2]: \"\"\"\n",
+    "You are an assistant that creates unit tests for Python code. Respond only with valid Python code.\n",
+    "\"\"\"\n",
+    "}\n",
+    "\n",
+    "user_prompts = {\n",
+    "    prompt_options[0]: \"\"\"\n",
+    "Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \n",
+    "Respond only with C++ code; do not explain your work other than a few comments.\n",
+    "Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\n",
+    "\"\"\",\n",
+    "    \n",
+    "    prompt_options[1]: \"\"\"\n",
+    "Keep this Python code but insert appropriate comments and docstrings.\n",
+    "\"\"\",\n",
+    "    \n",
+    "    prompt_options[2]: \"\"\"\n",
+    "Create unit tests for this Python code.\n",
+    "\"\"\"\n",
+    "}"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 157,
    "id": "a1cbb778-fa57-43de-b04b-ed523f396c38",
    "metadata": {},
    "outputs": [],
    "source": [
-    "pi = \"\"\"\n",
+    "python_sample_options = [\"Hello, World\", \"Calculate pi\", \"Kadane's Algorithm\", \"Sieve of Eratosthenes\"]\n",
+    "\n",
+    "python_code_samples = {\n",
+    "    python_sample_options[0]: \"\"\"\n",
+    "import time\n",
+    "\n",
+    "start_time = time.time()\n",
+    "\n",
+    "print(\"Hello, world\")\n",
+    "\n",
+    "end_time = time.time()\n",
+    "\n",
+    "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
+    "\"\"\",\n",
+    "\n",
+    "    python_sample_options[1]: \"\"\"\n",
     "import time\n",
     "\n",
     "def calculate(iterations, param1, param2):\n",
@@ -132,17 +175,10 @@
     "\n",
     "print(f\"Result: {result:.12f}\")\n",
     "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
-    "\"\"\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "python_hard = \"\"\"# Be careful to support large number sizes\n",
+    "\"\"\",\n",
+    "\n",
+    "    python_sample_options[2]: \"\"\"\n",
+    "# Be careful to support large number sizes\n",
     "\n",
     "def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
     "    value = seed\n",
@@ -184,12 +220,36 @@
     "\n",
     "print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
     "print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
-    "\"\"\""
+    "\"\"\",\n",
+    "\n",
+    "    python_sample_options[3]: \"\"\"\n",
+    "import time\n",
+    "start_time = time.time()\n",
+    "stop_at = 100_000_000\n",
+    "prime = [True] * (stop_at + 1)\n",
+    "p = 2\n",
+    "\n",
+    "while p * p <= stop_at:\n",
+    "    # If prime[p] is True, then p is a prime\n",
+    "    if prime[p]:\n",
+    "        # Mark all multiples of p as non-prime\n",
+    "        for i in range(p * p, stop_at + 1, p):\n",
+    "            prime[i] = False\n",
+    "    p += 1\n",
+    "\n",
+    "# Collect all prime numbers\n",
+    "primes = [p for p in range(2, stop_at + 1) if prime[p]]\n",
+    "\n",
+    "end_time = time.time()\n",
+    "print(\"Maximum prime: {:,}\".format(primes[-1]))\n",
+    "print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
+    "\"\"\"\n",
+    "}"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 25,
+   "execution_count": 56,
    "id": "e33565c0-cba8-46d3-a0c5-9440d7fe4d2c",
    "metadata": {},
    "outputs": [],
    "source": [
     "def create_user_prompt(user_prompt, python_code):\n",
     "    return user_prompt + '\\n' + python_code\n",
     "\n",
     "# Create the message list for GPT. Claude doesn't need this because it does not combine the system and user prompts.\n",
-    "def create_messages_for_gpt(system_message, user_prompt):\n",
+    "def create_messages_for_gpt(system_prompt, user_prompt):\n",
     "    return [\n",
-    "        {\"role\": \"system\", \"content\": system_message},\n",
+    "        {\"role\": \"system\", \"content\": system_prompt},\n",
     "        {\"role\": \"user\", \"content\": user_prompt}\n",
     "    ]"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 57,
    "id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
    "metadata": {},
    "outputs": [],
@@ -221,76 +281,52 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 164,
    "id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
    "metadata": {},
    "outputs": [],
    "source": [
-    "def optimize_cpp_gpt(python): \n",
-    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
+    "# This is where additional models can be easily added. \n",
+    "# Just update the model_options list, add a streaming function, and update the call_llm function. \n",
+    "\n",
+    "model_options = [\"GPT\", \"Claude\"]\n",
+    "# model_options = [\"GPT\", \"Claude\", \"CodeQwen\"]\n",
+    "default_model = model_options[0]\n",
+    "\n",
+    "def stream_gpt(system_prompt, user_prompt, python_code): \n",
+    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=create_messages_for_gpt(system_prompt, create_user_prompt(user_prompt, python_code)), stream=True)\n",
     "    reply = \"\"\n",
    "    for chunk in stream:\n",
     "        fragment = chunk.choices[0].delta.content or \"\"\n",
     "        reply += fragment\n",
-    "        print(fragment, end='', flush=True)\n",
-    "    write_output(reply)\n",
+    "        yield reply.replace('```cpp\\n','').replace('```','')\n",
     "\n",
-    "def optimize_cpp_claude(python):\n",
+    "def stream_claude(system_prompt, user_prompt, python_code):\n",
     "    result = claude.messages.stream(\n",
     "        model=CLAUDE_MODEL,\n",
     "        max_tokens=2000,\n",
-    "        system=system_message,\n",
-    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
+    "        system=system_prompt,\n",
+    "        messages=[{\"role\": \"user\", \"content\": create_user_prompt(user_prompt, python_code)}],\n",
     "    )\n",
     "    reply = \"\"\n",
     "    with result as stream:\n",
     "        for text in stream.text_stream:\n",
     "            reply += text\n",
-    "            print(text, end=\"\", flush=True)\n",
-    "    write_output(reply)\n",
+    "            yield reply.replace('```cpp\\n','').replace('```','')\n",
     "\n",
-    "def optimize(python, model):\n",
+    "def call_llm(system_prompt, user_prompt, python_code, model):\n",
     "    if model==\"GPT\":\n",
-    "        result = stream_gpt(python)\n",
+    "        result = stream_gpt(system_prompt, user_prompt, python_code)\n",
     "    elif model==\"Claude\":\n",
-    "        result = stream_claude(python)\n",
-    "    elif model==\"CodeQwen\":\n",
-    "        result = stream_code_qwen(python)\n",
+    "        result = stream_claude(system_prompt, user_prompt, python_code)\n",
+    "    # elif model==\"CodeQwen\":\n",
+    "    #     result = stream_code_qwen(python)\n",
     "    else:\n",
     "        raise ValueError(\"Unknown model\")\n",
     "    for stream_so_far in result:\n",
     "        yield stream_so_far"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def stream_gpt(python): \n",
-    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
-    "    reply = \"\"\n",
-    "    for chunk in stream:\n",
-    "        fragment = chunk.choices[0].delta.content or \"\"\n",
-    "        reply += fragment\n",
-    "        yield reply.replace('```cpp\\n','').replace('```','')\n",
-    "\n",
-    "def stream_claude(python):\n",
-    "    result = claude.messages.stream(\n",
-    "        model=CLAUDE_MODEL,\n",
-    "        max_tokens=2000,\n",
-    "        system=system_message,\n",
-    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
-    "    )\n",
-    "    reply = \"\"\n",
-    "    with result as stream:\n",
-    "        for text in stream.text_stream:\n",
-    "            reply += text\n",
-    "            yield reply.replace('```cpp\\n','').replace('```','')"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -323,7 +359,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 61,
    "id": "19bf2bff-a822-4009-a539-f003b1651383",
    "metadata": {},
    "outputs": [],
@@ -350,23 +386,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": 62,
    "id": "4ba311ec-c16a-4fe0-946b-4b940704cf65",
    "metadata": {},
    "outputs": [],
    "source": [
-    "def select_sample_program(sample_program):\n",
-    "    if sample_program==\"pi\":\n",
-    "        return pi\n",
-    "    elif sample_program==\"python_hard\":\n",
-    "        return python_hard\n",
+    "def select_python_sample(python_sample):\n",
+    "    if python_sample in python_sample_options:\n",
+    "        return python_code_samples[python_sample]\n",
     "    else:\n",
-    "        return \"Type your Python program here\""
+    "        return next(iter(python_code_samples.values()), \"# Type in your Python program here\")"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
+   "execution_count": 63,
    "id": "e42286bc-085c-45dc-b101-234308e58269",
    "metadata": {},
    "outputs": [],
@@ -461,25 +495,16 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": 167,
    "id": "f9ca2e6f-60c1-4e5f-b570-63c75b2d189b",
    "metadata": {
     "scrolled": true
    },
    "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "* Running on local URL: http://127.0.0.1:7870\n",
-      "\n",
-      "To create a public link, set `share=True` in `launch()`.\n"
-     ]
-    },
     {
      "data": {
       "text/html": [
" + "" ], "text/plain": [ "