diff --git a/week4/community-contributions/day4-docstrings.ipynb b/week4/community-contributions/day4-docstrings.ipynb index 007cfbc..00a21f3 100644 --- a/week4/community-contributions/day4-docstrings.ipynb +++ b/week4/community-contributions/day4-docstrings.ipynb @@ -39,7 +39,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 51, "id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3", "metadata": {}, "outputs": [], @@ -63,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 52, "id": "4f672e1c-87e9-4865-b760-370fa605e614", "metadata": {}, "outputs": [], @@ -78,7 +78,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 53, "id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da", "metadata": {}, "outputs": [], @@ -93,28 +93,71 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 166, "id": "6896636f-923e-4a2c-9d6c-fac07828a201", "metadata": {}, "outputs": [], "source": [ - "system_message = \"You are an assistant that reimplements Python code in high performance C++. \"\n", - "system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n", - "system_message += \"The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\"\n", + "# Define the different actions available\n", "\n", - "user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n", - "user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n", - "user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"" + "prompt_options = [\"Convert to C\", \"Add comments\", \"Write unit tests\"]\n", + "\n", + "system_prompts = {\n", + " prompt_options[0]: \"\"\"\n", + "You are an assistant that reimplements Python code in high performance C++.\n", + "Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments.\n", + "The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\n", + "\"\"\",\n", + " \n", + " prompt_options[1]: \"\"\"\n", + "You are an assistant that adds succinct comments and docstrings to Python code. Respond only with valid Python code.\n", + "\"\"\",\n", + " \n", + " prompt_options[2]: \"\"\"\n", + "You are an assistant that creates unit tests for Python code. Respond only with valid Python code.\n", + "\"\"\"\n", + "}\n", + "\n", + "user_prompts = {\n", + " prompt_options[0]: \"\"\"\n", + "Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \n", + "Respond only with C++ code; do not explain your work other than a few comments.\n", + "Pay attention to number types to ensure no int overflows. 
Remember to #include all necessary C++ headers such as iomanip.\n\n\n",
+    "\"\"\",\n",
+    "    \n",
+    "    prompt_options[1]: \"\"\"\n",
+    "Keep this Python code but insert appropriate comments and docstrings.\n",
+    "\"\"\",\n",
+    "    \n",
+    "    prompt_options[2]: \"\"\"\n",
+    "Create unit tests for this Python code.\n",
+    "\"\"\"\n",
+    "}"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": 157,
    "id": "a1cbb778-fa57-43de-b04b-ed523f396c38",
    "metadata": {},
    "outputs": [],
    "source": [
-    "pi = \"\"\"\n",
+    "python_sample_options = [\"Hello, World\", \"Calculate pi\", \"Kadane's Algorithm\", \"Sieve of Eratosthenes\"]\n",
+    "\n",
+    "python_code_samples = {\n",
+    "    python_sample_options[0]: \"\"\"\n",
+    "import time\n",
+    "\n",
+    "start_time = time.time()\n",
+    "\n",
+    "print(\"Hello, world\")\n",
+    "\n",
+    "end_time = time.time()\n",
+    "\n",
+    "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
+    "\"\"\",\n",
+    "\n",
+    "    python_sample_options[1]: \"\"\"\n",
     "import time\n",
     "\n",
     "def calculate(iterations, param1, param2):\n",
@@ -132,17 +175,10 @@
     "\n",
     "print(f\"Result: {result:.12f}\")\n",
     "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
-    "\"\"\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "python_hard = \"\"\"# Be careful to support large number sizes\n",
+    "\"\"\",\n",
+    "\n",
+    "    python_sample_options[2]: \"\"\"\n",
+    "# Be careful to support large number sizes\n",
     "\n",
     "def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
     "    value = seed\n",
@@ -184,12 +220,36 @@
     "\n",
     "print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
     "print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
-    "\"\"\""
+    "\"\"\",\n",
+    "\n",
+    "    python_sample_options[3]: \"\"\"\n",
+    "import time\n",
+    "start_time = time.time()\n",
+    "stop_at = 100_000_000\n",
+    "prime = [True] * (stop_at + 1)\n",
+    "p = 2\n",
+    "\n",
+    "while p * p <= stop_at:\n",
+    "    # If prime[p] is True, then p is a prime\n",
+    "    if prime[p]:\n",
+    "        # Mark all multiples of p as non-prime\n",
+    "        for i in range(p * p, stop_at + 1, p):\n",
+    "            prime[i] = False\n",
+    "    p += 1\n",
+    "\n",
+    "# Collect all prime numbers\n",
+    "primes = [p for p in range(2, stop_at + 1) if prime[p]]\n",
+    "\n",
+    "end_time = time.time()\n",
+    "print(\"Maximum prime: {:,}\".format(primes[-1]))\n",
+    "print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
+    "\"\"\"\n",
+    "}"
    ]
   },
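The sample programs are keyed by the same strings that populate the sample-program radio buttons, so registering another sample takes two touches: append a label to `python_sample_options` and add a matching entry to `python_code_samples`. A minimal sketch (the Fibonacci sample is hypothetical, not part of this notebook):

```python
# Hypothetical extra sample: the label joins the Radio choices, and the
# same string keys the source text in python_code_samples.
python_sample_options.append("Fibonacci")

python_code_samples["Fibonacci"] = """
import time

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

start_time = time.time()
print("fib(30) =", fib(30))
end_time = time.time()
print(f"Execution Time: {(end_time - start_time):.6f} seconds")
"""
```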
  {
   "cell_type": "code",
-   "execution_count": 25,
+   "execution_count": 56,
   "id": "e33565c0-cba8-46d3-a0c5-9440d7fe4d2c",
   "metadata": {},
   "outputs": [],
   "source": [
     "    return user_prompt + '\\n' + python_code\n",
     "\n",
     "# Create the message list for GPT. Claude doesn't need this because it does not combine the system and user prompts.\n",
-    "def create_messages_for_gpt(system_message, user_prompt):\n",
+    "def create_messages_for_gpt(system_prompt, user_prompt):\n",
     "    return [\n",
-    "        {\"role\": \"system\", \"content\": system_message},\n",
+    "        {\"role\": \"system\", \"content\": system_prompt},\n",
     "        {\"role\": \"user\", \"content\": user_prompt}\n",
     "    ]"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 57,
   "id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -221,76 +281,52 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 164,
   "id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
   "metadata": {},
   "outputs": [],
   "source": [
-    "def optimize_cpp_gpt(python):    \n",
-    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
+    "# This is where additional models can be easily added. \n",
+    "# Just update the model_options list, add a streaming function, and update the call_llm function. \n",
+    "\n",
+    "model_options = [\"GPT\", \"Claude\"]\n",
+    "# model_options = [\"GPT\", \"Claude\", \"CodeQwen\"]\n",
+    "default_model = model_options[0]\n",
+    "\n",
+    "def stream_gpt(system_prompt, user_prompt, python_code): \n",
+    "    stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=create_messages_for_gpt(system_prompt, create_user_prompt(user_prompt, python_code)), stream=True)\n",
     "    reply = \"\"\n",
     "    for chunk in stream:\n",
     "        fragment = chunk.choices[0].delta.content or \"\"\n",
     "        reply += fragment\n",
-    "        print(fragment, end='', flush=True)\n",
-    "    write_output(reply)\n",
+    "        yield reply.replace('```cpp\\n','').replace('```python\\n','').replace('```','')\n",
     "\n",
-    "def optimize_cpp_claude(python):\n",
+    "def stream_claude(system_prompt, user_prompt, python_code):\n",
     "    result = claude.messages.stream(\n",
     "        model=CLAUDE_MODEL,\n",
     "        max_tokens=2000,\n",
-    "        system=system_message,\n",
-    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
+    "        system=system_prompt,\n",
+    "        messages=[{\"role\": \"user\", \"content\": create_user_prompt(user_prompt, python_code)}],\n",
     "    )\n",
     "    reply = \"\"\n",
     "    with result as stream:\n",
     "        for text in stream.text_stream:\n",
     "            reply += text\n",
-    "            print(text, end=\"\", flush=True)\n",
-    "    write_output(reply)\n",
+    "            yield reply.replace('```cpp\\n','').replace('```python\\n','').replace('```','')\n",
     "\n",
-    "def optimize(python, model):\n",
+    "def call_llm(system_prompt, user_prompt, python_code, model):\n",
     "    if model==\"GPT\":\n",
-    "        result = stream_gpt(python)\n",
+    "        result = stream_gpt(system_prompt, user_prompt, python_code)\n",
     "    elif model==\"Claude\":\n",
-    "        result = stream_claude(python)\n",
-    "    elif model==\"CodeQwen\":\n",
-    "        result = stream_code_qwen(python)\n",
+    "        result = stream_claude(system_prompt, user_prompt, python_code)\n",
+    "    # elif model==\"CodeQwen\":\n",
+    "    #     result = stream_code_qwen(python)\n",
     "    else:\n",
     "        raise ValueError(\"Unknown model\")\n",
     "    for stream_so_far in result:\n",
     "        yield stream_so_far "
   ]
  },
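The cell above spells out its own extension recipe: add a label to `model_options`, write a streaming generator with the same signature, and add a branch to `call_llm`. A sketch of what the commented-out CodeQwen hook might look like, assuming a hypothetical `code_qwen_client` that exposes an OpenAI-compatible chat-completions API (no such client or `CODE_QWEN_MODEL` constant is defined in this notebook):

```python
# Hypothetical third model. code_qwen_client and CODE_QWEN_MODEL are
# assumptions; substitute whatever client actually serves the model.
model_options = ["GPT", "Claude", "CodeQwen"]

def stream_code_qwen(system_prompt, user_prompt, python_code):
    stream = code_qwen_client.chat.completions.create(
        model=CODE_QWEN_MODEL,
        messages=create_messages_for_gpt(system_prompt, create_user_prompt(user_prompt, python_code)),
        stream=True)
    reply = ""
    for chunk in stream:
        reply += chunk.choices[0].delta.content or ""
        # Strip any Markdown fence the model wraps around its answer
        yield reply.replace('```cpp\n', '').replace('```python\n', '').replace('```', '')
```

`call_llm` then needs the matching branch: `elif model == "CodeQwen": result = stream_code_qwen(system_prompt, user_prompt, python_code)`.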
-    "        yield reply.replace('```cpp\\n','').replace('```','')\n",
-    "\n",
-    "def stream_claude(python):\n",
-    "    result = claude.messages.stream(\n",
-    "        model=CLAUDE_MODEL,\n",
-    "        max_tokens=2000,\n",
-    "        system=system_message,\n",
-    "        messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
-    "    )\n",
-    "    reply = \"\"\n",
-    "    with result as stream:\n",
-    "        for text in stream.text_stream:\n",
-    "            reply += text\n",
-    "            yield reply.replace('```cpp\\n','').replace('```','')"
-   ]
-  },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -323,7 +359,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 61,
   "id": "19bf2bff-a822-4009-a539-f003b1651383",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -350,23 +386,21 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": 62,
   "id": "4ba311ec-c16a-4fe0-946b-4b940704cf65",
   "metadata": {},
   "outputs": [],
   "source": [
-    "def select_sample_program(sample_program):\n",
-    "    if sample_program==\"pi\":\n",
-    "        return pi\n",
-    "    elif sample_program==\"python_hard\":\n",
-    "        return python_hard\n",
+    "def select_python_sample(python_sample):\n",
+    "    if python_sample in python_sample_options:\n",
+    "        return python_code_samples[python_sample]\n",
     "    else:\n",
-    "        return \"Type your Python program here\""
+    "        return next(iter(python_code_samples.values()), \"# Type in your Python program here\")"
   ]
  },
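Since the unrecognized-sample branch just falls back to the first registered sample (or a placeholder when the dict is empty), the whole helper collapses to a single dictionary lookup. An equivalent sketch using `dict.get`:

```python
def select_python_sample(python_sample):
    # Fall back to the first registered sample, or a stub if none exist.
    default = next(iter(python_code_samples.values()), "# Type in your Python program here")
    return python_code_samples.get(python_sample, default)
```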
" + "
" ], "text/plain": [ "" @@ -492,7 +517,7 @@ "data": { "text/plain": [] }, - "execution_count": 24, + "execution_count": 167, "metadata": {}, "output_type": "execute_result" } @@ -503,54 +528,141 @@ ".cpp {background-color: #050;}\n", "\"\"\"\n", "\n", - "available_models = [\"GPT\", \"Claude\"]\n", - "default_model = available_models[0]\n", - "# available_models = [\"GPT\", \"Claude\", \"CodeQwen\"]\n", + "model = default_model\n", + "selected_tab = prompt_options[0]\n", "\n", + "# Determine the C (C++, really) compiler to use based on the platform\n", "compiler_cmd = c_compiler_cmd(\"optimized\")\n", "\n", - "with gr.Blocks(css=css) as ui:\n", - " gr.Markdown(\"## Convert code from Python to C++\")\n", - " with gr.Row():\n", - " system_prompt = gr.Textbox(label=\"System prompt\", value=system_message )\n", - " user_prompt = gr.Textbox(label=\"User prompt\", value=user_prompt\n", - " with gr.Row():\n", - " python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n", - " cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n", - " with gr.Row():\n", - " with gr.Column():\n", - " sample_program = gr.Radio([\"pi\", \"python_hard\"], label=\"Sample program\", value=\"python_hard\")\n", - " model = gr.Dropdown(available_models, label=\"Select model\", value=default_model)\n", - " with gr.Column():\n", - " architecture = gr.Radio([compiler_cmd[0]], label=\"Architecture\", interactive=False, value=compiler_cmd[0])\n", - " compiler = gr.Radio([compiler_cmd[1]], label=\"Compiler\", interactive=False, value=compiler_cmd[1])\n", - " with gr.Row():\n", - " convert = gr.Button(\"Convert code\")\n", - " with gr.Row():\n", - " python_run = gr.Button(\"Run Python\")\n", - " if not compiler_cmd[1] == \"Unavailable\":\n", - " cpp_run = gr.Button(\"Run C++\")\n", + "def any_tab_on_select(evt: gr.SelectData):\n", + " global selected_tab\n", + " selected_tab = evt.value\n", + "\n", + "def reset_prompts():\n", + " return system_prompts[selected_tab], user_prompts[selected_tab]\n", + "\n", + "def change_python_sample(python_sample, python_code):\n", + " if not python_sample == \"Custom\":\n", + " if python_sample in python_sample_options:\n", + " return python_code_samples[python_sample]\n", " else:\n", - " cpp_run = gr.Button(\"No compiler to run C++\", interactive=False)\n", - " with gr.Row():\n", - " python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n", - " cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n", + " return python_code\n", + " else:\n", + " return python_code\n", "\n", - " sample_program.change(select_sample_program, inputs=[sample_program], outputs=[python])\n", - " convert.click(optimize, inputs=[python, model], outputs=[cpp])\n", - " python_run.click(execute_python, inputs=[python], outputs=[python_out])\n", - " cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n", + "def change_python_sample_to_custom():\n", + " return \"Custom\"\n", "\n", + "# Display the interface\n", + "with gr.Blocks(css=css) as ui:\n", + " with gr.Tab(prompt_options[0]) as first_tab:\n", + " gr.Markdown(\"# \" + prompt_options[0])\n", + " with gr.Group():\n", + " with gr.Row():\n", + " first_system_prompt_txt = gr.Textbox(label=\"System prompt\", value=system_prompts[prompt_options[0]], lines=10, interactive=True )\n", + " first_user_prompt_txt = gr.Textbox(label=\"User prompt\", value=user_prompts[prompt_options[0]], lines=10, interactive=True )\n", + " with gr.Row():\n", + " first_reset_prompts_btn = gr.Button(\"Reset prompts\")\n", + " with 
+    "# Display the interface\n",
+    "with gr.Blocks(css=css) as ui:\n",
+    "    with gr.Tab(prompt_options[0]) as first_tab:\n",
+    "        gr.Markdown(\"# \" + prompt_options[0])\n",
+    "        with gr.Group():\n",
+    "            with gr.Row():\n",
+    "                first_system_prompt_txt = gr.Textbox(label=\"System prompt\", value=system_prompts[prompt_options[0]], lines=10, interactive=True )\n",
+    "                first_user_prompt_txt = gr.Textbox(label=\"User prompt\", value=user_prompts[prompt_options[0]], lines=10, interactive=True )\n",
+    "            with gr.Row():\n",
+    "                first_reset_prompts_btn = gr.Button(\"Reset prompts\")\n",
+    "        with gr.Row():\n",
+    "            with gr.Column():\n",
+    "                first_sample_program_rad = gr.Radio(python_sample_options + [\"Custom\"], label=\"Sample program\", value=python_sample_options[0])\n",
+    "                first_python_code_txt = gr.Textbox(label=\"Python code:\", value=python_code_samples[python_sample_options[0]], lines=10, interactive=True)\n",
+    "            with gr.Column():\n",
+    "                first_model_drp = gr.Dropdown(model_options, label=\"Select model\", value=default_model, interactive=True)\n",
+    "                first_convert_btn = gr.Button(\"Convert code\", interactive=True)\n",
+    "                first_cpp_txt = gr.Textbox(label=\"C++ code:\", lines=10, interactive=True)\n",
+    "        with gr.Row():\n",
+    "            with gr.Column():\n",
+    "                with gr.Group():\n",
+    "                    first_python_run_btn = gr.Button(\"Run Python\", interactive=True)\n",
+    "                    first_python_out_txt = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
+    "            with gr.Column():\n",
+    "                with gr.Group():\n",
+    "                    if not compiler_cmd[1] == \"Unavailable\":\n",
+    "                        first_cpp_run_btn = gr.Button(\"Run C++\")\n",
+    "                    else:\n",
+    "                        first_cpp_run_btn = gr.Button(\"No compiler to run C++\", interactive=False)\n",
+    "                    first_cpp_out_txt = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
+    "                    first_architecture_rad = gr.Radio([compiler_cmd[0]], label=\"Architecture\", interactive=False, value=compiler_cmd[0])\n",
+    "                    first_compiler_rad = gr.Radio([compiler_cmd[1]], label=\"Compiler\", interactive=False, value=compiler_cmd[1])\n",
+    "    \n",
+    "    with gr.Tab(prompt_options[1]) as second_tab:\n",
+    "        gr.Markdown(\"# \" + prompt_options[1])\n",
+    "        with gr.Group():\n",
+    "            with gr.Row():\n",
+    "                second_system_prompt_txt = gr.Textbox(label=\"System prompt\", value=system_prompts[prompt_options[1]], lines=10, interactive=True )\n",
+    "                second_user_prompt_txt = gr.Textbox(label=\"User prompt\", value=user_prompts[prompt_options[1]], lines=10, interactive=True )\n",
+    "            with gr.Row():\n",
+    "                second_reset_prompts_btn = gr.Button(\"Reset prompts\")\n",
+    "        with gr.Row():\n",
+    "            with gr.Column():\n",
+    "                second_sample_program_rad = gr.Radio(python_sample_options + [\"Custom\"], label=\"Sample program\", value=python_sample_options[1])\n",
+    "                second_python_code_txt = gr.Textbox(label=\"Python code:\", value=python_code_samples[python_sample_options[1]], lines=10)\n",
+    "            with gr.Column():\n",
+    "                second_model_drp = gr.Dropdown(model_options, label=\"Select model\", value=default_model)\n",
+    "                second_comment_btn = gr.Button(\"Comment code\")\n",
+    "                second_python_code_comments_txt = gr.Textbox(label=\"Commented code\", lines=20)\n",
+    "\n",
+    "    \n",
+    "    with gr.Tab(prompt_options[2]) as third_tab:\n",
+    "        gr.Markdown(\"# \" + prompt_options[2])\n",
+    "        with gr.Group():\n",
+    "            with gr.Row():\n",
+    "                third_system_prompt_txt = gr.Textbox(label=\"System prompt\", value=system_prompts[prompt_options[2]], lines=10, interactive=True )\n",
+    "                third_user_prompt_txt = gr.Textbox(label=\"User prompt\", value=user_prompts[prompt_options[2]], lines=10, interactive=True )\n",
+    "            with gr.Row():\n",
+    "                third_reset_prompts_btn = gr.Button(\"Reset prompts\")\n",
+    "        with gr.Row():\n",
+    "            with gr.Column():\n",
+    "                third_sample_program_rad = gr.Radio(python_sample_options + [\"Custom\"], label=\"Sample program\", value=python_sample_options[1])\n",
+    "                third_python_code_txt = gr.Textbox(label=\"Python code:\", value=python_code_samples[python_sample_options[1]], lines=10)\n",
+    "            with gr.Column():\n",
+    "                third_model_drp = gr.Dropdown(model_options, label=\"Select model\", value=default_model)\n",
+    "                third_unit_test_btn = gr.Button(\"Create unit tests\")\n",
+    "                third_python_unit_tests_txt = gr.Textbox(label=\"Unit tests\", lines=20)\n",
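Because `call_llm` is a generator, wiring it straight into a button's `click` event (as the handlers below do) gives streaming for free: Gradio treats each yielded value as a fresh value for the output textbox, so the converted or annotated code appears incrementally. A stripped-down sketch of the same wiring pattern, with illustrative names standing in for the notebook's components:

```python
import time
import gradio as gr

def fake_llm(prompt):
    # Stand-in for call_llm: yield the growing reply, as the notebook does
    reply = ""
    for word in ["streaming", "into", "a", "Textbox"]:
        reply += word + " "
        time.sleep(0.2)
        yield reply

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Reply")
    gr.Button("Go").click(fake_llm, inputs=[prompt], outputs=[out])

demo.launch()
```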
+    "\n",
+    "    first_tab.select(any_tab_on_select)\n",
+    "    second_tab.select(any_tab_on_select)\n",
+    "    third_tab.select(any_tab_on_select)\n",
+    "    \n",
+    "    first_reset_prompts_btn.click(reset_prompts, outputs=[first_system_prompt_txt, first_user_prompt_txt])\n",
+    "    second_reset_prompts_btn.click(reset_prompts, outputs=[second_system_prompt_txt, second_user_prompt_txt])\n",
+    "    third_reset_prompts_btn.click(reset_prompts, outputs=[third_system_prompt_txt, third_user_prompt_txt])\n",
+    "\n",
+    "    first_sample_program_rad.input(change_python_sample, inputs=[first_sample_program_rad, first_python_code_txt], outputs=[first_python_code_txt])\n",
+    "    first_python_code_txt.input(change_python_sample_to_custom, inputs=[], outputs=[first_sample_program_rad])\n",
+    "    first_convert_btn.click(call_llm, inputs=[first_system_prompt_txt, first_user_prompt_txt, first_python_code_txt, first_model_drp], outputs=[first_cpp_txt])\n",
+    "    first_python_run_btn.click(execute_python, inputs=[first_python_code_txt], outputs=[first_python_out_txt])\n",
+    "    first_cpp_run_btn.click(execute_cpp, inputs=[first_cpp_txt], outputs=[first_cpp_out_txt])\n",
+    "\n",
+    "    second_sample_program_rad.input(change_python_sample, inputs=[second_sample_program_rad, second_python_code_txt], outputs=[second_python_code_txt])\n",
+    "    second_python_code_txt.input(change_python_sample_to_custom, inputs=[], outputs=[second_sample_program_rad])\n",
+    "    second_comment_btn.click(call_llm, inputs=[second_system_prompt_txt, second_user_prompt_txt, second_python_code_txt, second_model_drp], outputs=[second_python_code_comments_txt])\n",
+    "\n",
+    "    third_sample_program_rad.input(change_python_sample, inputs=[third_sample_program_rad, third_python_code_txt], outputs=[third_python_code_txt])\n",
+    "    third_python_code_txt.input(change_python_sample_to_custom, inputs=[], outputs=[third_sample_program_rad])\n",
+    "    third_unit_test_btn.click(call_llm, inputs=[third_system_prompt_txt, third_user_prompt_txt, third_python_code_txt, third_model_drp], outputs=[third_python_unit_tests_txt])\n",
     "ui.launch(inbrowser=True)"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 152,
   "id": "9d0ad093-425b-488e-8c3f-67f729dd9c06",
   "metadata": {},
   "outputs": [],
-   "source": []
+   "source": [
+    "import time\n",
+    "\n",
+    "start_time = time.time()\n",
+    "\n",
+    "print(\"Hello, world\")\n",
+    "\n",
+    "end_time = time.time()\n",
+    "\n",
+    "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")"
+   ]
  }
 ],
 "metadata": {