{
"cells": [
{
"cell_type": "markdown",
"id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
"metadata": {},
"source": [
"# Code Generator\n",
"\n",
"The requirement: use an Open Source model to generate high performance C++ code from Python code\n",
"\n",
"To replicate this, you'll need to set up a HuggingFace endpoint as I do in the video. It's simple to do, and it's quite satisfying to see the results!\n",
"\n",
"It's also an important part of your learning; this is the first example of deploying an open source model to be behind an API. We'll return to this in Week 8, but this should plant a seed in your mind for what's involved in productionizing models."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import io\n",
"import sys\n",
"import json\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import subprocess"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f672e1c-87e9-4865-b760-370fa605e614",
"metadata": {},
"outputs": [],
"source": [
"# environment\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
"metadata": {},
"outputs": [],
"source": [
"# initialize\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6896636f-923e-4a2c-9d6c-fac07828a201",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
"system_message += \"The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(python):\n",
" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n",
" user_prompt += python\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
"metadata": {},
"outputs": [],
"source": [
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
"metadata": {},
"outputs": [],
"source": [
"# write to a file called optimized.cpp\n",
"\n",
"def write_output(cpp):\n",
" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n",
" with open(\"optimized.cpp\", \"w\") as f:\n",
" f.write(code)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
"metadata": {},
"outputs": [],
"source": [
"def optimize_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" print(fragment, end='', flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
"metadata": {},
"outputs": [],
"source": [
"def optimize_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" print(text, end=\"\", flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1cbb778-fa57-43de-b04b-ed523f396c38",
"metadata": {},
"outputs": [],
"source": [
"pi = \"\"\"\n",
"import time\n",
"\n",
"def calculate(iterations, param1, param2):\n",
" result = 1.0\n",
" for i in range(1, iterations+1):\n",
" j = i * param1 - param2\n",
" result -= (1/j)\n",
" j = i * param1 + param2\n",
" result += (1/j)\n",
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
"\"\"\""
]
},
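{
"cell_type": "markdown",
"id": "b2f6d1e0-4a3c-4b9e-8d17-9c5e2f7a1b44",
"metadata": {},
"source": [
"The `pi` program sums the Leibniz series 1 - 1/3 + 1/5 - 1/7 + ..., which converges to π/4, so multiplying the result by 4 approximates π. It's deliberately compute-bound, which makes it a good benchmark for the Python-to-C++ speedup."
]
},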
{
"cell_type": "code",
"execution_count": null,
"id": "7fe1cd4b-d2c5-4303-afed-2115a3fef200",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "105db6f9-343c-491d-8e44-3a5328b81719",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4194e40c-04ab-4940-9d64-b4ad37c5bb40",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
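{
"cell_type": "markdown",
"id": "c4a8e2b6-7d15-4f3a-9b0c-d6e1f8a24357",
"metadata": {},
"source": [
"The `clang++` flags above target Apple Silicon, matching the \"M1 Mac\" assumption in the system prompt. If you're on a different machine, swap in flags for your own compiler and architecture; the next cell is a minimal sketch assuming `g++` is on your PATH (Linux, or Windows with MinGW)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d9b3c7e1-2f48-4a6d-8e5b-1c0a9f7d3e62",
"metadata": {},
"outputs": [],
"source": [
"# If you're not on an M1 Mac, uncomment and adapt these lines in place of the clang++ cells\n",
"# (assumes g++ is installed; on Windows the binary will be optimized.exe):\n",
"\n",
"# !g++ -O3 -std=c++17 -o optimized optimized.cpp\n",
"# !./optimized"
]
},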
{
"cell_type": "code",
"execution_count": null,
"id": "983a11fe-e24d-4c65-8269-9802c5ef3ae6",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
"metadata": {},
"outputs": [],
"source": [
"python_hard = \"\"\"\n",
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
" value = seed\n",
" while True:\n",
" value = (a * value + c) % m\n",
" yield value\n",
" \n",
"def max_subarray_sum(n, seed, min_val, max_val):\n",
" lcg_gen = lcg(seed)\n",
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
" max_sum = float('-inf')\n",
" for i in range(n):\n",
" current_sum = 0\n",
" for j in range(i, n):\n",
" current_sum += random_numbers[j]\n",
" if current_sum > max_sum:\n",
" max_sum = current_sum\n",
" return max_sum\n",
"\n",
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
" total_sum = 0\n",
" lcg_gen = lcg(initial_seed)\n",
" for _ in range(20):\n",
" seed = next(lcg_gen)\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
" return total_sum\n",
"\n",
"# Parameters\n",
"n = 10000 # Number of random numbers\n",
"initial_seed = 42 # Initial seed for the LCG\n",
"min_val = -10 # Minimum value of random numbers\n",
"max_val = 10 # Maximum value of random numbers\n",
"\n",
"# Timing the function\n",
"import time\n",
"start_time = time.time()\n",
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
"end_time = time.time()\n",
"\n",
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
"\"\"\""
]
},
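{
"cell_type": "markdown",
"id": "e5c1a9f3-6b2d-4e7a-8c4f-0d9b3e6a1c58",
"metadata": {},
"source": [
"Two things to notice about `python_hard`: it uses a Linear Congruential Generator (LCG) instead of Python's `random` module so that a C++ port can reproduce exactly the same pseudo-random sequence, as the system prompt demands; and the brute-force O(n^2) maximum subarray sum, repeated for 20 seeds, makes it far more expensive than the `pi` example."
]
},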
{
"cell_type": "code",
"execution_count": null,
"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899",
"metadata": {},
"outputs": [],
"source": [
"exec(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0c181036-8193-4fdd-aef3-fc513b218d43",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8669f56b-8314-4582-a167-78842caea131",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
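{
"cell_type": "markdown",
"id": "f7d2b4a8-1e5c-4d9f-b3a6-2c8e0f4d7a91",
"metadata": {},
"source": [
"`optimize` is a generator; Gradio treats a generator handler as a streaming output, so each `yield` replaces the contents of the output textbox and the C++ code appears to be typed out live."
]
},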
{
"cell_type": "code",
"execution_count": null,
"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" convert = gr.Button(\"Convert code\")\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "19bf2bff-a822-4009-a539-f003b1651383",
"metadata": {},
"outputs": [],
"source": [
"def execute_python(code):\n",
" try:\n",
" output = io.StringIO()\n",
" sys.stdout = output\n",
" exec(code)\n",
" finally:\n",
" sys.stdout = sys.__stdout__\n",
" return output.getvalue()"
]
},
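{
"cell_type": "markdown",
"id": "a3e7c1d5-9f2b-4c6e-8a0d-5b1f9e3c7d24",
"metadata": {},
"source": [
"A quick sanity check before wiring `execute_python` into the UI. This illustrative call isn't part of the original flow, but it should return the captured stdout as a string."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8f4d0a2-3c7e-4b1d-9e6a-0f5c2d8b4e71",
"metadata": {},
"outputs": [],
"source": [
"execute_python('print(\"Hello from execute_python\")')"
]
},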
{
"cell_type": "code",
"execution_count": null,
"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6",
"metadata": {},
"outputs": [],
"source": [
"def execute_cpp(code):\n",
" write_output(code)\n",
" try:\n",
" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
" compile_result = subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n",
" run_cmd = [\"./optimized\"]\n",
" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n",
" return run_result.stdout\n",
" except subprocess.CalledProcessError as e:\n",
" return f\"An error occurred:\\n{e.stderr}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
".python {background-color: #306998;}\n",
".cpp {background-color: #050;}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1303932-160c-424b-97a8-d28c816721b2",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" convert = gr.Button(\"Convert code\")\n",
" with gr.Row():\n",
" python_run = gr.Button(\"Run Python\")\n",
" cpp_run = gr.Button(\"Run C++\")\n",
" with gr.Row():\n",
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bb8c5b4e-ec51-4f21-b3f8-6aa94fede86d",
"metadata": {},
"outputs": [],
"source": [
"from huggingface_hub import login, InferenceClient\n",
"from transformers import AutoTokenizer"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "13347633-4606-4e38-9927-80c39e65c1f1",
"metadata": {},
"outputs": [],
"source": [
"hf_token = os.environ['HF_TOKEN']\n",
"login(hf_token, add_to_git_credential=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef60a4df-6267-4ebd-8eed-dcb917af0a5e",
"metadata": {},
"outputs": [],
"source": [
"code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n",
"code_gemma = \"google/codegemma-7b-it\"\n",
"CODE_QWEN_URL = \"https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud\"\n",
"CODE_GEMMA_URL = \"https://c5hggiyqachmgnqg.us-east-1.aws.endpoints.huggingface.cloud\""
]
},
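{
"cell_type": "markdown",
"id": "c0a6e2f8-5d1b-4a3c-8f7e-9b4d6c2a0e15",
"metadata": {},
"source": [
"The two endpoint URLs above are the ones provisioned for the video and won't work for you. Deploy your own Inference Endpoints for these models from their Hugging Face model pages, then paste your URLs in their place."
]
},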
{
"cell_type": "code",
"execution_count": null,
"id": "695ce389-a903-4533-a2f1-cd9e2a6af8f2",
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n",
"messages = messages_for(pi)\n",
"text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)"
]
},
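{
"cell_type": "markdown",
"id": "d2b8f4c0-7e3a-4d5b-9c1f-6a0e8d4b2f37",
"metadata": {},
"source": [
"`apply_chat_template` wraps the system and user messages in the model's own prompt format; for CodeQwen that should be ChatML-style markers such as `<|im_start|>` and `<|im_end|>`, which you can confirm by printing `text` below."
]
},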
{
"cell_type": "code",
"execution_count": null,
"id": "d4548e96-0b32-4793-bdd6-1b072c2f26ab",
"metadata": {},
"outputs": [],
"source": [
"print(text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bb2a126b-09e7-4966-bc97-0ef5c2cc7896",
"metadata": {},
"outputs": [],
"source": [
"client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n",
"stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
"for r in stream:\n",
" print(r.token.text, end = \"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "127a52e5-ad85-42b7-a0f5-9afda5efe090",
"metadata": {},
"outputs": [],
"source": [
"def stream_code_quen(python):\n",
" tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n",
" messages = messages_for(python)\n",
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
" client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n",
" stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
" result = \"\"\n",
" for r in stream:\n",
" result += r.token.text\n",
" yield result "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a82387d1-7651-4923-995b-fe18356fcaa6",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" elif model==\"CodeQwen\":\n",
" result = stream_code_qwen(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9ca2e6f-60c1-4e5f-b570-63c75b2d189b",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\", \"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" convert = gr.Button(\"Convert code\")\n",
" with gr.Row():\n",
" python_run = gr.Button(\"Run Python\")\n",
" cpp_run = gr.Button(\"Run C++\")\n",
" with gr.Row():\n",
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f12bfe23-135b-45a7-8c6d-0c27d68b0a82",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}