From 8e1a8661350047d293eaaa47dfe4862c5f477764 Mon Sep 17 00:00:00 2001 From: iammarshall Date: Thu, 6 Feb 2025 18:05:09 +0000 Subject: [PATCH] update function names to be more generic and add deepseek model as example --- ...seek_and_hf_inference_provider_added.ipynb | 711 +++--------------- 1 file changed, 86 insertions(+), 625 deletions(-) diff --git a/week4/community-contributions/day4_extra_deepseek_and_hf_inference_provider_added.ipynb b/week4/community-contributions/day4_extra_deepseek_and_hf_inference_provider_added.ipynb index 39b22fc..c5c9145 100644 --- a/week4/community-contributions/day4_extra_deepseek_and_hf_inference_provider_added.ipynb +++ b/week4/community-contributions/day4_extra_deepseek_and_hf_inference_provider_added.ipynb @@ -39,7 +39,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3", "metadata": {}, "outputs": [], @@ -62,7 +62,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "4f672e1c-87e9-4865-b760-370fa605e614", "metadata": {}, "outputs": [ @@ -90,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 19, "id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da", "metadata": {}, "outputs": [], @@ -99,15 +99,15 @@ "\n", "openai = OpenAI()\n", "claude = anthropic.Anthropic()\n", - "code_qwen_inference_hf = OpenAI(base_url=\"https://api-inference.huggingface.co/v1/\", api_key=os.environ['HF_TOKEN'])\n", + "hf_inference_provider = OpenAI(base_url=\"https://api-inference.huggingface.co/v1/\", api_key=os.environ['HF_TOKEN'])\n", "OPENAI_MODEL = \"gpt-4o\"\n", "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n", - "CODE_QWEN_MODEL = \"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\" #\"Qwen/CodeQwen1.5-7B-Chat\"" + "DEEPSEEK_MODEL = \"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\" #\"Qwen/CodeQwen1.5-7B-Chat\"" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "6896636f-923e-4a2c-9d6c-fac07828a201", "metadata": {}, "outputs": [], @@ -119,7 +119,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb", "metadata": {}, "outputs": [], @@ -134,7 +134,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "c6190659-f54c-4951-bef4-4960f8e51cc4", "metadata": {}, "outputs": [], @@ -148,7 +148,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b", "metadata": {}, "outputs": [], @@ -163,7 +163,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9", "metadata": {}, "outputs": [], @@ -180,7 +180,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d", "metadata": {}, "outputs": [], @@ -202,13 +202,13 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 20, "id": "8047295c-ec4a-489c-8dad-79db73a2dfca", "metadata": {}, "outputs": [], "source": [ - "def optimize_code_qwen(python): \n", - " stream = code_qwen_inference_hf.chat.completions.create(model=CODE_QWEN_MODEL, messages=messages_for(python), max_tokens=500, stream=True)\n", + "def optimize_hf_inference_provider(python): \n", + " stream = hf_inference_provider.chat.completions.create(model=DEEPSEEK_MODEL, messages=messages_for(python), max_tokens=500, stream=True)\n", " reply = \"\"\n", " for chunk in stream:\n", " fragment = chunk.choices[0].delta.content or \"\"\n", 
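
For context on the renaming in the hunk above: the patch points an OpenAI-compatible client at the Hugging Face serverless Inference API and streams from the DeepSeek distill model instead of CodeQwen. A minimal sketch of that pattern, using the names this patch introduces (`hf_inference_provider`, `DEEPSEEK_MODEL`) and assuming `HF_TOKEN` is set and a `messages_for(python)` helper exists as in the notebook; the accumulation/printing at the end is illustrative, since the hunk cuts off mid-cell:

```python
import os
from openai import OpenAI

# OpenAI-compatible client pointed at the Hugging Face serverless Inference API.
hf_inference_provider = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=os.environ["HF_TOKEN"],
)
DEEPSEEK_MODEL = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"

def optimize_hf_inference_provider(python, messages_for):
    # Stream the chat completion and print fragments as they arrive.
    stream = hf_inference_provider.chat.completions.create(
        model=DEEPSEEK_MODEL,
        messages=messages_for(python),
        max_tokens=500,
        stream=True,
    )
    reply = ""
    for chunk in stream:
        fragment = chunk.choices[0].delta.content or ""
        reply += fragment
        print(fragment, end="", flush=True)
    return reply
```
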
@@ -219,7 +219,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "id": "a1cbb778-fa57-43de-b04b-ed523f396c38", "metadata": {}, "outputs": [], @@ -247,105 +247,40 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "7fe1cd4b-d2c5-4303-afed-2115a3fef200", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result: 3.141592658589\n", - "Execution Time: 12.479274 seconds\n" - ] - } - ], + "outputs": [], "source": [ "exec(pi)" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "105db6f9-343c-491d-8e44-3a5328b81719", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "```cpp\n", - "#include \n", - "#include \n", - "#include \n", - "\n", - "double calculate(int iterations, int param1, int param2) {\n", - " double result = 1.0;\n", - " for (int i = 1; i <= iterations; ++i) {\n", - " int j = i * param1 - param2;\n", - " result -= 1.0 / j;\n", - " j = i * param1 + param2;\n", - " result += 1.0 / j;\n", - " }\n", - " return result;\n", - "}\n", - "\n", - "int main() {\n", - " auto start_time = std::chrono::high_resolution_clock::now();\n", - " \n", - " double result = calculate(100000000, 4, 1) * 4;\n", - " \n", - " auto end_time = std::chrono::high_resolution_clock::now();\n", - " std::chrono::duration elapsed = end_time - start_time;\n", - " \n", - " std::cout << std::fixed << std::setprecision(12) \n", - " << \"Result: \" << result << std::endl;\n", - " std::cout << \"Execution Time: \" << elapsed.count() << \" seconds\" << std::endl;\n", - " \n", - " return 0;\n", - "}\n", - "```" - ] - } - ], + "outputs": [], "source": [ "optimize_gpt(pi)" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result: 3.141592658589\n", - "Execution Time: 11.925580 seconds\n" - ] - } - ], + "outputs": [], "source": [ "exec(pi)" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "4194e40c-04ab-4940-9d64-b4ad37c5bb40", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result: 3.141592658589\n", - "Execution Time: 0.320651458000 seconds\n" - ] - } - ], + "outputs": [], "source": [ "!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", "!./optimized" @@ -353,65 +288,20 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "983a11fe-e24d-4c65-8269-9802c5ef3ae6", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "#include \n", - "omanip>e lude (i) * param1 - param2;\n", - "1.0 / j);esult -= (\n", - "_cast(i) * param1 + param2;\n", - "1.0 / j);esult += (\n", - " }\n", - " return result;\n", - "}\n", - "\n", - "() {main\n", - "d::chrono::high_resolution_clock::now();\n", - " \n", - "(100'000'000, 4, 1) * 4;ulate\n", - " \n", - "chrono::high_resolution_clock::now();\n", - "ono::duration_cast(end_time - start_time);\n", - "\n", - "d::setprecision(12);::fixed << st\n", - " << \"Result: \" << result << std::endl;\n", - "Execution Time: \" << duration.count() / 1e6 << \" seconds\" << std::endl;\n", - "\n", - "0; return \n", - "}" - ] - } - ], + "outputs": [], "source": [ "optimize_claude(pi)" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": 
"d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result: 3.141592658589\n", - "Execution Time: 0.317956000000 seconds\n" - ] - } - ], + "outputs": [], "source": [ "!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", "!./optimized" @@ -419,7 +309,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 13, "id": "c3b497b3-f569-420e-b92e-fb0f49957ce0", "metadata": {}, "outputs": [], @@ -471,134 +361,30 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "dab5e4bc-276c-4555-bd4c-12c699d5e899", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total Maximum Subarray Sum (20 runs): 10980\n", - "Execution Time: 38.106860 seconds\n" - ] - } - ], + "outputs": [], "source": [ "exec(python_hard)" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "```cpp\n", - "#include \n", - "#include \n", - "#include \n", - "#include \n", - "#include \n", - "\n", - " Linear Congruential Generator (LCG)\n", - "class LCG {\n", - "public:\n", - " LCG(uint32_t seed, uint32_t a = 1664525, uint32_t c = 1013904223, uint32_t m = 1U << 32)\n", - " : value(seed), a(a), c(c), m(m) {}\n", - "\n", - " uint32_t next() {\n", - " value = (a * value + c) % m;\n", - " return value;\n", - " }\n", - "\n", - "private:\n", - " uint32_t value, a, c, m;\n", - "};\n", - "\n", - " Finds the max subarray sum in an array\n", - "int32_t max_subarray_sum(int n, uint32_t seed, int min_val, int max_val) {\n", - " LCG lcg(seed);\n", - " int range = max_val - min_val + 1;\n", - " std::vector random_numbers(n);\n", - "\n", - " for (int i = 0; i < n; ++i) {\n", - " random_numbers[i] = lcg.next() % range + min_val;\n", - " }\n", - "\n", - " int32_t max_sum = std::numeric_limits::min();\n", - " for (int i = 0; i < n; ++i) {\n", - " int32_t current_sum = 0;\n", - " for (int j = i; j < n; ++j) {\n", - " current_sum += random_numbers[j];\n", - " if (current_sum > max_sum) {\n", - " max_sum = current_sum;\n", - " }\n", - " }\n", - " }\n", - " return max_sum;\n", - "}\n", - "\n", - " Computes the total max subarray sum for 20 runs\n", - "int64_t total_max_subarray_sum(int n, uint32_t initial_seed, int min_val, int max_val) {\n", - " int64_t total_sum = 0;\n", - " LCG lcg(initial_seed);\n", - "\n", - " for (int i = 0; i < 20; ++i) {\n", - " uint32_t seed = lcg.next();\n", - " total_sum += max_subarray_sum(n, seed, min_val, max_val);\n", - " }\n", - " return total_sum;\n", - "}\n", - "\n", - "int main() {\n", - " int n = 10000;\n", - " uint32_t initial_seed = 42;\n", - " int min_val = -10;\n", - " int max_val = 10;\n", - " \n", - " auto start_time = std::chrono::high_resolution_clock::now();\n", - " int64_t result = total_max_subarray_sum(n, initial_seed, min_val, max_val);\n", - " auto end_time = std::chrono::high_resolution_clock::now();\n", - "\n", - " std::chrono::duration execution_time = end_time - start_time;\n", - "\n", - " std::cout << \"Total Maximum Subarray Sum (20 runs): \" << result << std::endl;\n", - " std::cout << \"Execution Time: \" << execution_time.count() << \" seconds\" << std::endl;\n", - " \n", - " return 0;\n", - "}\n", - "```" - ] - } - ], + "outputs": [], "source": [ "optimize_gpt(python_hard)" ] }, { "cell_type": "code", - "execution_count": 21, + 
"execution_count": null, "id": "e0b3d073-88a2-40b2-831c-6f0c345c256f", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[1moptimized.cpp:11:87: \u001b[0m\u001b[0;1;35mwarning: \u001b[0m\u001b[1mshift count >= width of type [-Wshift-count-overflow]\u001b[0m\n", - " 11 | LCG(uint32_t seed, uint32_t a = 1664525, uint32_t c = 1013904223, uint32_t m = 1U << 32)\u001b[0m\n", - " | \u001b[0;1;32m ^ ~~\n", - "1 warning generated.\n", - "Total Maximum Subarray Sum (20 runs): 0\n", - "Execution Time: 0.553671 seconds\n" - ] - } - ], + "outputs": [], "source": [ "!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", "!./optimized" @@ -606,108 +392,20 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "#include \n", - "#include \n", - "#include \n", - " \n", - "omanip>e random_numbers(n);\n", - " (int i = 0; i < n; ++i) {\n", - "[i] = lcg.next() % (max_val - min_val + 1) + min_val;\n", - " }\n", - "\n", - "_limits::min();meric\n", - " int64_t current_sum = 0;\n", - "0; int64_t min_sum = \n", - "\n", - "0; i < n; ++i) { \n", - "i]; current_sum += random_numbers[\n", - "max_sum, current_sum - min_sum);\n", - " min(min_sum, current_sum);\n", - " }\n", - "\n", - " return max_sum;\n", - "}\n", - "\n", - "(int n, uint64_t initial_seed, int min_val, int max_val) {\n", - "_sum = 0;_t total\n", - "G lcg(initial_seed);\n", - " < 20; ++i) {i = 0; i\n", - "uint64_t seed = lcg.next();\n", - "array_sum(n, seed, min_val, max_val);\n", - " }\n", - " return total_sum;\n", - "}\n", - "\n", - " main() {\n", - "000;int n = 10\n", - "seed = 42;_t initial_\n", - "val = -10;n_\n", - "val = 10;ax_\n", - "\n", - "time = high_resolution_clock::now();\n", - "max_subarray_sum(n, initial_seed, min_val, max_val);\n", - "resolution_clock::now();_\n", - "\n", - "(end_time - start_time);\n", - "\n", - " Subarray Sum (20 runs): \" << result << endl;\n", - "< fixed << setprecision(6) << duration.count() / 1e6 << \" seconds\" << endl;\n", - "\n", - " return 0;\n", - "}" - ] - } - ], + "outputs": [], "source": [ "optimize_claude(python_hard)" ] }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "id": "0c181036-8193-4fdd-aef3-fc513b218d43", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Total Maximum Subarray Sum (20 runs): 10980\n", - "Execution Time: 0.001604 seconds\n" - ] - } - ], + "outputs": [], "source": [ "!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", "!./optimized" @@ -715,7 +413,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 14, "id": "0be9f47d-5213-4700-b0e2-d444c7c738c0", "metadata": {}, "outputs": [], @@ -731,7 +429,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 15, "id": "8669f56b-8314-4582-a167-78842caea131", "metadata": {}, "outputs": [], @@ -752,13 +450,13 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 21, "id": "fcb30458-1023-41d7-bc9a-7e95d5e782aa", "metadata": {}, "outputs": [], "source": [ - "def stream_code_qwen(python): \n", - " stream = code_qwen_inference_hf.chat.completions.create(model=CODE_QWEN_MODEL, messages=messages_for(python), max_tokens=500, stream=True)\n", + "def stream_hf_inference_provider(python): \n", + " stream = 
hf_inference_provider.chat.completions.create(model=DEEPSEEK_MODEL, messages=messages_for(python), max_tokens=500, stream=True)\n", " reply = \"\"\n", " for chunk in stream:\n", " fragment = chunk.choices[0].delta.content or \"\"\n", @@ -768,7 +466,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 17, "id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d", "metadata": {}, "outputs": [], @@ -779,7 +477,7 @@ " elif model==\"Claude\":\n", " result = stream_claude(python)\n", " elif model==\"Code Qwen\":\n", - " result = stream_code_qwen(python)\n", + " result = stream_hf_inference_provider(python)\n", " else:\n", " raise ValueError(\"Unknown model\")\n", " for stream_so_far in result:\n", @@ -788,14 +486,23 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 22, "id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a", "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7861\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, { "data": { "text/html": [ - "
" + "
" ], "text/plain": [ "" @@ -808,7 +515,7 @@ "data": { "text/plain": [] }, - "execution_count": 42, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -829,7 +536,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 17, "id": "19bf2bff-a822-4009-a539-f003b1651383", "metadata": {}, "outputs": [], @@ -846,7 +553,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 18, "id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6", "metadata": {}, "outputs": [], @@ -864,7 +571,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 19, "id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442", "metadata": {}, "outputs": [], @@ -877,7 +584,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 20, "id": "f1303932-160c-424b-97a8-d28c816721b2", "metadata": {}, "outputs": [ @@ -885,7 +592,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "* Running on local URL: http://127.0.0.1:7865\n", + "* Running on local URL: http://127.0.0.1:7861\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] @@ -893,7 +600,7 @@ { "data": { "text/html": [ - "
" + "
" ], "text/plain": [ "" @@ -906,147 +613,9 @@ "data": { "text/plain": [] }, - "execution_count": 32, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Traceback (most recent call last):\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n", - " response = await route_utils.call_process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n", - " output = await app.get_blocks().process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2051, in process_api\n", - " result = await self.call_function(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1610, in call_function\n", - " prediction = await utils.async_iteration(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 728, in async_iteration\n", - " return await anext(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 722, in __anext__\n", - " return await anyio.to_thread.run_sync(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n", - " return await get_async_backend().run_sync_in_worker_thread(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2505, in run_sync_in_worker_thread\n", - " return await future\n", - " ^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 1005, in run\n", - " result = context.run(func, *args)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 705, in run_sync_iterator_async\n", - " return next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 866, in gen_wrapper\n", - " response = next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"/var/folders/jp/lj6x3d515f92pn0n_tm5wqzr0000gn/T/ipykernel_87995/3588058922.py\", line 10, in optimize\n", - " for stream_so_far in result:\n", - " File \"/var/folders/jp/lj6x3d515f92pn0n_tm5wqzr0000gn/T/ipykernel_87995/3037730415.py\", line 2, in stream_code_qwen\n", - " stream = code_qwen_inference_hf.chat.completions.create(model=CODE_QWEN_MODEL, messages=messages_for(python), stream=True)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 279, in wrapper\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 863, in create\n", - " return self._post(\n", - " ^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1283, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, 
stream=stream, stream_cls=stream_cls))\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 960, in request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1049, in _request\n", - " return self._retry_request(\n", - " ^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1098, in _retry_request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1049, in _request\n", - " return self._retry_request(\n", - " ^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1098, in _retry_request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1064, in _request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.InternalServerError: Error code: 503 - {'error': 'Model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B is currently loading', 'estimated_time': 142.1670379638672}\n", - "Traceback (most recent call last):\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n", - " response = await route_utils.call_process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n", - " output = await app.get_blocks().process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2051, in process_api\n", - " result = await self.call_function(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1610, in call_function\n", - " prediction = await utils.async_iteration(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 728, in async_iteration\n", - " return await anext(iterator)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 722, in __anext__\n", - " return await anyio.to_thread.run_sync(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n", - " return await get_async_backend().run_sync_in_worker_thread(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2505, in run_sync_in_worker_thread\n", - " return await future\n", - " ^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 1005, in run\n", - " result = context.run(func, *args)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 705, in run_sync_iterator_async\n", - " return next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 866, in 
gen_wrapper\n", - " response = next(iterator)\n", - " ^^^^^^^^^^^^^^\n", - " File \"/var/folders/jp/lj6x3d515f92pn0n_tm5wqzr0000gn/T/ipykernel_87995/3588058922.py\", line 10, in optimize\n", - " for stream_so_far in result:\n", - " File \"/var/folders/jp/lj6x3d515f92pn0n_tm5wqzr0000gn/T/ipykernel_87995/3037730415.py\", line 2, in stream_code_qwen\n", - " stream = code_qwen_inference_hf.chat.completions.create(model=CODE_QWEN_MODEL, messages=messages_for(python), stream=True)\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_utils/_utils.py\", line 279, in wrapper\n", - " return func(*args, **kwargs)\n", - " ^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/resources/chat/completions.py\", line 863, in create\n", - " return self._post(\n", - " ^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1283, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 960, in request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1049, in _request\n", - " return self._retry_request(\n", - " ^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1098, in _retry_request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1049, in _request\n", - " return self._retry_request(\n", - " ^^^^^^^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1098, in _retry_request\n", - " return self._request(\n", - " ^^^^^^^^^^^^^^\n", - " File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/openai/_base_client.py\", line 1064, in _request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.InternalServerError: Error code: 503 - {'error': 'Model deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B is currently loading', 'estimated_time': 142.1670379638672}\n" - ] } ], "source": [ @@ -1083,7 +652,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": null, "id": "bb8c5b4e-ec51-4f21-b3f8-6aa94fede86d", "metadata": {}, "outputs": [], @@ -1094,18 +663,10 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": null, "id": "13347633-4606-4e38-9927-80c39e65c1f1", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured.\n" - ] - } - ], + "outputs": [], "source": [ "hf_token = os.environ['HF_TOKEN']\n", "login(hf_token, add_to_git_credential=True)" @@ -1113,7 +674,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": null, "id": "ef60a4df-6267-4ebd-8eed-dcb917af0a5e", "metadata": {}, "outputs": [], @@ -1126,53 +687,10 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "id": "695ce389-a903-4533-a2f1-cd9e2a6af8f2", "metadata": {}, - 
"outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "691c42df67f94725afdb01ed86ac0bf4", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "tokenizer_config.json: 0%| | 0.00/972 [00:00system\n", - "You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.<|im_end|>\n", - "<|im_start|>user\n", - "Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. Respond only with C++ code; do not explain your work other than a few comments. Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\n", - "\n", - "\n", - "import time\n", - "\n", - "def calculate(iterations, param1, param2):\n", - " result = 1.0\n", - " for i in range(1, iterations+1):\n", - " j = i * param1 - param2\n", - " result -= (1/j)\n", - " j = i * param1 + param2\n", - " result += (1/j)\n", - " return result\n", - "\n", - "start_time = time.time()\n", - "result = calculate(100_000_000, 4, 1) * 4\n", - "end_time = time.time()\n", - "\n", - "print(f\"Result: {result:.12f}\")\n", - "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", - "<|im_end|>\n", - "<|im_start|>assistant\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "print(text)" ] }, { "cell_type": "code", - "execution_count": 38, + "execution_count": null, "id": "bb2a126b-09e7-4966-bc97-0ef5c2cc7896", "metadata": {}, - "outputs": [ - { - "ename": "HfHubHTTPError", - "evalue": "(Request ID: HaGzFE)\n\n403 Forbidden: None.\nCannot access content at: https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud/.\nMake sure your token has the correct permissions.\nForbidden: You don't have the required permissions to complete this action, missing permissions: inference.endpoints.infer.write", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/huggingface_hub/utils/_http.py:406\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 405\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 406\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 407\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m e:\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/models.py:1024\u001b[0m, in \u001b[0;36mResponse.raise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1023\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m http_error_msg:\n\u001b[0;32m-> 1024\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HTTPError(http_error_msg, response\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n", - "\u001b[0;31mHTTPError\u001b[0m: 403 Client Error: Forbidden for url: https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud/", - "\nThe above exception was the direct cause of the 
following exception:\n", - "\u001b[0;31mHfHubHTTPError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[38], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m client \u001b[38;5;241m=\u001b[39m InferenceClient(CODE_QWEN_URL, token\u001b[38;5;241m=\u001b[39mhf_token)\n\u001b[0;32m----> 2\u001b[0m stream \u001b[38;5;241m=\u001b[39m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtext_generation\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdetails\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_new_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m3000\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m r \u001b[38;5;129;01min\u001b[39;00m stream:\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(r\u001b[38;5;241m.\u001b[39mtoken\u001b[38;5;241m.\u001b[39mtext, end \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/huggingface_hub/inference/_client.py:2332\u001b[0m, in \u001b[0;36mInferenceClient.text_generation\u001b[0;34m(self, prompt, details, stream, model, adapter_id, best_of, decoder_input_details, do_sample, frequency_penalty, grammar, max_new_tokens, repetition_penalty, return_full_text, seed, stop, stop_sequences, temperature, top_k, top_n_tokens, top_p, truncate, typical_p, watermark)\u001b[0m\n\u001b[1;32m 2307\u001b[0m _set_unsupported_text_generation_kwargs(model, unused_params)\n\u001b[1;32m 2308\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtext_generation( \u001b[38;5;66;03m# type: ignore\u001b[39;00m\n\u001b[1;32m 2309\u001b[0m prompt\u001b[38;5;241m=\u001b[39mprompt,\n\u001b[1;32m 2310\u001b[0m details\u001b[38;5;241m=\u001b[39mdetails,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 2330\u001b[0m watermark\u001b[38;5;241m=\u001b[39mwatermark,\n\u001b[1;32m 2331\u001b[0m )\n\u001b[0;32m-> 2332\u001b[0m \u001b[43mraise_text_generation_error\u001b[49m\u001b[43m(\u001b[49m\u001b[43me\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2334\u001b[0m \u001b[38;5;66;03m# Parse output\u001b[39;00m\n\u001b[1;32m 2335\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m stream:\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/huggingface_hub/inference/_common.py:466\u001b[0m, in \u001b[0;36mraise_text_generation_error\u001b[0;34m(http_error)\u001b[0m\n\u001b[1;32m 463\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exception \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhttp_error\u001b[39;00m\n\u001b[1;32m 465\u001b[0m \u001b[38;5;66;03m# Otherwise, fallback to default error\u001b[39;00m\n\u001b[0;32m--> 466\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m http_error\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/huggingface_hub/inference/_client.py:2302\u001b[0m, in \u001b[0;36mInferenceClient.text_generation\u001b[0;34m(self, prompt, details, stream, model, adapter_id, best_of, decoder_input_details, do_sample, frequency_penalty, grammar, max_new_tokens, repetition_penalty, return_full_text, seed, stop, stop_sequences, temperature, top_k, top_n_tokens, top_p, truncate, typical_p, 
watermark)\u001b[0m\n\u001b[1;32m 2300\u001b[0m \u001b[38;5;66;03m# Handle errors separately for more precise error messages\u001b[39;00m\n\u001b[1;32m 2301\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 2302\u001b[0m bytes_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpost\u001b[49m\u001b[43m(\u001b[49m\u001b[43mjson\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpayload\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtext-generation\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# type: ignore\u001b[39;00m\n\u001b[1;32m 2303\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 2304\u001b[0m match \u001b[38;5;241m=\u001b[39m MODEL_KWARGS_NOT_USED_REGEX\u001b[38;5;241m.\u001b[39msearch(\u001b[38;5;28mstr\u001b[39m(e))\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/huggingface_hub/inference/_client.py:296\u001b[0m, in \u001b[0;36mInferenceClient.post\u001b[0;34m(self, json, data, model, task, stream)\u001b[0m\n\u001b[1;32m 293\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m InferenceTimeoutError(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInference call timed out: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01merror\u001b[39;00m \u001b[38;5;66;03m# type: ignore\u001b[39;00m\n\u001b[1;32m 295\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 296\u001b[0m \u001b[43mhf_raise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43mresponse\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 297\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\u001b[38;5;241m.\u001b[39miter_lines() \u001b[38;5;28;01mif\u001b[39;00m stream \u001b[38;5;28;01melse\u001b[39;00m response\u001b[38;5;241m.\u001b[39mcontent\n\u001b[1;32m 298\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m error:\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/huggingface_hub/utils/_http.py:468\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 462\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mstatus_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m403\u001b[39m:\n\u001b[1;32m 463\u001b[0m message \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 464\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39mstatus_code\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m Forbidden: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00merror_message\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 465\u001b[0m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mCannot access content at: 
\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 466\u001b[0m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mMake sure your token has the correct permissions.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 467\u001b[0m )\n\u001b[0;32m--> 468\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m _format(HfHubHTTPError, message, response) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[1;32m 470\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mstatus_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m416\u001b[39m:\n\u001b[1;32m 471\u001b[0m range_header \u001b[38;5;241m=\u001b[39m response\u001b[38;5;241m.\u001b[39mrequest\u001b[38;5;241m.\u001b[39mheaders\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mRange\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", - "\u001b[0;31mHfHubHTTPError\u001b[0m: (Request ID: HaGzFE)\n\n403 Forbidden: None.\nCannot access content at: https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud/.\nMake sure your token has the correct permissions.\nForbidden: You don't have the required permissions to complete this action, missing permissions: inference.endpoints.infer.write" - ] - } - ], + "outputs": [], "source": [ "client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n", "stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n", @@ -1272,7 +735,7 @@ "metadata": {}, "outputs": [], "source": [ - "def stream_code_qwen(python):\n", + "def stream_hf_inference_provider(python):\n", " tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", " messages = messages_for(python)\n", " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", @@ -1298,7 +761,7 @@ " elif model==\"Claude\":\n", " result = stream_claude(python)\n", " elif model==\"CodeQwen\":\n", - " result = stream_code_qwen(python)\n", + " result = stream_hf_inference_provider(python)\n", " else:\n", " raise ValueError(\"Unknown model\")\n", " for stream_so_far in result:\n", @@ -1442,17 +905,7 @@ "execution_count": null, "id": "f9ca2e6f-60c1-4e5f-b570-63c75b2d189b", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7866\n", - "\n", - "To create a public link, set `share=True` in `launch()`.\n" - ] - } - ], + "outputs": [], "source": [ "compiler_cmd = c_compiler_cmd(\"optimized\")\n", "\n", @@ -1487,6 +940,14 @@ "\n", "ui.launch(inbrowser=True)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d0ad093-425b-488e-8c3f-67f729dd9c06", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -1505,7 +966,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.12.6" } }, "nbformat": 4,
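
The later hunks apply the same rename to the endpoint-based streamer used by the Gradio UI. A minimal sketch of that flow, assembled from the cells touched by this patch and assuming the notebook's globals (`code_qwen`, `CODE_QWEN_URL`, `hf_token`, `messages_for`) are defined; yielding the accumulated text is an assumption based on how the `optimize` dispatcher iterates the result:

```python
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

def stream_hf_inference_provider(python):
    # Build the chat-formatted prompt locally, then stream tokens from the endpoint.
    tokenizer = AutoTokenizer.from_pretrained(code_qwen)
    messages = messages_for(python)
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    client = InferenceClient(CODE_QWEN_URL, token=hf_token)
    stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)
    result = ""
    for r in stream:
        result += r.token.text
        yield result  # assumed: Gradio consumes this as an incremental stream
```
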