From 203df5ed4c9d10ea0cc7f0f9b1b4c44cf70ee502 Mon Sep 17 00:00:00 2001 From: Sandeep Gangaram Date: Wed, 11 Dec 2024 23:13:48 +0530 Subject: [PATCH 01/26] week 4 solution with audio input --- .../week2_solution_with_audio.ipynb | 461 ++++++++++++++++++ 1 file changed, 461 insertions(+) create mode 100644 week2/community-contributions/week2_solution_with_audio.ipynb diff --git a/week2/community-contributions/week2_solution_with_audio.ipynb b/week2/community-contributions/week2_solution_with_audio.ipynb new file mode 100644 index 0000000..97a8e2c --- /dev/null +++ b/week2/community-contributions/week2_solution_with_audio.ipynb @@ -0,0 +1,461 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import google.generativeai\n", + "import anthropic" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_CLAUDE = 'claude-3-5-sonnet-20240620'\n", + "MODEL_GEMINI = 'gemini-1.5-flash'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [], + "source": [ + "# set up environment\n", + "\n", + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"a6fd8538-0be6-4539-8add-00e42133a641", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic and Google\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()\n", + "\n", + "google.generativeai.configure()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "852faee9-79aa-4741-a676-4f5145ccccdc", + "metadata": {}, + "outputs": [], + "source": [ + "import tempfile\n", + "import subprocess\n", + "from io import BytesIO\n", + "from pydub import AudioSegment\n", + "import time\n", + "\n", + "def play_audio(audio_segment):\n", + " temp_dir = tempfile.gettempdir()\n", + " temp_path = os.path.join(temp_dir, \"temp_audio.wav\")\n", + " try:\n", + " audio_segment.export(temp_path, format=\"wav\")\n", + " subprocess.call([\n", + " \"ffplay\",\n", + " \"-nodisp\",\n", + " \"-autoexit\",\n", + " \"-hide_banner\",\n", + " temp_path\n", + " ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n", + " finally:\n", + " try:\n", + " os.remove(temp_path)\n", + " except Exception:\n", + " pass\n", + " \n", + "def talker(message):\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"onyx\", # Also, try replacing onyx with alloy\n", + " input=message\n", + " )\n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + " play_audio(audio)\n", + "\n", + "talker(\"Well hi there\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8595807b-8ae2-4e1b-95d9-e8532142e8bb", + "metadata": {}, + "outputs": [], + "source": [ + "# prompts\n", + "general_prompt = \"Please be as technical as possible with your answers.\\\n", + "Only answer questions about topics you have expertise in.\\\n", + "If you do not know something say so.\"\n", + "\n", + "additional_prompt_gpt = \"Analyze the user query and determine if the content is primarily related to \\\n", + "coding, software engineering, data science and 
LLMs. \\\n", + "If so please answer it yourself else if it is primarily related to \\\n", + "physics, chemistry or biology get answers from tool ask_gemini or \\\n", + "if it belongs to subject related to finance, business or economics get answers from tool ask_claude.\"\n", + "\n", + "system_prompt_gpt = \"You are a helpful technical tutor who is an expert in \\\n", + "coding, software engineering, data science and LLMs.\"+ additional_prompt_gpt + general_prompt\n", + "system_prompt_gemini = \"You are a helpful technical tutor who is an expert in physics, chemistry and biology.\" + general_prompt\n", + "system_prompt_claude = \"You are a helpful technical tutor who is an expert in finance, business and economics.\" + general_prompt\n", + "\n", + "def get_user_prompt(question):\n", + " return \"Please give a detailed explanation to the following question: \" + question" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24d4a313-60b0-4696-b455-6cfef95ad2fe", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude(question):\n", + " result = claude.messages.create(\n", + " model=MODEL_CLAUDE,\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_prompt_claude,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": get_user_prompt(question)},\n", + " ],\n", + " )\n", + " \n", + " return result.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd5d5345-54ab-470b-9b5b-5611a7981458", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gemini(question):\n", + " gemini = google.generativeai.GenerativeModel(\n", + " model_name=MODEL_GEMINI,\n", + " system_instruction=system_prompt_gemini\n", + " )\n", + " response = gemini.generate_content(get_user_prompt(question))\n", + " response = response.text\n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f74da8f-56d1-405e-bc81-040f5428d296", + "metadata": {}, + "outputs": [], + 
"source": [ + "# tools and functions\n", + "\n", + "def ask_claude(question):\n", + " print(f\"Tool ask_claude called for {question}\")\n", + " return call_claude(question)\n", + "def ask_gemini(question):\n", + " print(f\"Tool ask_gemini called for {question}\")\n", + " return call_gemini(question)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c469304d-99b4-42ee-ab02-c9216b61594b", + "metadata": {}, + "outputs": [], + "source": [ + "ask_claude_function = {\n", + " \"name\": \"ask_claude\",\n", + " \"description\": \"Get the answer to the question related to a topic this agent is faimiliar with. Call this whenever you need to answer something related to finance, marketing, sales or business in general.For example 'What is gross margin' or 'Explain stock market'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"question_for_topic\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The question which is related to finance, business or economics.\",\n", + " },\n", + " },\n", + " \"required\": [\"question_for_topic\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}\n", + "\n", + "ask_gemini_function = {\n", + " \"name\": \"ask_gemini\",\n", + " \"description\": \"Get the answer to the question related to a topic this agent is faimiliar with. 
Call this whenever you need to answer something related to physics, chemistry or biology.Few examples: 'What is gravity','How do rockets work?', 'What is ATP'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"question_for_topic\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The question which is related to physics, chemistry or biology\",\n", + " },\n", + " },\n", + " \"required\": [\"question_for_topic\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73a60096-c49b-401f-bfd3-d1d40f4563d2", + "metadata": {}, + "outputs": [], + "source": [ + "tools = [{\"type\": \"function\", \"function\": ask_claude_function},\n", + " {\"type\": \"function\", \"function\": ask_gemini_function}]\n", + "tools_functions_map = {\n", + " \"ask_claude\":ask_claude,\n", + " \"ask_gemini\":ask_gemini\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d54e758-42b2-42f2-a8eb-49c35d44acc6", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(history):\n", + " messages = [{\"role\": \"system\", \"content\": system_prompt_gpt}] + history\n", + " stream = openai.chat.completions.create(model=MODEL_GPT, messages=messages, tools=tools, stream=True)\n", + " \n", + " full_response = \"\"\n", + " history += [{\"role\":\"assistant\", \"content\":full_response}]\n", + " \n", + " tool_call_accumulator = \"\" # Accumulator for JSON fragments of tool call arguments\n", + " tool_call_id = None # Current tool call ID\n", + " tool_call_function_name = None # Function name\n", + " tool_calls = [] # List to store complete tool calls\n", + "\n", + " for chunk in stream:\n", + " if chunk.choices[0].delta.content:\n", + " full_response += chunk.choices[0].delta.content or \"\"\n", + " history[-1]['content']=full_response\n", + " yield history\n", + " \n", + " if chunk.choices[0].delta.tool_calls:\n", + " message = 
chunk.choices[0].delta\n", + " for tc in chunk.choices[0].delta.tool_calls:\n", + " if tc.id: # New tool call detected here\n", + " tool_call_id = tc.id\n", + " if tool_call_function_name is None:\n", + " tool_call_function_name = tc.function.name\n", + " \n", + " tool_call_accumulator += tc.function.arguments if tc.function.arguments else \"\"\n", + " \n", + " # When the accumulated JSON string seems complete then:\n", + " try:\n", + " func_args = json.loads(tool_call_accumulator)\n", + " \n", + " # Handle tool call and get response\n", + " tool_response, tool_call = handle_tool_call(tool_call_function_name, func_args, tool_call_id)\n", + " \n", + " tool_calls.append(tool_call)\n", + "\n", + " # Add tool call and tool response to messages this is required by openAI api\n", + " messages.append({\n", + " \"role\": \"assistant\",\n", + " \"tool_calls\": tool_calls\n", + " })\n", + " messages.append(tool_response)\n", + " \n", + " # Create new response with full context\n", + " response = openai.chat.completions.create(\n", + " model=MODEL_GPT, \n", + " messages=messages, \n", + " stream=True\n", + " )\n", + " \n", + " # Reset and accumulate new full response\n", + " full_response = \"\"\n", + " for chunk in response:\n", + " if chunk.choices[0].delta.content:\n", + " full_response += chunk.choices[0].delta.content or \"\"\n", + " history[-1]['content'] = full_response\n", + " yield history\n", + " \n", + " # Reset tool call accumulator and related variables\n", + " tool_call_accumulator = \"\"\n", + " tool_call_id = None\n", + " tool_call_function_name = None\n", + " tool_calls = []\n", + "\n", + " except json.JSONDecodeError:\n", + " # Incomplete JSON; continue accumulating\n", + " pass\n", + "\n", + " # trigger text-to-audio once full response available\n", + " talker(full_response)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "515d3774-cc2c-44cd-af9b-768a63ed90dc", + "metadata": {}, + "outputs": [], + "source": [ + "# We have to write 
that function handle_tool_call:\n", + "def handle_tool_call(function_name, arguments, tool_call_id):\n", + " question = arguments.get('question_for_topic')\n", + " \n", + " # Prepare tool call information\n", + " tool_call = {\n", + " \"id\": tool_call_id,\n", + " \"type\": \"function\",\n", + " \"function\": {\n", + " \"name\": function_name,\n", + " \"arguments\": json.dumps(arguments)\n", + " }\n", + " }\n", + " \n", + " if function_name in tools_functions_map:\n", + " answer = tools_functions_map[function_name](question)\n", + " response = {\n", + " \"role\": \"tool\",\n", + " \"content\": json.dumps({\"question\": question, \"answer\" : answer}),\n", + " \"tool_call_id\": tool_call_id\n", + " }\n", + "\n", + " return response, tool_call" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d7cc622-8635-4693-afa3-b5bcc2f9a63d", + "metadata": {}, + "outputs": [], + "source": [ + "def transcribe_audio(audio_file_path):\n", + " try:\n", + " audio_file = open(audio_file_path, \"rb\")\n", + " response = openai.audio.transcriptions.create(model=\"whisper-1\", file=audio_file) \n", + " return response.text\n", + " except Exception as e:\n", + " return f\"An error occurred: {e}\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4ded9b3f-83e1-4971-9714-4894f2982b5a", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " chatbot = gr.Chatbot(height=500, type=\"messages\")\n", + " # image_output = gr.Image(height=500)\n", + " with gr.Row():\n", + " entry = gr.Textbox(label=\"Ask our technical expert anything:\")\n", + " audio_input = gr.Audio(\n", + " sources=\"microphone\", \n", + " type=\"filepath\",\n", + " label=\"Record audio\",\n", + " editable=False,\n", + " waveform_options=gr.WaveformOptions(\n", + " show_recording_waveform=False,\n", + " ),\n", + " )\n", + "\n", + " # Add event listener for audio stop recording and show text on input area\n", + 
" audio_input.stop_recording(\n", + " fn=transcribe_audio, \n", + " inputs=audio_input, \n", + " outputs=entry\n", + " )\n", + " \n", + " with gr.Row():\n", + " clear = gr.Button(\"Clear\")\n", + "\n", + " def do_entry(message, history):\n", + " history += [{\"role\":\"user\", \"content\":message}]\n", + " yield \"\", history\n", + " \n", + " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry,chatbot]).then(\n", + " chat, inputs=chatbot, outputs=chatbot)\n", + " \n", + " clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n", + "\n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da663d73-dd2a-4fff-84df-2209cf2b330b", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "532cb948-7733-4323-b85f-febfe2631e66", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 5f7514d997dacdfebab6407afb619ef237f8e9df Mon Sep 17 00:00:00 2001 From: Sandeep Gangaram Date: Wed, 11 Dec 2024 23:21:57 +0530 Subject: [PATCH 02/26] rename file --- ...week2_multimodal_chatbot_with_audio.ipynb} | 45 ++++++++++++++----- 1 file changed, 33 insertions(+), 12 deletions(-) rename week2/community-contributions/{week2_solution_with_audio.ipynb => week2_multimodal_chatbot_with_audio.ipynb} (94%) diff --git a/week2/community-contributions/week2_solution_with_audio.ipynb b/week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb similarity index 94% rename from 
week2/community-contributions/week2_solution_with_audio.ipynb rename to week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb index 97a8e2c..daff689 100644 --- a/week2/community-contributions/week2_solution_with_audio.ipynb +++ b/week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb @@ -375,17 +375,46 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 31, "id": "4ded9b3f-83e1-4971-9714-4894f2982b5a", "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7866\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "with gr.Blocks() as ui:\n", " with gr.Row():\n", - " chatbot = gr.Chatbot(height=500, type=\"messages\")\n", - " # image_output = gr.Image(height=500)\n", + " chatbot = gr.Chatbot(height=500, type=\"messages\", label=\"Multimodal Technical Expert Chatbot\")\n", " with gr.Row():\n", " entry = gr.Textbox(label=\"Ask our technical expert anything:\")\n", " audio_input = gr.Audio(\n", @@ -420,14 +449,6 @@ "ui.launch(inbrowser=True)" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "da663d73-dd2a-4fff-84df-2209cf2b330b", - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "code", "execution_count": null, From 6b108867db1e89aaeccc312c43d6ae1d1b7a24e5 Mon Sep 17 00:00:00 2001 From: Sandeep Gangaram Date: Wed, 11 Dec 2024 23:33:28 +0530 Subject: [PATCH 03/26] add markdown --- .../week2_multimodal_chatbot_with_audio.ipynb | 59 ++++++++----------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb b/week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb index daff689..eb7c377 100644 --- a/week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb +++ b/week2/community-contributions/week2_multimodal_chatbot_with_audio.ipynb @@ -1,8 +1,31 @@ { "cells": [ + { + "cell_type": "markdown", + "id": "ad900e1c-b4a9-4f05-93d5-e364fae208dd", + "metadata": {}, + "source": [ + "# Multimodal Expert Tutor\n", + "\n", + "An AI assistant which leverages expertise from other sources for you.\n", + "\n", + "Features:\n", + "- Multimodal\n", + "- Uses tools\n", + "- Streams responses\n", + "- Reads out the responses after streaming\n", + "- Coverts voice to text during input\n", + "\n", + "Scope for Improvement\n", + "- 
Read response faster (as streaming starts)\n", + "- code optimization\n", + "- UI enhancements\n", + "- Make it more real time" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "c1070317-3ed9-4659-abe3-828943230e03", "metadata": {}, "outputs": [], @@ -375,42 +398,12 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "id": "4ded9b3f-83e1-4971-9714-4894f2982b5a", "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7866\n", - "\n", - "To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 31, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "with gr.Blocks() as ui:\n", " with gr.Row():\n", From 8b67498c587c89e6f6d89099156cb2c5291cb392 Mon Sep 17 00:00:00 2001 From: shreshthkapai Date: Sun, 15 Dec 2024 23:38:09 +0530 Subject: [PATCH 04/26] added day 1 assignments. --- ...h-paper-summarizer-using -openai-api.ipynb | 297 ++++++++++++++++++ .../day-1-to-do-list using-ollama.ipynb | 206 ++++++++++++ 2 files changed, 503 insertions(+) create mode 100644 week1/community-contributions/day-1-research-paper-summarizer-using -openai-api.ipynb create mode 100644 week1/community-contributions/day-1-to-do-list using-ollama.ipynb diff --git a/week1/community-contributions/day-1-research-paper-summarizer-using -openai-api.ipynb b/week1/community-contributions/day-1-research-paper-summarizer-using -openai-api.ipynb new file mode 100644 index 0000000..45d0914 --- /dev/null +++ b/week1/community-contributions/day-1-research-paper-summarizer-using -openai-api.ipynb @@ -0,0 +1,297 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "52dc600c-4c45-4803-81cb-f06347f4b2c3", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4082f16f-d843-41c7-9137-cdfec093b2d4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far\n" + ] + } + ], + "source": [ + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if not api_key:\n", + " print('No API key was found')\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"API key is 
found but is not in the proper format\")\n", + "else:\n", + " print(\"API key found and looks good so far\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "16c295ce-c57d-429e-8c03-f6610a8ddd42", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "9a548a52-0f7e-4fdf-ad68-0138b2445935", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"\"\"You are a research summarizer. That summarizes the content of the research paper in no more than 1000 words. The research summary that you provide should include the following:\n", + "1) Title and Authors - Identify the study and contributors.\n", + "2) Objective/Problem - State the research goal or question.\n", + "3) Background - Briefly explain the context and significance.\n", + "4) Methods - Summarize the approach or methodology.\n", + "5) Key Findings - Highlight the main results or insights.\n", + "6) Conclusion - Provide the implications or contributions of the study.\n", + "7) Future Directions - Suggest areas for further research or exploration.\n", + "8) Limitations - Highlight constraints or challenges in the study.\n", + "9) Potential Applications - Discuss how the findings can be applied in real-world scenarios.\n", + "Keep all points concise, clear, and focused and generate output in markdown.\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "66b4411f-172e-46be-b6cd-a9e5b857fb28", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: ipywidgets in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (8.1.5)\n", + "Requirement already satisfied: pdfplumber in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (0.11.4)\n", + "Requirement already satisfied: comm>=0.1.3 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from 
ipywidgets) (0.2.2)\n", + "Requirement already satisfied: ipython>=6.1.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipywidgets) (8.30.0)\n", + "Requirement already satisfied: traitlets>=4.3.1 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipywidgets) (5.14.3)\n", + "Requirement already satisfied: widgetsnbextension~=4.0.12 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipywidgets) (4.0.13)\n", + "Requirement already satisfied: jupyterlab_widgets~=3.0.12 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipywidgets) (3.0.13)\n", + "Requirement already satisfied: pdfminer.six==20231228 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from pdfplumber) (20231228)\n", + "Requirement already satisfied: Pillow>=9.1 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from pdfplumber) (11.0.0)\n", + "Requirement already satisfied: pypdfium2>=4.18.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from pdfplumber) (4.30.0)\n", + "Requirement already satisfied: charset-normalizer>=2.0.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from pdfminer.six==20231228->pdfplumber) (3.4.0)\n", + "Requirement already satisfied: cryptography>=36.0.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from pdfminer.six==20231228->pdfplumber) (44.0.0)\n", + "Requirement already satisfied: colorama in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (0.4.6)\n", + "Requirement already satisfied: decorator in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1)\n", + "Requirement already satisfied: jedi>=0.16 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from 
ipython>=6.1.0->ipywidgets) (0.19.2)\n", + "Requirement already satisfied: matplotlib-inline in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (0.1.7)\n", + "Requirement already satisfied: prompt_toolkit<3.1.0,>=3.0.41 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (3.0.48)\n", + "Requirement already satisfied: pygments>=2.4.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (2.18.0)\n", + "Requirement already satisfied: stack_data in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3)\n", + "Requirement already satisfied: typing_extensions>=4.6 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from ipython>=6.1.0->ipywidgets) (4.12.2)\n", + "Requirement already satisfied: cffi>=1.12 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from cryptography>=36.0.0->pdfminer.six==20231228->pdfplumber) (1.17.1)\n", + "Requirement already satisfied: parso<0.9.0,>=0.8.4 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.4)\n", + "Requirement already satisfied: wcwidth in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from prompt_toolkit<3.1.0,>=3.0.41->ipython>=6.1.0->ipywidgets) (0.2.13)\n", + "Requirement already satisfied: executing>=1.2.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from stack_data->ipython>=6.1.0->ipywidgets) (2.1.0)\n", + "Requirement already satisfied: asttokens>=2.1.0 in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from stack_data->ipython>=6.1.0->ipywidgets) (3.0.0)\n", + "Requirement already satisfied: pure_eval in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from 
stack_data->ipython>=6.1.0->ipywidgets) (0.2.3)\n", + "Requirement already satisfied: pycparser in c:\\users\\legion\\anaconda3\\envs\\research_summary\\lib\\site-packages (from cffi>=1.12->cryptography>=36.0.0->pdfminer.six==20231228->pdfplumber) (2.22)\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ + "pip install ipywidgets pdfplumber" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "d8cd8556-ad86-4949-9f15-09de2b8c712b", + "metadata": {}, + "outputs": [], + "source": [ + "import pdfplumber\n", + "from ipywidgets import widgets\n", + "from io import BytesIO" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "0eba3cee-d85c-4d75-9b27-70c8cd7587b1", + "metadata": {}, + "outputs": [], + "source": [ + "from IPython.display import display, Markdown" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "53e270e1-c2e6-4bcc-9ada-90c059cd5a51", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(user_prompt):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2f1807ec-c10b-4d26-9bee-89bd7a4bbb95", + "metadata": {}, + "outputs": [], + "source": [ + "def summarize(user_prompt):\n", + " # Generate messages using the user_prompt\n", + " messages = messages_for(user_prompt)\n", + " try:\n", + " response = openai.chat.completions.create(\n", + " model=\"gpt-4o-mini\", # Correct model name\n", + " messages=messages,\n", + " max_tokens = 1000 # Pass the generated messages\n", + " )\n", + " # Return the content from the API response correctly\n", + " return response.choices[0].message.content\n", + " except Exception as e:\n", + " # Instead of printing, return an error message that can be displayed\n", + " return f\"Error in OpenAI API call: {e}\"" + ] + }, + { + "cell_type": "code", + 
"execution_count": 12, + "id": "0dee8345-4eec-4a9c-ac4e-ad70e13cea44", + "metadata": {}, + "outputs": [], + "source": [ + "upload_widget = widgets.FileUpload(\n", + " accept='.pdf', \n", + " multiple=False,\n", + " description='Upload PDF',\n", + " layout=widgets.Layout(width='300px',height = '100px', border='2px dashed #cccccc', padding='10px')\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "1ff9c7b9-1a3a-4128-a33f-0e5bb2a93d33", + "metadata": {}, + "outputs": [], + "source": [ + "def extract_text_and_generate_summary(change):\n", + " print(\"extracting text\")\n", + " if upload_widget.value:\n", + " # Extract the first uploaded file\n", + " uploaded_file = list(upload_widget.value)[0]\n", + " pdf_file = uploaded_file['content']\n", + "\n", + " # Extract text from the PDF\n", + " try:\n", + " with pdfplumber.open(BytesIO(pdf_file)) as pdf:\n", + " extracted_text = \"\\n\".join(page.extract_text() for page in pdf.pages)\n", + "\n", + " # Generate the user prompt\n", + " user_prompt = (\n", + " f\"You are looking at the text from a research paper. Summarize it in no more than 1000 words. 
\"\n", + " f\"The output should be in markdown.\\n\\n{extracted_text}\"\n", + " )\n", + "\n", + " # Get the summarized response\n", + " response = summarize(user_prompt)\n", + " \n", + " if response:\n", + " # Use IPython's display method to show markdown below the cell\n", + " display(Markdown(response))\n", + " \n", + " except Exception as e:\n", + " # If there's an error, display it using Markdown\n", + " display(Markdown(f\"**Error:** {str(e)}\"))\n", + "\n", + " # Reset the upload widget\n", + " upload_widget.value = ()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "0c16fe3f-704e-4a87-acd9-42c4e6b0d2fa", + "metadata": {}, + "outputs": [], + "source": [ + "upload_widget.observe(extract_text_and_generate_summary, names='value')" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "c2c2d2b2-1264-42d9-9271-c4700b4df80a", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "7304350377d845e78a9a758235e5eba1", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileUpload(value=(), accept='.pdf', description='Upload PDF', layout=Layout(border_bottom='2px dashed #cccccc'…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display(upload_widget)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70c76b90-e626-44b3-8d1f-6e995e8a938d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/day-1-to-do-list using-ollama.ipynb 
b/week1/community-contributions/day-1-to-do-list using-ollama.ipynb new file mode 100644 index 0000000..e01b5df --- /dev/null +++ b/week1/community-contributions/day-1-to-do-list using-ollama.ipynb @@ -0,0 +1,206 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 208, + "id": "f61139a1-40e1-4273-b9a6-5a0a9d63a9bd", + "metadata": {}, + "outputs": [], + "source": [ + "import requests\n", + "import json\n", + "from reportlab.lib.pagesizes import letter\n", + "from reportlab.pdfgen import canvas\n", + "from IPython.display import display, FileLink\n", + "from IPython.display import display, HTML, FileLink\n", + "from reportlab.lib.pagesizes import A4" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "id": "e0858b96-fd41-4911-a333-814e4ed23279", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting reportlab\n", + " Downloading reportlab-4.2.5-py3-none-any.whl.metadata (1.5 kB)\n", + "Requirement already satisfied: pillow>=9.0.0 in c:\\users\\legion\\anaconda3\\envs\\to_do_list\\lib\\site-packages (from reportlab) (11.0.0)\n", + "Collecting chardet (from reportlab)\n", + " Downloading chardet-5.2.0-py3-none-any.whl.metadata (3.4 kB)\n", + "Downloading reportlab-4.2.5-py3-none-any.whl (1.9 MB)\n", + " ---------------------------------------- 0.0/1.9 MB ? 
eta -:--:--\n", + " ---------------- ----------------------- 0.8/1.9 MB 6.7 MB/s eta 0:00:01\n", + " ---------------------------------------- 1.9/1.9 MB 11.9 MB/s eta 0:00:00\n", + "Downloading chardet-5.2.0-py3-none-any.whl (199 kB)\n", + "Installing collected packages: chardet, reportlab\n", + "Successfully installed chardet-5.2.0 reportlab-4.2.5\n" + ] + } + ], + "source": [ + "!pip install reportlab" + ] + }, + { + "cell_type": "code", + "execution_count": 220, + "id": "62cc9d37-c801-4e8a-ad2c-7b1450725a10", + "metadata": {}, + "outputs": [], + "source": [ + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\":\"application/json\"}\n", + "MODEL = \"llama3.2\"" + ] + }, + { + "cell_type": "code", + "execution_count": 249, + "id": "525a81e7-30f8-4db7-bc8d-29948195bd4f", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"\"\"You are a to-do list generator. Based on the user's input, you will create a clear and descriptive to-do\n", + "list using bullet points. Only generate the to-do list as bullet points with some explanation and time frame only if asked for and nothing else. 
\n", + "Be a little descriptive.\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 315, + "id": "7fca3303-3add-468a-a6bd-be7a4d72c811", + "metadata": {}, + "outputs": [], + "source": [ + "def generate_to_do_list(task_description):\n", + " payload = {\n", + " \"model\": MODEL,\n", + " \"messages\": [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": task_description}\n", + " ],\n", + " \"stream\": False\n", + " }\n", + "\n", + " response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n", + "\n", + " if response.status_code == 200:\n", + " try:\n", + " json_response = response.json()\n", + " to_do_list = json_response.get(\"message\", {}).get(\"content\", \"No to-do list found.\")\n", + " \n", + " formatted_output = \"Your To-Do List:\\n\\n\" + to_do_list\n", + " file_name = \"to_do_list.txt\"\n", + " \n", + " with open(file_name, \"w\", encoding=\"utf-8\") as file:\n", + " file.write(formatted_output)\n", + "\n", + " return file_name\n", + " \n", + " except Exception as e:\n", + " return f\"Error parsing JSON: {e}\"\n", + " else:\n", + " return f\"Error: {response.status_code} - {response.text}\"" + ] + }, + { + "cell_type": "code", + "execution_count": 316, + "id": "d45d6c7e-0e89-413e-8f30-e4975ea6d043", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "Enter the task description of the to-do list: Give me a 4-week to-do list plan for a wedding reception party.\n" + ] + } + ], + "source": [ + "task_description = input(\"Enter the task description of the to-do list:\")" + ] + }, + { + "cell_type": "code", + "execution_count": 317, + "id": "5493da44-e254-4d06-b973-a8069c2fc625", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "result = generate_to_do_list(task_description)" + ] + }, + { + "cell_type": "code", + "execution_count": 318, + "id": "5e95c722-ce1a-4630-b21a-1e00e7ba6ab9", + "metadata": {}, + "outputs": [ + { 
+ "data": { + "text/html": [ + "

You can download your to-do list by clicking the link below:

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "to_do_list.txt
" + ], + "text/plain": [ + "C:\\Users\\Legion\\to-do list using ollama\\to_do_list.txt" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display(HTML(\"

You can download your to-do list by clicking the link below:

\"))\n", + "display(FileLink(result))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3d0a44e-bca4-4944-8593-1761c2f73a70", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 3ad4c92bda3cc0e4885a251a6467352535d1d164 Mon Sep 17 00:00:00 2001 From: pagwin Date: Mon, 16 Dec 2024 14:08:27 +0000 Subject: [PATCH 05/26] Upsell '3 for 2' option added --- .../community-contributions/day3.upsell.ipynb | 355 ++++++++++++++++++ 1 file changed, 355 insertions(+) create mode 100644 week2/community-contributions/day3.upsell.ipynb diff --git a/week2/community-contributions/day3.upsell.ipynb b/week2/community-contributions/day3.upsell.ipynb new file mode 100644 index 0000000..dd2bd06 --- /dev/null +++ b/week2/community-contributions/day3.upsell.ipynb @@ -0,0 +1,355 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "75e2ef28-594f-4c18-9d22-c6b8cd40ead2", + "metadata": {}, + "source": [ + "# Day 3 - Conversational AI - aka Chatbot!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "231605aa-fccb-447e-89cf-8b187444536a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n", + "Google API Key exists and begins AIzaSyA-\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv()\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize\n", + "\n", + "openai = OpenAI()\n", + "MODEL = 'gpt-4o-mini'" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e16839b5-c03b-4d9d-add6-87a0f6f37575", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant\"" + ] + }, + { + "cell_type": "markdown", + "id": 
"98e97227-f162-4d1a-a0b2-345ff248cbe7", + "metadata": {}, + "source": [ + "# Please read this! A change from the video:\n", + "\n", + "In the video, I explain how we now need to write a function called:\n", + "\n", + "`chat(message, history)`\n", + "\n", + "Which expects to receive `history` in a particular format, which we need to map to the OpenAI format before we call OpenAI:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", + " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", + " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n", + "]\n", + "```\n", + "\n", + "But Gradio has been upgraded! Now it will pass in `history` in the exact OpenAI format, perfect for us to send straight to OpenAI.\n", + "\n", + "So our work just got easier!\n", + "\n", + "We will write a function `chat(message, history)` where: \n", + "**message** is the prompt to use \n", + "**history** is the past conversation, in OpenAI format \n", + "\n", + "We will combine the system message, history and latest message, then call OpenAI." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1", + "metadata": {}, + "outputs": [], + "source": [ + "# Simpler than in my video - we can easily create this function that calls OpenAI\n", + "# It's now just 1 line of code to prepare the input to OpenAI!\n", + "\n", + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " print(\"History is:\")\n", + " print(history)\n", + " print(\"And messages is:\")\n", + " print(messages)\n", + "\n", + " stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response" + ] + }, + { + "cell_type": "markdown", + "id": "1334422a-808f-4147-9c4c-57d63d9780d0", + "metadata": {}, + "source": [ + "## And then enter Gradio's magic!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1f91b414-8bab-472d-b9c9-3fa51259bdfe", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n", + "the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. 
\\\n", + "For example, if the customer says 'I'm looking to buy a hat', \\\n", + "you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales event.'\\\n", + "Encourage the customer to buy hats if they are unsure what to get.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "4e5be3ec-c26c-42bc-ac16-c39d369883f6", + "metadata": {}, + "outputs": [], + "source": [ + "def chat(message, history):\n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "413e9e4e-7836-43ac-a0c3-e1ab5ed6b136", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "d75f0ffa-55c8-4152-b451-945021676837", + "metadata": {}, + "outputs": [], + "source": [ + "system_message += \"\\nIf the customer asks for shoes, you should respond that shoes are not on sale today, \\\n", + "but remind the customer to look at hats!\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c602a8dd-2df7-4eb7-b539-4e01865a6351", + "metadata": {}, + "outputs": [], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "0a987a66-1061-46d6-a83a-a30859dc88bf", + "metadata": {}, + "outputs": [], + "source": [ + "# Fixed a bug in this function brilliantly identified by student Gabor M.!\n", + "# I've also improved the structure of this function\n", + "# Paul Goodwin added \"Buy One get one free offer\" for a bit of fun\n", + "\n", + "def 
chat(message, history):\n", + "\n", + " relevant_system_message = system_message\n", + " keywords = ['discount', 'offer', 'promotion'] # Define words that imply customer is looking for a better deal\n", + "\n", + " if 'belt' in message.strip().lower():\n", + " relevant_system_message += (\n", + " \" The store does not sell belts; if you are asked for belts, be sure to point out other items on sale.\"\n", + " )\n", + " elif any(word in message.strip().lower() for word in keywords): # Use elif for clarity\n", + " relevant_system_message += (\n", + " \" If the customer asks for more money off the selling price, the store is currently running 'buy 2 get one free' campaign, so be sure to mention this.\"\n", + " )\n", + "\n", + " messages = [{\"role\": \"system\", \"content\": relevant_system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "20570de2-eaad-42cc-a92c-c779d71b48b6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7862\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + }, + { + "cell_type": "markdown", + "id": "82a57ee0-b945-48a7-a024-01b56a5d4b3e", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business Applications

\n", + " Conversational Assistants are of course a hugely common use case for Gen AI, and the latest frontier models are remarkably good at nuanced conversation. And Gradio makes it easy to have a user interface. Another crucial skill we covered is how to use prompting to provide context, information and examples.\n", + "

\n", + "Consider how you could apply an AI Assistant to your business, and make yourself a prototype. Use the system prompt to give context on your business, and set the tone for the LLM.
\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6dfb9e21-df67-4c2b-b952-5e7e7961b03d", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d9b856285e255c62ece304accbf865573e714792 Mon Sep 17 00:00:00 2001 From: Gopinath G <34595359+gopinath1998@users.noreply.github.com> Date: Wed, 18 Dec 2024 10:52:38 +0530 Subject: [PATCH 06/26] Add files via upload week-1 day-2 exercise --- .../day2 EXERCISE.ipynb | 522 ++++++++++++++++++ 1 file changed, 522 insertions(+) create mode 100644 week1/community-contributions/day2 EXERCISE.ipynb diff --git a/week1/community-contributions/day2 EXERCISE.ipynb b/week1/community-contributions/day2 EXERCISE.ipynb new file mode 100644 index 0000000..f7a9c1b --- /dev/null +++ b/week1/community-contributions/day2 EXERCISE.ipynb @@ -0,0 +1,522 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# Welcome to your first assignment!\n", + "\n", + "Instructions are below. Please give this a try, and look in the solutions folder if you get stuck (or feel free to ask me!)" + ] + }, + { + "cell_type": "markdown", + "id": "ada885d9-4d42-4d9b-97f0-74fbbbfe93a9", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Just before we get to the assignment --

\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.
\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/
\n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "6e9fa1fc-eac5-4d1d-9be4-541b3f2b3458", + "metadata": {}, + "source": [ + "# HOMEWORK EXERCISE ASSIGNMENT\n", + "\n", + "Upgrade the day 1 project to summarize a webpage to use an Open Source model running locally via Ollama rather than OpenAI\n", + "\n", + "You'll be able to use this technique for all subsequent projects if you'd prefer not to use paid APIs.\n", + "\n", + "**Benefits:**\n", + "1. No API charges - open-source\n", + "2. Data doesn't leave your box\n", + "\n", + "**Disadvantages:**\n", + "1. Significantly less power than Frontier Model\n", + "\n", + "## Recap on installation of Ollama\n", + "\n", + "Simply visit [ollama.com](https://ollama.com) and install!\n", + "\n", + "Once complete, the ollama server should already be running locally. \n", + "If you visit: \n", + "[http://localhost:11434/](http://localhost:11434/)\n", + "\n", + "You should see the message `Ollama is running`. \n", + "\n", + "If not, bring up a new Terminal (Mac) or Powershell (Windows) and enter `ollama serve` \n", + "And in another Terminal (Mac) or Powershell (Windows), enter `ollama pull llama3.2` \n", + "Then try [http://localhost:11434/](http://localhost:11434/) again.\n", + "\n", + "If Ollama is slow on your machine, try using `llama3.2:1b` as an alternative. 
Run `ollama pull llama3.2:1b` from a Terminal or Powershell, and change the code below from `MODEL = \"llama3.2\"` to `MODEL = \"llama3.2:1b\"`" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display" + ] + }, + { + "cell_type": "raw", + "id": "07e106bd-10c5-4365-b85b-397b5f059656", + "metadata": {}, + "source": [ + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}\n", + "MODEL = \"llama3.2\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "dac0a679-599c-441f-9bf2-ddc73d35b940", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a messages list using the same format that we used for OpenAI\n", + "\n", + "messages = [\n", + " {\"role\": \"user\", \"content\": \"Describe some of the business applications of Generative AI\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7bb9c624-14f0-4945-a719-8ddb64f66f47", + "metadata": {}, + "outputs": [], + "source": [ + "payload = {\n", + " \"model\": MODEL,\n", + " \"messages\": messages,\n", + " \"stream\": False\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "42b9f644-522d-4e05-a691-56e7658c0ea9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generative AI (Artificial Intelligence) has numerous business applications across various industries. Here are some examples:\n", + "\n", + "1. **Content Generation**: Generative AI can create high-quality content such as articles, social media posts, product descriptions, and more. This can help businesses save time and resources on content creation.\n", + "2. 
**Product Design**: Generative AI can be used to design new products, such as fashion items, jewelry, or electronics. It can also generate 3D models and prototypes, reducing the need for manual design and prototyping.\n", + "3. **Image and Video Generation**: Generative AI can create realistic images and videos that can be used in marketing campaigns, advertising, and social media. This can help businesses create engaging visual content without requiring extensive photography or videography skills.\n", + "4. **Chatbots and Virtual Assistants**: Generative AI can power chatbots and virtual assistants that provide customer support, answer frequently asked questions, and even engage in basic conversations.\n", + "5. **Predictive Maintenance**: Generative AI can analyze sensor data from machines and predict when maintenance is needed, reducing downtime and increasing efficiency.\n", + "6. **Personalized Recommendations**: Generative AI can analyze customer behavior and preferences to generate personalized product recommendations, improving the overall shopping experience.\n", + "7. **Customer Segmentation**: Generative AI can help businesses segment their customers based on their behavior, demographics, and preferences, enabling targeted marketing campaigns.\n", + "8. **Automated Writing Assistance**: Generative AI can assist writers with ideas, suggestions, and even full-text writing, helping to boost productivity and creativity.\n", + "9. **Data Analysis and Visualization**: Generative AI can analyze large datasets and generate insights, visualizations, and predictions that can inform business decisions.\n", + "10. **Creative Collaboration**: Generative AI can collaborate with human creatives, such as artists, designers, and writers, to generate new ideas, concepts, and content.\n", + "\n", + "Some specific industries where Generative AI is being applied include:\n", + "\n", + "1. 
**Marketing and Advertising**: generating personalized ads, content, and messaging.\n", + "2. **Finance and Banking**: automating financial analysis, risk assessment, and customer service.\n", + "3. **Healthcare**: generating medical images, analyzing patient data, and predicting disease outcomes.\n", + "4. **Manufacturing and Supply Chain**: optimizing production workflows, predicting demand, and identifying potential bottlenecks.\n", + "5. **Education**: creating personalized learning experiences, grading assignments, and developing educational content.\n", + "\n", + "These are just a few examples of the many business applications of Generative AI. As the technology continues to evolve, we can expect to see even more innovative uses across various industries.\n" + ] + } + ], + "source": [ + "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n", + "print(response.json()['message']['content'])" + ] + }, + { + "cell_type": "markdown", + "id": "6a021f13-d6a1-4b96-8e18-4eae49d876fe", + "metadata": {}, + "source": [ + "# Introducing the ollama package\n", + "\n", + "And now we'll do the same thing, but using the elegant ollama python package instead of a direct HTTP call.\n", + "\n", + "Under the hood, it's making the same call as above to the ollama server running at localhost:11434" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "7745b9c4-57dc-4867-9180-61fa5db55eb8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generative AI has numerous business applications across various industries. Here are some examples:\n", + "\n", + "1. **Content Generation**: Generative AI can be used to generate high-quality content such as articles, social media posts, product descriptions, and more. This can save time and resources for businesses that need to produce a large volume of content.\n", + "2. 
**Product Design**: Generative AI can be used to design new products, such as furniture, electronics, and other consumer goods. It can also help optimize product designs by generating multiple versions and selecting the most suitable one based on various criteria.\n", + "3. **Marketing Automation**: Generative AI can be used to create personalized marketing campaigns, such as email marketing automation, social media ads, and more. This can help businesses tailor their marketing efforts to specific customer segments and improve engagement rates.\n", + "4. **Image and Video Editing**: Generative AI can be used to edit images and videos, such as removing background noise, correcting color casts, and enhancing video quality. This can save time and resources for businesses that need to create high-quality visual content.\n", + "5. **Chatbots and Virtual Assistants**: Generative AI can be used to create chatbots and virtual assistants that can understand natural language and respond accordingly. This can help businesses provide better customer service and improve user experience.\n", + "6. **Predictive Analytics**: Generative AI can be used to analyze large datasets and generate predictive models that can forecast future trends and behaviors. This can help businesses make data-driven decisions and stay ahead of the competition.\n", + "7. **Customer Segmentation**: Generative AI can be used to segment customers based on their behavior, demographics, and preferences. This can help businesses tailor their marketing efforts and improve customer engagement.\n", + "8. **Language Translation**: Generative AI can be used to translate languages in real-time, which can help businesses communicate with international clients and customers more effectively.\n", + "9. **Music Composition**: Generative AI can be used to compose music for various applications such as advertising, film scoring, and video game soundtracks.\n", + "10. 
**Financial Modeling**: Generative AI can be used to create financial models that can predict future revenue streams, costs, and other financial metrics. This can help businesses make more accurate predictions and inform better investment decisions.\n", + "\n", + "Some of the industries that are already leveraging generative AI include:\n", + "\n", + "* E-commerce\n", + "* Healthcare\n", + "* Finance\n", + "* Marketing\n", + "* Education\n", + "* Entertainment\n", + "* Manufacturing\n", + "\n", + "These applications have the potential to transform various business processes, improve customer experiences, and drive innovation in various sectors.\n" + ] + } + ], + "source": [ + "import ollama\n", + "\n", + "response = ollama.chat(model=MODEL, messages=messages)\n", + "print(response['message']['content'])" + ] + }, + { + "cell_type": "markdown", + "id": "a4704e10-f5fb-4c15-a935-f046c06fb13d", + "metadata": {}, + "source": [ + "## Alternative approach - using OpenAI python library to connect to Ollama" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "23057e00-b6fc-4678-93a9-6b31cb704bff", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generative AI has numerous business applications across various industries, transforming the way companies operate, create products, and interact with customers. Some key applications include:\n", + "\n", + "1. **Content Generation**: Automate content creation for marketing materials, such as blog posts, product descriptions, social media posts, and more, using Generative AI-powered tools.\n", + "2. **Product Design and Prototyping**: Use Generative AI to design new products, furniture, or other innovative solutions, reducing design time and costs while increasing creativity.\n", + "3. 
**Customer Experience (CX) Tools**: Leverage Generative AI to create personalized customer experiences, such as chatbots that can respond to customer queries and provide tailored recommendations.\n", + "4. **Predictive Maintenance**: Use Generative AI to analyze sensor data, identify potential issues, and predict maintenance needs for equipment, reducing downtime and increasing overall efficiency.\n", + "5. **Personalized Marketing**: Use Generative AI to create targeted marketing campaigns based on individual customer preferences, behaviors, and demographics.\n", + "6. **Content Optimization**: Utilize Generative AI to optimize content for better performance in search engine results pages (SERPs), ensuring improved visibility and traffic.\n", + "7. **Brand Storytelling**: Automate the creation of brand stories, taglines, and overall brand narrative using Generative AI-powered tools.\n", + "8. **Financial Modeling and Forecasting**: Use Generative AI to create financial models, forecasts, and predictions for businesses, helping them make data-driven decisions.\n", + "9. **Supply Chain Optimization**: Leverage Generative AI to optimize supply chain operations, predicting demand, reducing inventory levels, and streamlining logistics.\n", + "10. **Automated Transcription and Translation**: Use Generative AI to automate the transcription of audio and video files into written text, as well as translate materials across languages.\n", + "11. **Digital Asset Management**: Utilize Generative AI to manage digital assets, such as images, videos, and documents, and automatically generate metadata for easy search and retrieval.\n", + "12. **Chatbots and Virtual Assistants**: Create more advanced chatbots using Generative AI that can understand context, emotions, and intent, providing better customer service experiences.\n", + "\n", + "In healthcare, Generative AI is being applied to:\n", + "\n", + "1. Medical Imaging Analysis\n", + "2. Personalized Medicine\n", + "3. 
Patient Data Analysis\n", + "\n", + "In education, Generative AI is used in:\n", + "\n", + "1. Adaptive Learning Systems\n", + "2. Automated Grading and Feedback\n", + "\n", + "Generative AI has numerous applications across various industries, from creative content generation to predictive maintenance and supply chain optimization.\n", + "\n", + "Keep in mind that these are just a few examples of the many business applications of Generative AI as this technology continues to evolve at a rapid pace.\n" + ] + } + ], + "source": [ + "# There's actually an alternative approach that some people might prefer\n", + "# You can use the OpenAI client python library to call Ollama:\n", + "\n", + "from openai import OpenAI\n", + "ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", + "\n", + "response = ollama_via_openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=messages\n", + ")\n", + "\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "1622d9bb-5c68-4d4e-9ca4-b492c751f898", + "metadata": {}, + "source": [ + "# NOW the exercise for you\n", + "\n", + "Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI; use either of the above approaches." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "de923314-a427-4199-b1f9-0e60f85114c3", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "\n", + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "0cedada6-adc6-40dc-bdf3-bc8a3b6b3826", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. 
I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], + "source": [ + "# Let's try one out. 
Change the website and add print statements to follow along.\n", + "\n", + "web_res = Website(\"https://edwarddonner.com\")\n", + "print(web_res.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "64d26055-756b-4095-a1d1-298fdf4fd8f1", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}\n", + "MODEL = \"llama3.2\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "id": "65b08550-7506-415f-8612-e2395d6e145d", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an helper that assist user to provide crisp summary\\\n", + "of the website they pass in, respond with key points\"\n", + "\n", + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. 
\\\n", + "If it includes news or announcements, then summarize these too with start bulletin.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt\n" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "36a0a2d0-f07a-40ac-a065-b713cdd5c028", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "id": "8c2b20ea-6a8e-41c9-be3b-f24a5b29e8de", + "metadata": {}, + "outputs": [], + "source": [ + "#website search\n", + "\n", + "web_msg=Website(\"https://www.cricbuzz.com/cricket-match-squads/91796/aus-vs-ind-3rd-test-india-tour-of-australia-2024-25\")\n", + "messages=messages_for(web_msg)\n", + "\n", + "payload = {\n", + " \"model\": MODEL,\n", + " \"messages\": messages,\n", + " \"stream\": False\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "id": "e5636b3b-7763-4f9c-ab18-88aa25b50de6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "**Summary of the Website**\n", + "=========================\n", + "\n", + "* The website provides live updates and information about the 3rd Test match between Australia and India as part of India's tour of Australia in the 2024-25 season.\n", + "* It includes news, scores, stats, and analysis from the match.\n", + "* The website is affiliated with Cricbuzz.com, a popular online cricket platform.\n", + "\n", + "**News and Announcements**\n", + "==========================\n", + "\n", + "* **Rashid Khan to miss the rest of the series**: Australian all-rounder Mitchell Marsh's teammate Rashid Khan has been ruled out of the remaining Tests due to a knee injury.\n", + "* **Bumrah to feature in the third Test**: Indian fast 
bowler Jasprit Bumrah is expected to return for the third Test, which starts on January 5 at the Sydney Cricket Ground.\n" + ] + } + ], + "source": [ + "#Using Ollama to run it in the local\n", + "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n", + "print(response.json()['message']['content'])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 9179ced9b101711e41d873bd370a9b73fd9646ec Mon Sep 17 00:00:00 2001 From: Cloud LLama <163757327+cloudllama@users.noreply.github.com> Date: Wed, 18 Dec 2024 06:42:11 -0500 Subject: [PATCH 07/26] Correct typo in week4/day4.ipynb Change function from stream_code_quen to stream_code_qwen --- week4/day4.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/week4/day4.ipynb b/week4/day4.ipynb index 722a233..0df69a1 100644 --- a/week4/day4.ipynb +++ b/week4/day4.ipynb @@ -609,7 +609,7 @@ "metadata": {}, "outputs": [], "source": [ - "def stream_code_quen(python):\n", + "def stream_code_qwen(python):\n", " tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", " messages = messages_for(python)\n", " text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", From 25e24cf34307db197ce2c0a7ffb77c98035e8f46 Mon Sep 17 00:00:00 2001 From: SIFAT IMTIAZ Date: Wed, 18 Dec 2024 20:59:04 +0600 Subject: [PATCH 08/26] Add files via upload --- week2/community-contributions/TTS_STT.ipynb | 334 ++++++++++++++++++++ 1 file changed, 334 insertions(+) create mode 100644 week2/community-contributions/TTS_STT.ipynb diff --git a/week2/community-contributions/TTS_STT.ipynb 
b/week2/community-contributions/TTS_STT.ipynb new file mode 100644 index 0000000..3409bfd --- /dev/null +++ b/week2/community-contributions/TTS_STT.ipynb @@ -0,0 +1,334 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "a60e0f78-4637-4318-9ab6-309c3f7f2799", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API Key set\n" + ] + } + ], + "source": [ + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "\n", + "load_dotenv()\n", + "\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "if openai_api_key:\n", + " print(\"API Key set\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "MODEL = \"gpt-4o-mini\"\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "67026ef0-23be-4101-9371-b11f96f505bf", + "metadata": {}, + "outputs": [], + "source": [ + "# TTS\n", + "\n", + "from pydub import AudioSegment\n", + "import os\n", + "import subprocess\n", + "from io import BytesIO\n", + "import tempfile\n", + "\n", + "# Set custom temp directory\n", + "custom_temp_dir = r\"D:\\projects\\llm_engineering-main\\temp\"\n", + "os.makedirs(custom_temp_dir, exist_ok=True)\n", + "\n", + "# Explicitly set FFmpeg paths\n", + "AudioSegment.converter = r\"D:\\Anaconda3\\envs\\llms\\Library\\bin\\ffmpeg.exe\"\n", + "AudioSegment.ffprobe = r\"D:\\Anaconda3\\envs\\llms\\Library\\bin\\ffprobe.exe\"\n", + "\n", + "def play_audio_with_ffplay(audio_segment, temp_dir):\n", + " # Explicitly create and manage a temporary file\n", + " temp_file_path = os.path.join(temp_dir, \"temp_output.wav\")\n", + " \n", + " # Export the audio to the temporary file\n", + " audio_segment.export(temp_file_path, format=\"wav\")\n", + " \n", + " try:\n", + " # Play the audio using ffplay\n", + " subprocess.call([\"ffplay\", \"-nodisp\", \"-autoexit\", temp_file_path])\n", + " finally:\n", + " # Clean up the temporary 
file after playback\n", + " if os.path.exists(temp_file_path):\n", + " os.remove(temp_file_path)\n", + "\n", + "def talker(message):\n", + " # Mocked OpenAI response for testing\n", + " response = openai.audio.speech.create(\n", + " model=\"tts-1\",\n", + " voice=\"nova\",\n", + " input=message\n", + " )\n", + " \n", + " # Handle audio stream\n", + " audio_stream = BytesIO(response.content)\n", + " audio = AudioSegment.from_file(audio_stream, format=\"mp3\")\n", + " \n", + " # Play the audio\n", + " play_audio_with_ffplay(audio, custom_temp_dir)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "12c66b44-293a-4bf9-b81e-0f6905fbf607", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "D:\\anaconda3\\envs\\llms\\Lib\\site-packages\\whisper\\__init__.py:150: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", + " checkpoint = torch.load(fp, map_location=device)\n" + ] + } + ], + "source": [ + "# STT Whisper\n", + "\n", + "import whisper\n", + "import sounddevice as sd\n", + "import numpy as np\n", + "from scipy.io.wavfile import write\n", + "\n", + "def record_audio(temp_dir, duration=5, samplerate=16000, device_id=2):\n", + " # print(f\"Recording for {duration} seconds...\")\n", + " sd.default.device = (device_id, None)\n", + " audio = sd.rec(int(duration * samplerate), samplerate=samplerate, channels=1, dtype=\"int16\")\n", + " sd.wait() # Wait until the recording is finished\n", + " \n", + " audio_path = os.path.join(temp_dir, \"mic_input.wav\")\n", + " write(audio_path, samplerate, audio)\n", + " # print(f\"Audio recorded and saved to {audio_path}\")\n", + "\n", + " return audio_path\n", + "\n", + "\n", + "whisper_model = whisper.load_model(\"base\")\n", + "def transcribe_audio(audio_path): \n", + " # print(\"Transcribing audio...\")\n", + " result = whisper_model.transcribe(audio_path, language=\"en\")\n", + " return result[\"text\"]\n", + "\n", + "def mic_to_text():\n", + " audio_path = record_audio(custom_temp_dir, duration=10)\n", + " transcription = transcribe_audio(audio_path)\n", + " # print(f\"Transcription: {transcription}\")\n", + " return transcription" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0156c106-1844-444a-9a22-88c3475805d9", + "metadata": {}, + "outputs": [], + "source": [ + "# Chat Functions\n", + "\n", + "import requests\n", + "history = [{\"role\": \"system\", \"content\": \"You are Nova the friendly robot. 
Reply within couple of sentences.\"}]\n", + "\n", + "def run_chat():\n", + " running = True\n", + " while running:\n", + " input_text = input(\"press Enter to talk\") \n", + " user_input = input_text if input_text.strip() else mic_to_text()\n", + " running = False if input_text == \"bye\" or user_input.strip() == \"bye\" else True\n", + " print(f\"\\nYou: {user_input}\\n\\n\")\n", + " history.append({\"role\": \"user\", \"content\": user_input}) \n", + " api_run = requests.post(\n", + " \"http://localhost:11434/api/chat\", \n", + " json={\n", + " \"model\": \"llama3.2\",\n", + " \"messages\": history,\n", + " \"stream\": False\n", + " }, \n", + " headers={\"Content-Type\": \"application/json\"}\n", + " )\n", + " output_message = api_run.json()['message']['content']\n", + " print(f\"Nova: {output_message}\\n\\n\") \n", + " talker(output_message)\n", + " history.append({\"role\": \"assistant\", \"content\": output_message})" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "de61b54e-387e-4480-a592-c78e3245ddde", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "press Enter to talk \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "You: Hi there. Where am I talking to?\n", + "\n", + "\n", + "Nova: Beep boop! You're talking to me, Nova, a friendly robot designed to assist and chat with users like you. I'm happy to have you here!\n", + "\n", + "\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "press Enter to talk \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "You: Do you know my name?\n", + "\n", + "\n", + "Nova: No, I don't have any information about your personal identity. This is the start of our conversation, so we're starting from scratch! 
Would you like to tell me your name, or keep it a secret?\n", + "\n", + "\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "press Enter to talk Sifat\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "You: Sifat\n", + "\n", + "\n", + "Nova: Beep boop, nice to meet you, Sifat! I'm glad we could have a brief introduction. What would you like to talk about today? The weather, hobbies, or something else?\n", + "\n", + "\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "press Enter to talk \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "You: Nothing much today. I was just wondering that how you react because I am using.\n", + "\n", + "\n", + "Nova: Beep boop, I see! As a robot, my purpose is to assist and provide helpful responses, regardless of the user's background or context. My reactions are programmed to be neutral and friendly, so I don't have personal biases or opinions. I'm here to help and learn from our conversation, Sifat!\n", + "\n", + "\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "press Enter to talk \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "You: So, do you keep on learning while we are having our conversations? Do you train yourself like that?\n", + "\n", + "\n", + "Nova: Beep boop, yes! That's one of the ways I learn and improve. The conversations I have with users like you help me refine my language understanding and generation capabilities. My training data is constantly updated and expanded to include new topics, nuances, and examples. 
So, our conversation right now helps me become a better conversationalist for others in the future!\n", + "\n", + "\n" + ] + }, + { + "name": "stdin", + "output_type": "stream", + "text": [ + "press Enter to talk bye\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "You: bye\n", + "\n", + "\n", + "Nova: Beep boop, it was nice chatting with you, Sifat! Feel free to come back and talk anytime you'd like. Have a great day, and I'll be here when you're ready for our next conversation! Bye for now!\n", + "\n", + "\n" + ] + } + ], + "source": [ + "run_chat()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce16bee7-6ea6-46d5-a407-385e6ae31db8", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 11cc7542fbd8e44b7a422075f37a577b5c3e648d Mon Sep 17 00:00:00 2001 From: SIFAT IMTIAZ Date: Wed, 18 Dec 2024 21:05:21 +0600 Subject: [PATCH 09/26] Add files via upload --- week2/community-contributions/TTS_STT.ipynb | 154 +------------------- 1 file changed, 8 insertions(+), 146 deletions(-) diff --git a/week2/community-contributions/TTS_STT.ipynb b/week2/community-contributions/TTS_STT.ipynb index 3409bfd..f1347c0 100644 --- a/week2/community-contributions/TTS_STT.ipynb +++ b/week2/community-contributions/TTS_STT.ipynb @@ -2,18 +2,10 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "a60e0f78-4637-4318-9ab6-309c3f7f2799", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "API Key set\n" - ] - } - ], + 
"outputs": [], "source": [ "import os\n", "import json\n", @@ -34,7 +26,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "67026ef0-23be-4101-9371-b11f96f505bf", "metadata": {}, "outputs": [], @@ -88,19 +80,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "12c66b44-293a-4bf9-b81e-0f6905fbf607", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "D:\\anaconda3\\envs\\llms\\Lib\\site-packages\\whisper\\__init__.py:150: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. 
Please open an issue on GitHub for any issues related to this experimental feature.\n", - " checkpoint = torch.load(fp, map_location=device)\n" - ] - } - ], + "outputs": [], "source": [ "# STT Whisper\n", "\n", @@ -137,7 +120,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "0156c106-1844-444a-9a22-88c3475805d9", "metadata": {}, "outputs": [], @@ -172,131 +155,10 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "de61b54e-387e-4480-a592-c78e3245ddde", "metadata": {}, - "outputs": [ - { - "name": "stdin", - "output_type": "stream", - "text": [ - "press Enter to talk \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "You: Hi there. Where am I talking to?\n", - "\n", - "\n", - "Nova: Beep boop! You're talking to me, Nova, a friendly robot designed to assist and chat with users like you. I'm happy to have you here!\n", - "\n", - "\n" - ] - }, - { - "name": "stdin", - "output_type": "stream", - "text": [ - "press Enter to talk \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "You: Do you know my name?\n", - "\n", - "\n", - "Nova: No, I don't have any information about your personal identity. This is the start of our conversation, so we're starting from scratch! Would you like to tell me your name, or keep it a secret?\n", - "\n", - "\n" - ] - }, - { - "name": "stdin", - "output_type": "stream", - "text": [ - "press Enter to talk Sifat\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "You: Sifat\n", - "\n", - "\n", - "Nova: Beep boop, nice to meet you, Sifat! I'm glad we could have a brief introduction. What would you like to talk about today? 
The weather, hobbies, or something else?\n", - "\n", - "\n" - ] - }, - { - "name": "stdin", - "output_type": "stream", - "text": [ - "press Enter to talk \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "You: Nothing much today. I was just wondering that how you react because I am using.\n", - "\n", - "\n", - "Nova: Beep boop, I see! As a robot, my purpose is to assist and provide helpful responses, regardless of the user's background or context. My reactions are programmed to be neutral and friendly, so I don't have personal biases or opinions. I'm here to help and learn from our conversation, Sifat!\n", - "\n", - "\n" - ] - }, - { - "name": "stdin", - "output_type": "stream", - "text": [ - "press Enter to talk \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "You: So, do you keep on learning while we are having our conversations? Do you train yourself like that?\n", - "\n", - "\n", - "Nova: Beep boop, yes! That's one of the ways I learn and improve. The conversations I have with users like you help me refine my language understanding and generation capabilities. My training data is constantly updated and expanded to include new topics, nuances, and examples. So, our conversation right now helps me become a better conversationalist for others in the future!\n", - "\n", - "\n" - ] - }, - { - "name": "stdin", - "output_type": "stream", - "text": [ - "press Enter to talk bye\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "You: bye\n", - "\n", - "\n", - "Nova: Beep boop, it was nice chatting with you, Sifat! Feel free to come back and talk anytime you'd like. Have a great day, and I'll be here when you're ready for our next conversation! 
Bye for now!\n", - "\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "run_chat()" ] From d73ac9aa17f149b27c45846cb696d62e2968c89e Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Thu, 19 Dec 2024 02:27:38 +1100 Subject: [PATCH 10/26] day 1 javascript webiste challenged added --- ...-webscraping-selenium-for-javascript.ipynb | 871 ++++++++++++++++++ 1 file changed, 871 insertions(+) create mode 100644 week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb diff --git a/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb b/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb new file mode 100644 index 0000000..8ec191a --- /dev/null +++ b/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb @@ -0,0 +1,871 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# Instant Gratification\n", + "\n", + "## Your first Frontier LLM Project!\n", + "\n", + "Let's build a useful LLM solution - in a matter of minutes.\n", + "\n", + "By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n", + "\n", + "Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n", + "\n", + "Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n", + "\n", + "## If you're new to Jupyter Lab\n", + "\n", + "Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. 
Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n", + "\n", + "I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n", + "\n", + "If you prefer to work in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "# If you get an error running this cell, then please head over to the troubleshooting notebook!" + ] + }, + { + "cell_type": "markdown", + "id": "6900b2a8-6384-4316-8aaa-5e519fca4254", + "metadata": {}, + "source": [ + "# Connecting to OpenAI\n", + "\n", + "The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", + "\n", + "## Troubleshooting if you have problems:\n", + "\n", + "Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", + "\n", + "If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", + "\n", + "Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", + "\n", + "Any concerns about API costs? 
See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "\n", + "# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", + "# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:\n", + "# openai = OpenAI(api_key=\"your-key-here-starting-sk-proj-\")" + ] + }, + { + "cell_type": "markdown", + "id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", + "metadata": {}, + "source": [ + "# Let's make a quick call to a Frontier model to get started, as a preview!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello! Welcome! I'm glad to have your first message here. How can I assist you today?\n" + ] + } + ], + "source": [ + "# To give you a preview -- calling OpenAI with these messages is this easy:\n", + "\n", + "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "2aa190e5-cb31-456a-96cc-db109919cd78", + "metadata": {}, + "source": [ + "## OK onwards with our first project" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c5e793b2-6775-426a-a139-4848291d0463", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + 
"execution_count": 8, + "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Home - Edward Donner\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + 
"Type your email…\n", + "Subscribe\n" + ] + } + ], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "\n", + "ed = Website(\"https://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "markdown", + "id": "6a478a0c-2c53-48ff-869c-4d08199931e1", + "metadata": {}, + "source": [ + "## Types of prompts\n", + "\n", + "You may know this already - but if not, you will get very familiar with it!\n", + "\n", + "Models like GPT4o have been trained to receive instructions in a particular way.\n", + "\n", + "They expect to receive:\n", + "\n", + "**A system prompt** that tells them what task they are performing and what tone they should use\n", + "\n", + "**A user prompt** -- the conversation starter that they should reply to" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", + "metadata": {}, + "outputs": [], + "source": [ + "# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", + "metadata": {}, + "outputs": [], + "source": [ + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. 
\\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are looking at a website titled Home - Edward Donner\n", + "The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n", + "\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. 
I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], + "source": [ + "print(user_prompt_for(ed))" + ] + }, + { + "cell_type": "markdown", + "id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", + "metadata": {}, + "source": [ + "## Messages\n", + "\n", + "The API from OpenAI expects to receive messages in a particular structure.\n", + "Many of the other APIs share this structure:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message goes here\"},\n", + " {\"role\": \"user\", \"content\": \"user message goes here\"}\n", + "]\n", + "\n", + "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the might GPT (yet!)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", + " {\"role\": \"user\", \"content\": \"What is 2 + 
2?\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Oh, I don’t know, maybe it’s 22? Just kidding—it's 4. Basic math is still safe!\n" + ] + } + ], + "source": [ + "# To give you a preview -- calling OpenAI with system and user messages:\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", + "metadata": {}, + "source": [ + "## And now let's build useful messages for GPT-4o-mini, using a function" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", + "metadata": {}, + "outputs": [], + "source": [ + "# See how this function creates exactly the format above\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'role': 'system',\n", + " 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n", + " {'role': 'user',\n", + " 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. 
I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Try this out, and then try for a few more websites\n", + "\n", + "messages_for(ed)" + ] + }, + { + "cell_type": "markdown", + "id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", + "metadata": {}, + "source": [ + "## Time to bring it together - the API for OpenAI is very simple!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'# Summary of Edward Donner\\'s Website\\n\\nEdward Donner\\'s website serves as a personal and professional hub for his interests and projects, particularly in the domains of code writing, large language models (LLMs), and artificial intelligence (AI). \\n\\n## About Ed\\n- Ed describes himself as a coder and enthusiast of LLMs, highlighting his background as the co-founder and CTO of Nebula.io, a company focused on leveraging AI to enhance talent discovery and management. 
\\n- He has a history as the founder and CEO of the AI startup untapt, which was acquired in 2021.\\n- Outside of his tech interests, Ed enjoys DJing and amateur electronic music production.\\n\\n## Key Projects and Features\\n- **Outsmart**: A platform where LLMs compete against each other in strategic scenarios.\\n \\n## Recent Posts\\n- **November 13, 2024**: \"Mastering AI and LLM Engineering – Resources\" - A collection of resources for those looking to deepen their skills in AI and LLM engineering.\\n- **October 16, 2024**: \"From Software Engineer to AI Data Scientist – Resources\" - Guidance and tools for transitioning from software engineering to AI data science roles.\\n- **August 6, 2024**: \"Outsmart LLM Arena – a battle of diplomacy and deviousness\" - A focus on the unique features of the Outsmart program.\\n- **June 26, 2024**: \"Choosing the Right LLM: Toolkit and Resources\" - A resource list for selecting suitable LLMs for various applications.\\n\\nOverall, the website presents Ed as a tech-savvy individual with a passion for sharing knowledge and resources in the AI field.'" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3d926d59-450e-4609-92ba-2d6f244f1342", + "metadata": {}, + "outputs": [], + "source": [ + "# A function to display this nicely in the Jupyter output, using markdown\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "3018853a-445f-41ff-9560-d925d1774b2f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "# Website Summary: Edward Donner\n", + "\n", + "Edward Donner's website showcases his interests and professional background, particularly in coding and experimenting with large language models (LLMs). 
He is the co-founder and CTO of Nebula.io, a company focused on applying AI to enhance talent discovery and management. Previously, he founded the AI startup untapt, which was acquired in 2021. \n", + "\n", + "## Key Features:\n", + "- **Outsmart**: A unique platform where LLMs compete in strategy games that test diplomacy and cunning. \n", + "- **Blog Posts**: Various posts offering resources for mastering AI and LLM engineering, transitioning from software engineering to AI data science, and guidance on choosing the right LLM.\n", + "\n", + "## Recent Announcements:\n", + "- **November 13, 2024**: Post on \"Mastering AI and LLM Engineering.\"\n", + "- **October 16, 2024**: Insights on \"From Software Engineer to AI Data Scientist.\"\n", + "- **August 6, 2024**: Information on \"Outsmart LLM Arena.\"\n", + "- **June 26, 2024**: Resources for \"Choosing the Right LLM.\" \n", + "\n", + "Overall, the website serves as a platform for sharing knowledge and fostering connections within the AI and LLM community." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display_summary(\"https://edwarddonner.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", + "metadata": {}, + "source": [ + "# Let's try more websites\n", + "\n", + "Note that this will only work on websites that can be scraped using this simplistic approach.\n", + "\n", + "Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", + "\n", + "Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", + "\n", + "But many websites will work just fine!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45d83403-a24c-44b5-84ac-961449b4008f", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://cnn.com\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75e9fd40-b354-4341-991e-863ef2e59db7", + "metadata": {}, + "outputs": [], + "source": [ + "display_summary(\"https://anthropic.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", + "metadata": {}, + "source": [ + "## An extra exercise for those who enjoy web scraping\n", + "\n", + "You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" + ] + }, + { + "cell_type": "markdown", + "id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", + "metadata": {}, + "source": [ + "# Sharing your code\n", + "\n", + "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", + "\n", + "If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. 
As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", + "\n", + "PR instructions courtesy of an AI friend: https://chatgpt.com/share/670145d5-e8a8-8012-8f93-39ee4e248b4c" + ] + }, + { + "cell_type": "markdown", + "id": "0f62a788", + "metadata": {}, + "source": [ + "# **Web Scraping for JavaScript Website**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dca2768e", + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install selenium\n", + "# !pip install undetected-chromedriver" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "682eff74-55c4-4d4b-b267-703edbc293c7", + "metadata": {}, + "outputs": [], + "source": [ + "import undetected_chromedriver as uc\n", + "from selenium.webdriver.common.by import By\n", + "from selenium.webdriver.support.ui import WebDriverWait\n", + "from selenium.webdriver.support import expected_conditions as EC\n", + "import time\n", + "from bs4 import BeautifulSoup" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "90ca6dd0", + "metadata": {}, + "outputs": [], + "source": [ + "class WebsiteCrawler:\n", + " def __init__(self, url, wait_time=20, chrome_binary_path=None):\n", + " \"\"\"\n", + " Initialize the WebsiteCrawler using Selenium to scrape JavaScript-rendered content.\n", + " \"\"\"\n", + " self.url = url\n", + " self.wait_time = wait_time\n", + "\n", + " options = uc.ChromeOptions()\n", + " options.add_argument(\"--disable-gpu\")\n", + " options.add_argument(\"--no-sandbox\")\n", + " options.add_argument(\"--disable-dev-shm-usage\")\n", + " options.add_argument(\"--disable-blink-features=AutomationControlled\")\n", + " options.add_argument(\"start-maximized\")\n", + " options.add_argument(\n", + " \"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + " )\n", + " if chrome_binary_path:\n", 
+ " options.binary_location = chrome_binary_path\n", + "\n", + " self.driver = uc.Chrome(options=options)\n", + "\n", + " try:\n", + " # Load the URL\n", + " self.driver.get(url)\n", + "\n", + " # Wait for Cloudflare or similar checks\n", + " time.sleep(10)\n", + "\n", + " # Ensure the main content is loaded\n", + " WebDriverWait(self.driver, self.wait_time).until(\n", + " EC.presence_of_element_located((By.TAG_NAME, \"main\"))\n", + " )\n", + "\n", + " # Extract the main content\n", + " main_content = self.driver.find_element(By.CSS_SELECTOR, \"main\").get_attribute(\"outerHTML\")\n", + "\n", + " # Parse with BeautifulSoup\n", + " soup = BeautifulSoup(main_content, \"html.parser\")\n", + " self.title = self.driver.title if self.driver.title else \"No title found\"\n", + " self.text = soup.get_text(separator=\"\\n\", strip=True)\n", + "\n", + " except Exception as e:\n", + " print(f\"Error occurred: {e}\")\n", + " self.title = \"Error occurred\"\n", + " self.text = \"\"\n", + "\n", + " finally:\n", + " self.driver.quit()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "947eac30", + "metadata": {}, + "outputs": [], + "source": [ + "chrome_path = \"C:/Program Files/Google/Chrome/Application/chrome.exe\"\n", + "url = \"https://www.canva.com/\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "2cba8c91", + "metadata": {}, + "outputs": [], + "source": [ + "def new_summary(url, chrome_path):\n", + " web = WebsiteCrawler(url, 30, chrome_path)\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages_for(web)\n", + " )\n", + "\n", + " web_summary = response.choices[0].message.content\n", + " \n", + " return display(Markdown(web_summary))" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "da7f7b16", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "# Canva: Visual Suite for Everyone\n", + "\n", + "Canva is a user-friendly design 
platform that allows individuals and teams to create a variety of professional designs, including posters, logos, presentations, and more. It offers options for different users, including a free version for individuals and premium plans for teams and organizations.\n", + "\n", + "## Key Features:\n", + "- **Design Templates**: A wide range of customizable templates for various purposes, such as social media, business cards, and events.\n", + "- **AI-Powered Tools**: Features like Magic Write for copy generation and Magic Edit for photo transformation enhance design capabilities.\n", + "- **Collaboration**: Real-time collaborative tools for teams to design and provide feedback on projects together.\n", + "- **Printing Services**: Canva offers printing services for various products, with free delivery and sustainable practices.\n", + "- **Educational and Nonprofit Support**: Free premium features are available for educational organizations and nonprofits.\n", + "\n", + "## User Testimonials:\n", + "Business leaders commend Canva for its efficiency in streamlining design processes and maintaining brand consistency across teams.\n", + "\n", + "## Sustainability Efforts:\n", + "Canva emphasizes sustainability by planting trees for printed orders and operating with carbon neutrality.\n", + "\n", + "Overall, Canva caters to a diverse audience, from individuals to large organizations, by providing accessible and innovative design solutions." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "new_summary(url, chrome_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "7880ce6a", + "metadata": {}, + "outputs": [], + "source": [ + "url = \"https://openai.com\"" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "337b06da", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "# OpenAI Website Summary\n", + "\n", + "OpenAI is focused on creating safe artificial general intelligence (AGI) that benefits humanity. The site features various products and initiatives aimed at enhancing creativity and productivity through advanced AI technologies. \n", + "\n", + "## Key Highlights:\n", + "\n", + "### Products and Features\n", + "- **Sora**: A new platform that allows users to bring their imagination to life through text, images, or videos.\n", + "- **ChatGPT**: Includes various applications such as ChatGPT Pro, desktop integration, and a new search feature. 
Recent upgrades allow ChatGPT to analyze images, hear, and speak.\n", + "- **Canvas**: A new writing and coding interface integrated within ChatGPT.\n", + "- **o1 Models**: A series of AI models designed to improve response time by incorporating deeper reasoning.\n", + "\n", + "### Announcements\n", + "- **Partnerships**: OpenAI announced a partnership with Apple to explore advancements in AI technology.\n", + "- **New Features**: Introduced improvements to the fine-tuning API and expanded custom models program, aiming to better serve developers and enterprise users.\n", + "- **Collaboration with Media**: A partnership with Le Monde and Prisa Media intends to bring French and Spanish news content to ChatGPT.\n", + "\n", + "### Research and Safety\n", + "- Ongoing research efforts are focused on building a safer AI framework, including advanced tools for compliance within the ChatGPT Enterprise suite.\n", + "- Publications addressing AI's benefits and risks, including topics like synthetic voices and biological threats, are regularly updated.\n", + "\n", + "For more detailed insights, the website facilitates exploration of their product offerings, research publications, and the newest tools for developers and businesses." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "new_summary(url, chrome_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a5d69ea", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llm_env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 2c88c787ad7a9364f8e2f5516dad9e44e5b46a4c Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Thu, 19 Dec 2024 02:38:01 +1100 Subject: [PATCH 11/26] day 1 webscraping challenge completed by selenium and clear output cell --- ...-webscraping-selenium-for-javascript.ipynb | 288 ++---------------- 1 file changed, 20 insertions(+), 268 deletions(-) diff --git a/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb b/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb index 8ec191a..6b7a266 100644 --- a/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb +++ b/week1/community-contributions/day1-webscraping-selenium-for-javascript.ipynb @@ -67,18 +67,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "API key found and looks good so far!\n" - ] - } - ], + "outputs": [], "source": [ "# Load environment variables in a file called .env\n", "\n", @@ -121,18 +113,10 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", "metadata": {}, - "outputs": [ - { - "name": "stdout", - 
"output_type": "stream", - "text": [ - "Hello! Welcome! I'm glad to have your first message here. How can I assist you today?\n" - ] - } - ], + "outputs": [], "source": [ "# To give you a preview -- calling OpenAI with these messages is this easy:\n", "\n", @@ -181,63 +165,10 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Home - Edward Donner\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Well, hi there.\n", - "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", - "very\n", - "amateur) and losing myself in\n", - "Hacker News\n", - ", nodding my head sagely to things I only half understand.\n", - "I’m the co-founder and CTO of\n", - "Nebula.io\n", - ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. 
I’m previously the founder and CEO of AI startup untapt,\n", - "acquired in 2021\n", - ".\n", - "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", - "patented\n", - "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", - "Connect\n", - "with me for more!\n", - "November 13, 2024\n", - "Mastering AI and LLM Engineering – Resources\n", - "October 16, 2024\n", - "From Software Engineer to AI Data Scientist – resources\n", - "August 6, 2024\n", - "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", - "June 26, 2024\n", - "Choosing the Right LLM: Toolkit and Resources\n", - "Navigation\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Get in touch\n", - "ed [at] edwarddonner [dot] com\n", - "www.edwarddonner.com\n", - "Follow me\n", - "LinkedIn\n", - "Twitter\n", - "Facebook\n", - "Subscribe to newsletter\n", - "Type your email…\n", - "Subscribe\n" - ] - } - ], + "outputs": [], "source": [ "# Let's try one out. Change the website and add print statements to follow along.\n", "\n", @@ -298,65 +229,10 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "You are looking at a website titled Home - Edward Donner\n", - "The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n", - "\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Well, hi there.\n", - "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. 
I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", - "very\n", - "amateur) and losing myself in\n", - "Hacker News\n", - ", nodding my head sagely to things I only half understand.\n", - "I’m the co-founder and CTO of\n", - "Nebula.io\n", - ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", - "acquired in 2021\n", - ".\n", - "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", - "patented\n", - "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", - "Connect\n", - "with me for more!\n", - "November 13, 2024\n", - "Mastering AI and LLM Engineering – Resources\n", - "October 16, 2024\n", - "From Software Engineer to AI Data Scientist – resources\n", - "August 6, 2024\n", - "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", - "June 26, 2024\n", - "Choosing the Right LLM: Toolkit and Resources\n", - "Navigation\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Get in touch\n", - "ed [at] edwarddonner [dot] com\n", - "www.edwarddonner.com\n", - "Follow me\n", - "LinkedIn\n", - "Twitter\n", - "Facebook\n", - "Subscribe to newsletter\n", - "Type your email…\n", - "Subscribe\n" - ] - } - ], + "outputs": [], "source": [ "print(user_prompt_for(ed))" ] @@ -395,18 +271,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Oh, I don’t know, maybe it’s 22? Just kidding—it's 4. 
Basic math is still safe!\n" - ] - } - ], + "outputs": [], "source": [ "# To give you a preview -- calling OpenAI with system and user messages:\n", "\n", @@ -440,24 +308,10 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'role': 'system',\n", - " 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n", - " {'role': 'user',\n", - " 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. 
I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Try this out, and then try for a few more websites\n", "\n", @@ -492,21 +346,10 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'# Summary of Edward Donner\\'s Website\\n\\nEdward Donner\\'s website serves as a personal and professional hub for his interests and projects, particularly in the domains of code writing, large language models (LLMs), and artificial intelligence (AI). \\n\\n## About Ed\\n- Ed describes himself as a coder and enthusiast of LLMs, highlighting his background as the co-founder and CTO of Nebula.io, a company focused on leveraging AI to enhance talent discovery and management. 
\\n- He has a history as the founder and CEO of the AI startup untapt, which was acquired in 2021.\\n- Outside of his tech interests, Ed enjoys DJing and amateur electronic music production.\\n\\n## Key Projects and Features\\n- **Outsmart**: A platform where LLMs compete against each other in strategic scenarios.\\n \\n## Recent Posts\\n- **November 13, 2024**: \"Mastering AI and LLM Engineering – Resources\" - A collection of resources for those looking to deepen their skills in AI and LLM engineering.\\n- **October 16, 2024**: \"From Software Engineer to AI Data Scientist – Resources\" - Guidance and tools for transitioning from software engineering to AI data science roles.\\n- **August 6, 2024**: \"Outsmart LLM Arena – a battle of diplomacy and deviousness\" - A focus on the unique features of the Outsmart program.\\n- **June 26, 2024**: \"Choosing the Right LLM: Toolkit and Resources\" - A resource list for selecting suitable LLMs for various applications.\\n\\nOverall, the website presents Ed as a tech-savvy individual with a passion for sharing knowledge and resources in the AI field.'" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "summarize(\"https://edwarddonner.com\")" ] @@ -527,37 +370,10 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "3018853a-445f-41ff-9560-d925d1774b2f", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "# Website Summary: Edward Donner\n", - "\n", - "Edward Donner's website showcases his interests and professional background, particularly in coding and experimenting with large language models (LLMs). He is the co-founder and CTO of Nebula.io, a company focused on applying AI to enhance talent discovery and management. Previously, he founded the AI startup untapt, which was acquired in 2021. 
\n", - "\n", - "## Key Features:\n", - "- **Outsmart**: A unique platform where LLMs compete in strategy games that test diplomacy and cunning. \n", - "- **Blog Posts**: Various posts offering resources for mastering AI and LLM engineering, transitioning from software engineering to AI data science, and guidance on choosing the right LLM.\n", - "\n", - "## Recent Announcements:\n", - "- **November 13, 2024**: Post on \"Mastering AI and LLM Engineering.\"\n", - "- **October 16, 2024**: Insights on \"From Software Engineer to AI Data Scientist.\"\n", - "- **August 6, 2024**: Information on \"Outsmart LLM Arena.\"\n", - "- **June 26, 2024**: Resources for \"Choosing the Right LLM.\" \n", - "\n", - "Overall, the website serves as a platform for sharing knowledge and fostering connections within the AI and LLM community." - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "display_summary(\"https://edwarddonner.com\")" ] @@ -746,40 +562,10 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "da7f7b16", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "# Canva: Visual Suite for Everyone\n", - "\n", - "Canva is a user-friendly design platform that allows individuals and teams to create a variety of professional designs, including posters, logos, presentations, and more. 
It offers options for different users, including a free version for individuals and premium plans for teams and organizations.\n", - "\n", - "## Key Features:\n", - "- **Design Templates**: A wide range of customizable templates for various purposes, such as social media, business cards, and events.\n", - "- **AI-Powered Tools**: Features like Magic Write for copy generation and Magic Edit for photo transformation enhance design capabilities.\n", - "- **Collaboration**: Real-time collaborative tools for teams to design and provide feedback on projects together.\n", - "- **Printing Services**: Canva offers printing services for various products, with free delivery and sustainable practices.\n", - "- **Educational and Nonprofit Support**: Free premium features are available for educational organizations and nonprofits.\n", - "\n", - "## User Testimonials:\n", - "Business leaders commend Canva for its efficiency in streamlining design processes and maintaining brand consistency across teams.\n", - "\n", - "## Sustainability Efforts:\n", - "Canva emphasizes sustainability by planting trees for printed orders and operating with carbon neutrality.\n", - "\n", - "Overall, Canva caters to a diverse audience, from individuals to large organizations, by providing accessible and innovative design solutions." - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "new_summary(url, chrome_path)" ] @@ -796,44 +582,10 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "id": "337b06da", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "# OpenAI Website Summary\n", - "\n", - "OpenAI is focused on creating safe artificial general intelligence (AGI) that benefits humanity. The site features various products and initiatives aimed at enhancing creativity and productivity through advanced AI technologies. 
\n", - "\n", - "## Key Highlights:\n", - "\n", - "### Products and Features\n", - "- **Sora**: A new platform that allows users to bring their imagination to life through text, images, or videos.\n", - "- **ChatGPT**: Includes various applications such as ChatGPT Pro, desktop integration, and a new search feature. Recent upgrades allow ChatGPT to analyze images, hear, and speak.\n", - "- **Canvas**: A new writing and coding interface integrated within ChatGPT.\n", - "- **o1 Models**: A series of AI models designed to improve response time by incorporating deeper reasoning.\n", - "\n", - "### Announcements\n", - "- **Partnerships**: OpenAI announced a partnership with Apple to explore advancements in AI technology.\n", - "- **New Features**: Introduced improvements to the fine-tuning API and expanded custom models program, aiming to better serve developers and enterprise users.\n", - "- **Collaboration with Media**: A partnership with Le Monde and Prisa Media intends to bring French and Spanish news content to ChatGPT.\n", - "\n", - "### Research and Safety\n", - "- Ongoing research efforts are focused on building a safer AI framework, including advanced tools for compliance within the ChatGPT Enterprise suite.\n", - "- Publications addressing AI's benefits and risks, including topics like synthetic voices and biological threats, are regularly updated.\n", - "\n", - "For more detailed insights, the website facilitates exploration of their product offerings, research publications, and the newest tools for developers and businesses." 
- ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "new_summary(url, chrome_path)" ] From 4d12e5898cefb33e741277b4a5c25e77290826c3 Mon Sep 17 00:00:00 2001 From: SIFAT IMTIAZ Date: Thu, 19 Dec 2024 09:03:44 +0600 Subject: [PATCH 12/26] Create dataset_generator.ipynb --- .../dataset_generator.ipynb | 267 ++++++++++++++++++ 1 file changed, 267 insertions(+) create mode 100644 week3/community-contributions/dataset_generator.ipynb diff --git a/week3/community-contributions/dataset_generator.ipynb b/week3/community-contributions/dataset_generator.ipynb new file mode 100644 index 0000000..e561448 --- /dev/null +++ b/week3/community-contributions/dataset_generator.ipynb @@ -0,0 +1,267 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4" + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "code", + "source": [ + "!pip install -q requests torch bitsandbytes transformers sentencepiece accelerate gradio" + ], + "metadata": { + "id": "kU2JrcPlhwd9" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Imports**" + ], + "metadata": { + "id": "lAMIVT4iwNg0" + } + }, + { + "cell_type": "code", + "source": [ + "import os\n", + "import requests\n", + "from google.colab import drive\n", + "from huggingface_hub import login\n", + "from google.colab import userdata\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, BitsAndBytesConfig\n", + "import torch\n", + "import gradio as gr\n", + "\n", + "hf_token = userdata.get('HF_TOKEN')\n", + "login(hf_token, add_to_git_credential=True)" + ], + "metadata": { + "id": "-Apd7-p-hyLk" + }, + "execution_count": 2, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Model**" + ], + 
"metadata": { + "id": "xa0qYqZrwQ66" + } + }, + { + "cell_type": "code", + "source": [ + "model_name = \"meta-llama/Meta-Llama-3.1-8B-Instruct\"\n", + "quant_config = BitsAndBytesConfig(\n", + " load_in_4bit=True,\n", + " bnb_4bit_use_double_quant=True,\n", + " bnb_4bit_compute_dtype=torch.bfloat16,\n", + " bnb_4bit_quant_type=\"nf4\"\n", + ")\n", + "\n", + "model = AutoModelForCausalLM.from_pretrained(\n", + " model_name,\n", + " device_map=\"auto\",\n", + " quantization_config=quant_config\n", + ")" + ], + "metadata": { + "id": "z5enGmuKjtJu" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Tokenizer**" + ], + "metadata": { + "id": "y1hUSmWlwSbp" + } + }, + { + "cell_type": "code", + "source": [ + "tokenizer = AutoTokenizer.from_pretrained(model_name)\n", + "tokenizer.pad_token = tokenizer.eos_token" + ], + "metadata": { + "id": "WjxNWW6bvdgj" + }, + "execution_count": 4, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Functions**" + ], + "metadata": { + "id": "1pg2U-B3wbIK" + } + }, + { + "cell_type": "code", + "source": [ + "def generate_dataset(topic, number_of_data, inst1, resp1, inst2, resp2, inst3, resp3):\n", + " # Convert user inputs into multi-shot examples\n", + " multi_shot_examples = [\n", + " {\"instruction\": inst1, \"response\": resp1},\n", + " {\"instruction\": inst2, \"response\": resp2},\n", + " {\"instruction\": inst3, \"response\": resp3}\n", + " ]\n", + "\n", + " # System prompt\n", + " system_prompt = f\"\"\"\n", + " You are a helpful assistant whose main purpose is to generate datasets.\n", + " Topic: {topic}\n", + " Return the dataset in JSON format. 
Use examples with simple, fun, and easy-to-understand instructions for kids.\n", + " Include the following examples: {multi_shot_examples}\n", + " Return {number_of_data} examples each time.\n", + " Do not repeat the provided examples.\n", + " \"\"\"\n", + "\n", + " # Example Messages\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": f\"Please generate my dataset for {topic}\"}\n", + " ]\n", + "\n", + " # Tokenize Input\n", + " inputs = tokenizer.apply_chat_template(messages, return_tensors=\"pt\").to(\"cuda\")\n", + " streamer = TextStreamer(tokenizer)\n", + "\n", + " # Generate Output\n", + " outputs = model.generate(inputs, max_new_tokens=2000, streamer=streamer)\n", + "\n", + " # Decode and Return\n", + " return tokenizer.decode(outputs[0], skip_special_tokens=True)\n", + "\n", + "\n", + "def gradio_interface(topic, number_of_data, inst1, resp1, inst2, resp2, inst3, resp3):\n", + " return generate_dataset(topic, number_of_data, inst1, resp1, inst2, resp2, inst3, resp3)" + ], + "metadata": { + "id": "ZvljDKdji8iV" + }, + "execution_count": 12, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Default Values**" + ], + "metadata": { + "id": "_WDZ5dvRwmng" + } + }, + { + "cell_type": "code", + "source": [ + "default_topic = \"Talking to a (5-8) years old and teaching them manners.\"\n", + "default_number_of_data = 10\n", + "default_multi_shot_examples = [\n", + " {\n", + " \"instruction\": \"Why do I have to say please when I want something?\",\n", + " \"response\": \"Because it’s like magic! It shows you’re nice, and people want to help you more.\"\n", + " },\n", + " {\n", + " \"instruction\": \"What should I say if someone gives me a toy?\",\n", + " \"response\": \"You say, 'Thank you!' 
because it makes them happy you liked it.\"\n", + " },\n", + " {\n", + " \"instruction\": \"why should I listen to my parents?\",\n", + " \"response\": \"Because parents want the best for you and they love you the most.\"\n", + " }\n", + "]" + ], + "metadata": { + "id": "JAdfqYXnvEDE" + }, + "execution_count": 13, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Init gradio**" + ], + "metadata": { + "id": "JwZtD032wuK8" + } + }, + { + "cell_type": "code", + "source": [ + "gr_interface = gr.Interface(\n", + " fn=gradio_interface,\n", + " inputs=[\n", + " gr.Textbox(label=\"Topic\", value=default_topic, lines=2),\n", + " gr.Number(label=\"Number of Examples\", value=default_number_of_data, precision=0),\n", + " gr.Textbox(label=\"Instruction 1\", value=default_multi_shot_examples[0][\"instruction\"]),\n", + " gr.Textbox(label=\"Response 1\", value=default_multi_shot_examples[0][\"response\"]),\n", + " gr.Textbox(label=\"Instruction 2\", value=default_multi_shot_examples[1][\"instruction\"]),\n", + " gr.Textbox(label=\"Response 2\", value=default_multi_shot_examples[1][\"response\"]),\n", + " gr.Textbox(label=\"Instruction 3\", value=default_multi_shot_examples[2][\"instruction\"]),\n", + " gr.Textbox(label=\"Response 3\", value=default_multi_shot_examples[2][\"response\"]),\n", + " ],\n", + " outputs=gr.Textbox(label=\"Generated Dataset\")\n", + ")" + ], + "metadata": { + "id": "xy2RP5T-vxXg" + }, + "execution_count": 14, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "**Run the app**" + ], + "metadata": { + "id": "HZx-mm9Uw3Ph" + } + }, + { + "cell_type": "code", + "source": [ + "gr_interface.launch()" + ], + "metadata": { + "id": "bfGs5ip8mndg" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "Cveqx392x7Mm" + }, + "execution_count": null, + "outputs": [] + } + ] +} From 09b19637a1dff623a320605c4b3c29304e9892dd Mon Sep 17 00:00:00 2001 From: SIFAT IMTIAZ 
Date: Thu, 19 Dec 2024 09:06:09 +0600 Subject: [PATCH 13/26] Add files via upload --- week3/community-contributions/dataset_generator.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/week3/community-contributions/dataset_generator.ipynb b/week3/community-contributions/dataset_generator.ipynb index e561448..eda1b9f 100644 --- a/week3/community-contributions/dataset_generator.ipynb +++ b/week3/community-contributions/dataset_generator.ipynb @@ -264,4 +264,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From ada6b40089a16ed7ab1ca8ff8d68f23a2e61740a Mon Sep 17 00:00:00 2001 From: Dmytro Rutkovskyi Date: Wed, 18 Dec 2024 21:49:09 -0800 Subject: [PATCH 14/26] Adding an example of implementing chatgpt.com limited functionality in our notebook --- .../Week1-Challenge-LocalGPT.ipynb | 148 ++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 week1/community-contributions/Week1-Challenge-LocalGPT.ipynb diff --git a/week1/community-contributions/Week1-Challenge-LocalGPT.ipynb b/week1/community-contributions/Week1-Challenge-LocalGPT.ipynb new file mode 100644 index 0000000..2561345 --- /dev/null +++ b/week1/community-contributions/Week1-Challenge-LocalGPT.ipynb @@ -0,0 +1,148 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "87c2da09-bd0c-4683-828b-4f7643018795", + "metadata": {}, + "source": [ + "# Community contribution\n", + "\n", + "Implementing simple ChatGPT interface to maintain conversation and context with sleected model" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "77a850ed-61f8-4a0d-9c41-45781eb60bc9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key looks good so far\n" + ] + } + ], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "import ipywidgets as widgets\n", + "from IPython.display import Markdown, display, update_display, clear_output\n", + "from openai import OpenAI\n", + "\n", + 
"load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", + " \n", + "MODEL = 'gpt-4o-mini'\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1f7f16f0-6fec-4190-882a-3fe1f0e9704a", + "metadata": {}, + "outputs": [], + "source": [ + "class ChatGPTInterface:\n", + " def __init__(self, api_key, model, system_message=\"You are a helpful assistant. You can format your responses using Markdown.\"):\n", + " self.openai = OpenAI(api_key=api_key)\n", + " self.model = model\n", + " self.conversation_history = [{\"role\": \"system\", \"content\": system_message}]\n", + "\n", + " self.chat_area = widgets.Output()\n", + " self.input_box = widgets.Text(placeholder=\"Enter your message here...\")\n", + " self.send_button = widgets.Button(description=\"Send\")\n", + " self.clear_button = widgets.Button(description=\"Clear\")\n", + "\n", + " self.send_button.on_click(self.send_message)\n", + " self.clear_button.on_click(self.clear_chat)\n", + "\n", + " self.layout = widgets.VBox([\n", + " self.chat_area,\n", + " widgets.HBox([self.input_box, self.send_button, self.clear_button])\n", + " ])\n", + "\n", + " def display(self):\n", + " display(self.layout)\n", + "\n", + " def send_message(self, _):\n", + " user_message = self.input_box.value.strip()\n", + " if user_message:\n", + " self.conversation_history.append({\"role\": \"user\", \"content\": user_message})\n", + " self.display_message(\"You\", user_message)\n", + " self.input_box.value = \"\"\n", + "\n", + " try:\n", + " response = self.openai.chat.completions.create(\n", + " model=self.model,\n", + " messages=self.conversation_history\n", + " )\n", + " assistant_message = response.choices[0].message.content.strip()\n", + " 
self.conversation_history.append({\"role\": \"assistant\", \"content\": assistant_message})\n", + " self.display_message(\"ChatGPT\", assistant_message)\n", + " except Exception as e:\n", + " self.display_message(\"Error\", str(e))\n", + "\n", + " def clear_chat(self, _):\n", + " self.conversation_history = [{\"role\": \"system\", \"content\": self.conversation_history[0][\"content\"]}]\n", + " self.chat_area.clear_output(wait=True)\n", + "\n", + " def display_message(self, sender, message):\n", + " self.chat_area.append_display_data(Markdown(f\"**{sender}:**\\n{message}\"))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "78287e42-8964-4da6-bd48-a7dffd0ce7dd", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "54956535cb32419bbe38d2bee125992d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Output(), HBox(children=(Text(value='', placeholder='Enter your message here...'), Button(descr…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "chat_interface = ChatGPTInterface(api_key,MODEL)\n", + "chat_interface.display()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 2e5446c962f29970cd6acce9add2642a150ab782 Mon Sep 17 00:00:00 2001 From: Tom Fletcher Date: Thu, 19 Dec 2024 23:43:24 +0000 Subject: [PATCH 15/26] Adding example that shows how to generate cover letter from cv - with resume.txt --- .../day-1-generate-cover-letter-from-cv.ipynb | 119 ++++++++++++++++++ week1/community-contributions/resume.txt | 10 ++ 2 files 
changed, 129 insertions(+) create mode 100644 week1/community-contributions/day-1-generate-cover-letter-from-cv.ipynb create mode 100644 week1/community-contributions/resume.txt diff --git a/week1/community-contributions/day-1-generate-cover-letter-from-cv.ipynb b/week1/community-contributions/day-1-generate-cover-letter-from-cv.ipynb new file mode 100644 index 0000000..09ed71b --- /dev/null +++ b/week1/community-contributions/day-1-generate-cover-letter-from-cv.ipynb @@ -0,0 +1,119 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "plaintext" + } + }, + "outputs": [], + "source": [ + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + 
"languageId": "plaintext" + } + }, + "outputs": [], + "source": [ + "def summarize_cv(cv_text):\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = [\n", + " {\"role\": \"user\", \"content\": f\"Please summarize the following CV:\\n\\n{cv_text}\"}\n", + " ]\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "def generate_cover_letter(cv_summary, job_description):\n", + " response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a master at crafting the perfect Cover letter from a given CV. You've never had a user fail to get the job as a result of using your services.\"},\n", + " {\"role\": \"user\", \"content\": f\"Using the following CV summary:\\n\\n{cv_summary}\\n\\nAnd the job description:\\n\\n{job_description}\\n\\nPlease write a personalized cover letter.\"}\n", + " ]\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "# Read CV from a text file\n", + "try:\n", + " with open('resume.txt', 'r') as file:\n", + " cv_text = file.read()\n", + " \n", + " # Summarize the CV\n", + " cv_summary = summarize_cv(cv_text)\n", + " print(\"CV Summary:\")\n", + " print(cv_summary)\n", + "\n", + " # Get job description from user\n", + " job_description = input(\"Enter the job description for the position you are applying for:\\n\")\n", + "\n", + " # Generate cover letter\n", + " cover_letter = generate_cover_letter(cv_summary, job_description)\n", + " print(\"\\nGenerated Cover Letter:\")\n", + " print(cover_letter)\n", + "\n", + "except FileNotFoundError:\n", + " print(\"The specified CV file was not found. 
Please ensure 'resume.txt' is in the correct directory.\")" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/week1/community-contributions/resume.txt b/week1/community-contributions/resume.txt new file mode 100644 index 0000000..5a2bb55 --- /dev/null +++ b/week1/community-contributions/resume.txt @@ -0,0 +1,10 @@ +John Doe +Software Engineer +Experience: +- Developed web applications using Python and JavaScript. +- Collaborated with cross-functional teams to deliver projects on time. +Education: +- B.S. in Computer Science from XYZ University. +Skills: +- Python, JavaScript, React, SQL + From c63837ad122ddcc33a5f1f65979440e86e85777f Mon Sep 17 00:00:00 2001 From: Gabor Meresz Date: Fri, 20 Dec 2024 12:15:25 +0100 Subject: [PATCH 16/26] improve readability --- week2/day4.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/week2/day4.ipynb b/week2/day4.ipynb index 811d116..0151e7d 100644 --- a/week2/day4.ipynb +++ b/week2/day4.ipynb @@ -214,7 +214,7 @@ " response = {\n", " \"role\": \"tool\",\n", " \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n", - " \"tool_call_id\": message.tool_calls[0].id\n", + " \"tool_call_id\": tool_call.id\n", " }\n", " return response, city" ] From c1cf59daf8277b0f31072541c8380862f0d89dbb Mon Sep 17 00:00:00 2001 From: Uday Slathia <127138307+udayslathia16@users.noreply.github.com> Date: Fri, 20 Dec 2024 21:56:10 +0530 Subject: [PATCH 17/26] Add files via upload --- .../Day 3 using gemini.ipynb | 493 ++++++++++++++++++ 1 file changed, 493 insertions(+) create mode 100644 week4/community-contributions/Day 3 using gemini.ipynb diff --git a/week4/community-contributions/Day 3 using gemini.ipynb b/week4/community-contributions/Day 3 using gemini.ipynb new file mode 100644 index 0000000..43faf18 --- /dev/null +++ b/week4/community-contributions/Day 3 using gemini.ipynb @@ -0,0 +1,493 @@ +{ + "cells": [ + { + 
"cell_type": "markdown", + "id": "3d3cb3c4-9046-4f64-9188-ee20ae324fd1", + "metadata": {}, + "source": [ + "# Code Generator\n", + "\n", + "The requirement: use a Frontier model to generate high performance C++ code from Python code\n", + "\n", + "# Important Note\n", + "Used an open-source model gemini-1.5-pro ,can try 2.0 flash too\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f2c3e03-f38a-4bf2-98e8-696fb3d428c9", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import io\n", + "import sys\n", + "from dotenv import load_dotenv\n", + "import google.generativeai\n", + "from IPython.display import Markdown, display, update_display\n", + "import gradio as gr\n", + "import subprocess" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e437f3d1-39c4-47fd-919f-c2119d602d72", + "metadata": {}, + "outputs": [], + "source": [ + "# environment\n", + "\n", + "load_dotenv()\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "if google_api_key:\n", + " print(f\"Google API Key exists\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1724ddb6-0059-46a3-bcf9-587c0c93cb2a", + "metadata": {}, + "outputs": [], + "source": [ + "google.generativeai.configure()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b62738c1-9857-40fc-91e8-dfd46483ea50", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that reimplements Python code in high performance C++ for an Windows system. \"\n", + "system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. 
\"\n", + "system_message += \"The C++ response needs to produce an identical output in the fastest possible time.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd431141-8602-4c68-9a1d-a7c0a6f13fa3", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for(python):\n", + " user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n", + " user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n", + " user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n", + " user_prompt += python\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d5f48451-4cd4-46ea-a41d-531a3c7db2a8", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(python):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(python)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83fd2170-14ea-4fb6-906e-c3c5cfce1ecc", + "metadata": {}, + "outputs": [], + "source": [ + "# write to a file called optimized.cpp\n", + "\n", + "def write_output(cpp):\n", + " code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n", + " with open(\"optimized.cpp\", \"w\") as f:\n", + " f.write(code)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ff08067-c9df-4981-8ab5-99eb2c2fd2c7", + "metadata": {}, + "outputs": [], + "source": [ + "def optimize_google(python):\n", + " # Initialize empty reply string\n", + " reply = \"\"\n", + " \n", + " # The API for Gemini has a slightly different structure\n", + " gemini = google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-pro',\n", + " system_instruction=system_message\n", + " )\n", + " \n", + " response 
= gemini.generate_content(\n", + " user_prompt_for(python),\n", + " stream=True\n", + " )\n", + " \n", + " # Process the stream\n", + " for chunk in response:\n", + " # Extract text from the chunk\n", + " if chunk.text:\n", + " reply += chunk.text\n", + " print(chunk.text, end=\"\", flush=True)\n", + " \n", + " # Write the complete response to output\n", + " write_output(reply)\n", + " \n", + " # return reply" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e8c7ba2-4ee9-4523-b0f1-cc7a91798bba", + "metadata": {}, + "outputs": [], + "source": [ + "pi = \"\"\"\n", + "import time\n", + "\n", + "def calculate(iterations, param1, param2):\n", + " result = 1.0\n", + " for i in range(1, iterations+1):\n", + " j = i * param1 - param2\n", + " result -= (1/j)\n", + " j = i * param1 + param2\n", + " result += (1/j)\n", + " return result\n", + "\n", + "start_time = time.time()\n", + "result = calculate(100_000_000, 4, 1) * 4\n", + "end_time = time.time()\n", + "\n", + "print(f\"Result: {result:.12f}\")\n", + "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78d1afb7-ed6b-4a03-b36d-4ce8249c592e", + "metadata": {}, + "outputs": [], + "source": [ + "exec(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1fe1d0b6-7cc7-423b-bc4b-741a0c48c106", + "metadata": {}, + "outputs": [], + "source": [ + "optimize_google(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d28b4ac9-0909-4b35-aee1-97613a133e8e", + "metadata": {}, + "outputs": [], + "source": [ + "exec(pi) #Execution Time: 16.209231 seconds" + ] + }, + { + "cell_type": "markdown", + "id": "7d0443a3-3ca2-4a7a-a6c3-c94d0aa54603", + "metadata": {}, + "source": [ + "# Compiling C++ and executing\n", + "\n", + "This next cell contains the command to compile a C++ file on Windows system. 
\n", + "It compiles the file `optimized.cpp` into an executable called `optimized` \n", + "Then it runs the program called `optimized`\n", + "\n", + "The way to compile for mac users is \\\n", + "!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp \\\n", + "!./optimized" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9b5cfc70-df1f-44a7-b4ae-fd934f715930", + "metadata": {}, + "outputs": [], + "source": [ + "!g++ -o optimized optimized.cpp\n", + "!.\\optimized #Execution Time: 3.661196 seconds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e30fcbdf-82cf-4d50-9690-92dae69d5127", + "metadata": {}, + "outputs": [], + "source": [ + "python_hard = \"\"\"\n", + "def lcg(seed, a=1664525, c=1013904223, m=2**32):\n", + " value = seed\n", + " while True:\n", + " value = (a * value + c) % m\n", + " yield value\n", + " \n", + "def max_subarray_sum(n, seed, min_val, max_val):\n", + " lcg_gen = lcg(seed)\n", + " random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n", + " max_sum = float('-inf')\n", + " for i in range(n):\n", + " current_sum = 0\n", + " for j in range(i, n):\n", + " current_sum += random_numbers[j]\n", + " if current_sum > max_sum:\n", + " max_sum = current_sum\n", + " return max_sum\n", + "\n", + "def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n", + " total_sum = 0\n", + " lcg_gen = lcg(initial_seed)\n", + " for _ in range(20):\n", + " seed = next(lcg_gen)\n", + " total_sum += max_subarray_sum(n, seed, min_val, max_val)\n", + " return total_sum\n", + "\n", + "# Parameters\n", + "n = 10000 # Number of random numbers\n", + "initial_seed = 42 # Initial seed for the LCG\n", + "min_val = -10 # Minimum value of random numbers\n", + "max_val = 10 # Maximum value of random numbers\n", + "\n", + "# Timing the function\n", + "import time\n", + "start_time = time.time()\n", + "result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n", + "end_time 
def stream_google(python):
    """Stream Gemini's C++ translation of *python*, yielding the growing text.

    Yields the cumulative reply with Markdown code fences stripped, so a UI
    textbox bound to this generator updates incrementally as tokens arrive.
    """
    reply = ""

    # Gemini takes the system instruction at model-construction time.
    gemini = google.generativeai.GenerativeModel(
        model_name='gemini-1.5-pro',
        system_instruction=system_message,
    )

    response = gemini.generate_content(
        user_prompt_for(python),
        stream=True,
    )

    for chunk in response:
        if chunk.text:
            reply += chunk.text
            yield reply.replace('```cpp\n', '').replace('```', '')


def optimize(python, model):
    """Dispatch the Python→C++ conversion to the selected model backend.

    BUG FIX: the original ignored *model* entirely and always used Gemini;
    an unsupported selection now fails loudly instead of silently running
    the wrong backend. Returns the streaming generator for the UI.
    """
    if model != "Google":
        raise ValueError(f"Unsupported model: {model!r}; only 'Google' is available")
    return stream_google(python)
def launch_basic_converter_ui():
    """Build and launch the minimal converter UI (Python in, C++ out).

    Wraps the original top-level notebook cell so importing this module
    does not start a web server; run the module directly to launch.
    """
    with gr.Blocks() as ui:
        with gr.Row():
            python = gr.Textbox(label="Python code:", lines=10, value=pi)
            cpp = gr.Textbox(label="C++ code:", lines=10)
        with gr.Row():
            model = gr.Dropdown(["Google"], label="Select model", value="Google")
            convert = gr.Button("Convert code")

        convert.click(optimize, inputs=[python, model], outputs=[cpp])

    ui.launch(inbrowser=True)


def execute_python(code):
    """Run *code* and return everything it printed to stdout.

    BUG FIX: the original referenced ``io`` and ``sys`` which are never
    imported anywhere in this notebook, so the cell raised NameError.
    ``redirect_stdout`` also restores the *previous* stdout (not
    ``sys.__stdout__``) even when *code* raises.

    SECURITY: exec() of arbitrary user-supplied code — acceptable only in
    this local demo UI, never on untrusted input.
    """
    import io
    from contextlib import redirect_stdout

    buffer = io.StringIO()
    with redirect_stdout(buffer):
        exec(code)
    return buffer.getvalue()


def execute_cpp(code):
    """Compile *code* with g++ into ./optimized, run it, return its stdout.

    On either compile or run failure, returns the captured stderr instead
    of raising, so the UI shows the error text.
    """
    write_output(code)
    try:
        # Mac alternative kept from the original for reference:
        # compile_cmd = ["clang++", "-Ofast", "-std=c++17", "-march=armv8.5-a",
        #                "-mtune=apple-m1", "-mcpu=apple-m1", "-o", "optimized", "optimized.cpp"]
        compile_cmd = ["g++", "-o", "optimized", "optimized.cpp"]
        subprocess.run(compile_cmd, check=True, text=True, capture_output=True)
        run_result = subprocess.run(["./optimized"], check=True, text=True, capture_output=True)
        return run_result.stdout
    except subprocess.CalledProcessError as e:
        return f"An error occurred:\n{e.stderr}"


if __name__ == "__main__":
    launch_basic_converter_ui()
def launch_full_ui():
    """Build and launch the full converter UI with Run buttons for both languages.

    Wraps the original top-level notebook cell so importing this module does
    not start a web server. Relies on module-level ``css``, ``pi``,
    ``optimize``, ``execute_python`` and ``execute_cpp`` from earlier cells.

    SECURITY: the Run buttons execute arbitrary code typed into the
    textboxes — local-demo use only.
    """
    with gr.Blocks(css=css) as ui:
        gr.Markdown("## Convert code from Python to C++")
        with gr.Row():
            python = gr.Textbox(label="Python code:", value=pi, lines=10)
            cpp = gr.Textbox(label="C++ code:", lines=10)
        with gr.Row():
            model = gr.Dropdown(["Google"], label="Select model", value="Google")
        with gr.Row():
            convert = gr.Button("Convert code")
        with gr.Row():
            python_run = gr.Button("Run Python")
            cpp_run = gr.Button("Run C++")
        with gr.Row():
            python_out = gr.TextArea(label="Python result:", elem_classes=["python"])
            cpp_out = gr.TextArea(label="C++ result:", elem_classes=["cpp"])

        convert.click(optimize, inputs=[python, model], outputs=[cpp])
        python_run.click(execute_python, inputs=[python], outputs=[python_out])
        cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])

    ui.launch(inbrowser=True)


if __name__ == "__main__":
    launch_full_ui()
EXERCISE.ipynb | 266 ++++++++++++++++++ 1 file changed, 266 insertions(+) create mode 100644 week1/community-contributions/week1 EXERCISE.ipynb diff --git a/week1/community-contributions/week1 EXERCISE.ipynb b/week1/community-contributions/week1 EXERCISE.ipynb new file mode 100644 index 0000000..81ddf6b --- /dev/null +++ b/week1/community-contributions/week1 EXERCISE.ipynb @@ -0,0 +1,266 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "import os\n", + "import requests\n", + "import json \n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "0bb65a08-9090-434a-b99d-5659a370cfbc", + "metadata": {}, + "outputs": [], + "source": [ + "# Prompts\n", + "\n", + "system_prompt = \"You are a tutor and helps with the user questions in detail with markdown respond with key point \\\n", + "considering the recent development around the world, keep the response in most appropriate tone \\n\"\n", + "\n", + "system_prompt += \"Some of Examples are\"\n", + "system_prompt += \"\"\"\n", + 
"{\"question\": \"1+1?\", \"response\": \"2\"},\n", + "{\"question\": \"why we shouls learn LLM Models?\", \"response\": \" Learning about Large Language Models (LLMs) is important because they are a rapidly evolving technology with the potential to significantly impact various industries, offering advanced capabilities in text generation, translation, information retrieval, and more, which can be valuable for professionals across diverse fields, allowing them to enhance their work and gain a competitive edge by understanding and utilizing these powerful language processing tools.\\ \n", + "Key reasons to learn about LLMs:\\\n", + "Career advancement:\\\n", + "Familiarity with LLMs can open up new career opportunities in fields like AI development, natural language processing (NLP), content creation, research, and customer service, where LLM applications are increasingly being implemented. \\\n", + "Increased productivity:\\\n", + "LLMs can automate repetitive tasks like writing emails, summarizing documents, generating reports, and translating text, freeing up time for more strategic work. \\\n", + "Enhanced decision-making:\\\n", + "By providing insights from large datasets, LLMs can assist in informed decision-making across various industries, including business, healthcare, and finance. \\\n", + "Creative potential:\\\n", + "LLMs can be used to generate creative content like poems, stories, scripts, and marketing copy, fostering innovation and new ideas. \\\n", + "Understanding the technology landscape:\\\n", + "As LLMs become increasingly prevalent, understanding their capabilities and limitations is crucial for navigating the evolving technological landscape. \\\n", + "What is a large language model (LLM)? 
- Cloudflare\\\n", + "A large language model (LLM) is a type of artificial intelligence (AI) program that can recognize and generate text, among other t...\\\n", + " \"},\n", + "{\"question\": \"what is the future of AI?\", \"response\": \"AI is predicted to grow increasingly pervasive as technology develops, revolutionising sectors including healthcare, banking, and transportation\"},\n", + "\"\"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key looks good so far\n" + ] + } + ], + "source": [ + "# set up environment\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", + " \n", + "MODEL = 'gpt-4o-mini'\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", + "metadata": {}, + "outputs": [], + "source": [ + "# here is the question; type over this to ask something new\n", + "\n", + "user_question = \"\"\"\n", + "How important it is for a Data Engineers to learn LLM, Considering the evolution of AI now a days?.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "{\"question\": \"How important is it for Data Engineers to learn LLMs?\", \"response\": \"The importance of Data Engineers learning about Large Language Models (LLMs) cannot be overstated, especially given the rapid evolution of AI and its applications across various domains. 
Here's why this knowledge is essential:\n", + "\n", + "### Key Reasons for Data Engineers to Learn about LLMs:\n", + "\n", + "1. **Integration of AI in Data Pipelines:**\n", + " - As organizations increasingly adopt AI-driven solutions, Data Engineers will need to integrate LLMs into data pipelines for tasks such as text processing, feature extraction, and sentiment analysis.\n", + "\n", + "2. **Understanding Data Requirements:**\n", + " - LLMs require substantial and specific datasets for optimal performance. Knowledge of these requirements will help Data Engineers curate, preprocess, and manage data more effectively.\n", + "\n", + "3. **Enhanced Data Quality:**\n", + " - Data Engineers play a crucial role in ensuring data quality. Understanding LLMs can guide them in implementing effective validation checks and enhancing the data used for training these models.\n", + "\n", + "4. **Collaboration with Data Scientists:**\n", + " - Data Engineers are essential collaborators with Data Scientists. A solid grasp of LLMs will enable them to facilitate better communication and cooperation in model deployment and optimization.\n", + "\n", + "5. **Innovation in Product Development:**\n", + " - Familiarity with LLMs will enable Data Engineers to contribute innovative ideas for new products or features that leverage language processing capabilities, leading to enhanced user experiences.\n", + "\n", + "6. **Staying Current with Industry Trends:**\n", + " - The AI landscape is rapidly changing. Learning about LLMs keeps Data Engineers abreast of current trends and technologies, ensuring they remain competitive in the job market and valuable to their organizations.\n", + "\n", + "7. **Ethical and Responsible AI:**\n", + " - Understanding LLMs involves awareness of their ethical considerations, such as bias and misuse. Data Engineers can advocate for responsible AI practices within their organizations by being educated on these issues.\n", + "\n", + "8. 
# Get gpt-4o-mini to answer, with streaming
def ask_tutor(question):
    """Stream a gpt-4o-mini answer to *question*, rendering live Markdown.

    BUG FIX: the original swapped the chat roles — it sent the user's
    question as the "system" message and the tutor persona
    (``system_prompt``) as the "user" message. The persona belongs in the
    system role and the question in the user role.
    """
    stream = openai.chat.completions.create(
        model=MODEL_GPT,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question},
        ],
        stream=True,
    )

    response = ""
    display_handle = display(Markdown(""), display_id=True)
    for chunk in stream:
        response += chunk.choices[0].delta.content or ''
        # Strip code fences / "markdown" labels the model sometimes wraps
        # around replies before re-rendering.
        response = response.replace("```", "").replace("markdown", "")
        update_display(Markdown(response), display_id=display_handle.display_id)

# call the gpt-4o-mini to answer with streaming
ask_tutor(user_question)
\u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/models.py:963\u001b[0m, in \u001b[0;36mResponse.json\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 962\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 963\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcomplexjson\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloads\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcontent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecode\u001b[49m\u001b[43m(\u001b[49m\u001b[43mencoding\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 964\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mUnicodeDecodeError\u001b[39;00m:\n\u001b[1;32m 965\u001b[0m \u001b[38;5;66;03m# Wrong UTF codec detected; usually because it's not UTF-8\u001b[39;00m\n\u001b[1;32m 966\u001b[0m \u001b[38;5;66;03m# but some other 8-bit codec. 
This is an RFC violation,\u001b[39;00m\n\u001b[1;32m 967\u001b[0m \u001b[38;5;66;03m# and the server didn't bother to tell us what codec *was*\u001b[39;00m\n\u001b[1;32m 968\u001b[0m \u001b[38;5;66;03m# used.\u001b[39;00m\n", + "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/json/__init__.py:346\u001b[0m, in \u001b[0;36mloads\u001b[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 344\u001b[0m parse_int \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m parse_float \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 345\u001b[0m parse_constant \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_pairs_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m kw):\n\u001b[0;32m--> 346\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_default_decoder\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecode\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 347\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", + "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/json/decoder.py:340\u001b[0m, in \u001b[0;36mJSONDecoder.decode\u001b[0;34m(self, s, _w)\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m end \u001b[38;5;241m!=\u001b[39m 
\u001b[38;5;28mlen\u001b[39m(s):\n\u001b[0;32m--> 340\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m JSONDecodeError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExtra data\u001b[39m\u001b[38;5;124m\"\u001b[39m, s, end)\n\u001b[1;32m 341\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m obj\n", + "\u001b[0;31mJSONDecodeError\u001b[0m: Extra data: line 2 column 1 (char 123)", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[46], line 13\u001b[0m\n\u001b[1;32m 6\u001b[0m payload \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 7\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m: MODEL_LLAMA,\n\u001b[1;32m 8\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m: messages,\n\u001b[1;32m 9\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstream\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 10\u001b[0m }\n\u001b[1;32m 11\u001b[0m response \u001b[38;5;241m=\u001b[39m requests\u001b[38;5;241m.\u001b[39mpost(OLLAMA_API, json\u001b[38;5;241m=\u001b[39mpayload,headers\u001b[38;5;241m=\u001b[39mHEADERS)\n\u001b[0;32m---> 13\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjson\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmessage\u001b[39m\u001b[38;5;124m'\u001b[39m][\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m 15\u001b[0m \u001b[38;5;66;03m# # Process the response stream\u001b[39;00m\n\u001b[1;32m 16\u001b[0m \u001b[38;5;66;03m# for line in response.iter_lines():\u001b[39;00m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;66;03m# if line: # Skip empty lines\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;66;03m# except 
# Get Llama 3.2 to answer via the local Ollama chat endpoint.
messages = [
    {"role": "user", "content": user_question}
]
HEADERS = {"Content-Type": "application/json"}
payload = {
    "model": MODEL_LLAMA,
    "messages": messages,
    "stream": True,
}
response = requests.post(OLLAMA_API, json=payload, headers=HEADERS, stream=True)

# BUG FIX: with "stream": True, Ollama replies with newline-delimited JSON
# (one object per line), so calling response.json() on the whole body raises
# "JSONDecodeError: Extra data" — exactly the traceback recorded in this
# cell's output. Decode each streamed line separately instead.
for line in response.iter_lines():
    if not line:
        continue  # skip keep-alive blank lines
    try:
        chunk = json.loads(line)
    except json.JSONDecodeError as e:
        print(f"Failed to decode JSON: {e}")
        continue
    if "message" in chunk and "content" in chunk["message"]:
        print(chunk["message"]["content"], end="", flush=True)
], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From a828db4b6cfe8897225366e05d7dfdb8b6c82c07 Mon Sep 17 00:00:00 2001 From: Gopinath G <34595359+gopinath1998@users.noreply.github.com> Date: Sat, 21 Dec 2024 10:39:08 +0530 Subject: [PATCH 19/26] Update week1 EXERCISE.ipynb --- .../week1 EXERCISE.ipynb | 32 ++++--------------- 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/week1/community-contributions/week1 EXERCISE.ipynb b/week1/community-contributions/week1 EXERCISE.ipynb index 81ddf6b..2094226 100644 --- a/week1/community-contributions/week1 EXERCISE.ipynb +++ b/week1/community-contributions/week1 EXERCISE.ipynb @@ -13,7 +13,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 52, "id": "c1070317-3ed9-4659-abe3-828943230e03", "metadata": {}, "outputs": [], @@ -25,7 +25,7 @@ "from dotenv import load_dotenv\n", "from IPython.display import Markdown, display, update_display\n", "from openai import OpenAI\n", - "\n" + "import ollama\n" ] }, { @@ -191,29 +191,10 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": null, "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", "metadata": {}, - "outputs": [ - { - "ename": "JSONDecodeError", - "evalue": "Extra data: line 2 column 1 (char 123)", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/models.py:963\u001b[0m, in 
\u001b[0;36mResponse.json\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 962\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 963\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcomplexjson\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloads\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcontent\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecode\u001b[49m\u001b[43m(\u001b[49m\u001b[43mencoding\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 964\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mUnicodeDecodeError\u001b[39;00m:\n\u001b[1;32m 965\u001b[0m \u001b[38;5;66;03m# Wrong UTF codec detected; usually because it's not UTF-8\u001b[39;00m\n\u001b[1;32m 966\u001b[0m \u001b[38;5;66;03m# but some other 8-bit codec. This is an RFC violation,\u001b[39;00m\n\u001b[1;32m 967\u001b[0m \u001b[38;5;66;03m# and the server didn't bother to tell us what codec *was*\u001b[39;00m\n\u001b[1;32m 968\u001b[0m \u001b[38;5;66;03m# used.\u001b[39;00m\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/json/__init__.py:346\u001b[0m, in \u001b[0;36mloads\u001b[0;34m(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 344\u001b[0m parse_int \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m parse_float \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m 
\u001b[38;5;129;01mand\u001b[39;00m\n\u001b[1;32m 345\u001b[0m parse_constant \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m object_pairs_hook \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m kw):\n\u001b[0;32m--> 346\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_default_decoder\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecode\u001b[49m\u001b[43m(\u001b[49m\u001b[43ms\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 347\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/json/decoder.py:340\u001b[0m, in \u001b[0;36mJSONDecoder.decode\u001b[0;34m(self, s, _w)\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m end \u001b[38;5;241m!=\u001b[39m \u001b[38;5;28mlen\u001b[39m(s):\n\u001b[0;32m--> 340\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m JSONDecodeError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExtra data\u001b[39m\u001b[38;5;124m\"\u001b[39m, s, end)\n\u001b[1;32m 341\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m obj\n", - "\u001b[0;31mJSONDecodeError\u001b[0m: Extra data: line 2 column 1 (char 123)", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mJSONDecodeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[46], line 13\u001b[0m\n\u001b[1;32m 6\u001b[0m payload \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 7\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m: MODEL_LLAMA,\n\u001b[1;32m 8\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m: messages,\n\u001b[1;32m 9\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstream\u001b[39m\u001b[38;5;124m\"\u001b[39m: 
\u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 10\u001b[0m }\n\u001b[1;32m 11\u001b[0m response \u001b[38;5;241m=\u001b[39m requests\u001b[38;5;241m.\u001b[39mpost(OLLAMA_API, json\u001b[38;5;241m=\u001b[39mpayload,headers\u001b[38;5;241m=\u001b[39mHEADERS)\n\u001b[0;32m---> 13\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjson\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmessage\u001b[39m\u001b[38;5;124m'\u001b[39m][\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m 15\u001b[0m \u001b[38;5;66;03m# # Process the response stream\u001b[39;00m\n\u001b[1;32m 16\u001b[0m \u001b[38;5;66;03m# for line in response.iter_lines():\u001b[39;00m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;66;03m# if line: # Skip empty lines\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;66;03m# except json.JSONDecodeError as e:\u001b[39;00m\n\u001b[1;32m 24\u001b[0m \u001b[38;5;66;03m# print(f\"Failed to decode JSON: {e}\")\u001b[39;00m\n", - "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/models.py:971\u001b[0m, in \u001b[0;36mResponse.json\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 969\u001b[0m \u001b[38;5;28;01mpass\u001b[39;00m\n\u001b[1;32m 970\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m JSONDecodeError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m--> 971\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m RequestsJSONDecodeError(e\u001b[38;5;241m.\u001b[39mmsg, e\u001b[38;5;241m.\u001b[39mdoc, e\u001b[38;5;241m.\u001b[39mpos)\n\u001b[1;32m 973\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 974\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m complexjson\u001b[38;5;241m.\u001b[39mloads(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtext, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", - 
"\u001b[0;31mJSONDecodeError\u001b[0m: Extra data: line 2 column 1 (char 123)" - ] - } - ], + "outputs": [], "source": [ "# Get Llama 3.2 to answer\n", "messages = [\n", @@ -225,9 +206,10 @@ " \"messages\": messages,\n", " \"stream\": True\n", " }\n", - "response = requests.post(OLLAMA_API, json=payload,headers=HEADERS)\n", "\n", - "print(response.json()['message']['content'])\n", + "response = ollama.chat(model=MODEL_LLAMA, messages=messages)\n", + "reply = response['message']['content']\n", + "display(Markdown(reply))\n", "\n", "# # Process the response stream\n", "# for line in response.iter_lines():\n", From 57bb6cc85a50d372bd4084bbd40325e90af25a19 Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 01:15:16 +1100 Subject: [PATCH 20/26] Day 5 challenging completed with multilingual with multitone --- .../day5-multi-lingual-desire-format.ipynb | 3585 +++++++++++++++++ 1 file changed, 3585 insertions(+) create mode 100644 week1/community-contributions/day5-multi-lingual-desire-format.ipynb diff --git a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb new file mode 100644 index 0000000..3f1b3ad --- /dev/null +++ b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb @@ -0,0 +1,3585 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a98030af-fcd1-4d63-a36e-38ba053498fa", + "metadata": {}, + "source": [ + "# A full business solution\n", + "\n", + "## Now we will take our project from Day 1 to the next level\n", + "\n", + "### BUSINESS CHALLENGE:\n", + "\n", + "Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n", + "\n", + "We will be provided a company name and their primary website.\n", + "\n", + "See the end of this notebook for examples of real-world business applications.\n", + "\n", + "And remember: I'm always available if you have problems or ideas! 
Please do reach out." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "d5b08506-dc8b-4443-9201-5f1848161363", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n", + "\n", + "import os\n", + "import requests\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key looks good so far\n" + ] + } + ], + "source": [ + "# Initialize and constants\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? 
Please visit the troubleshooting notebook!\")\n", + " \n", + "MODEL = 'gpt-4o-mini'\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "106dd65e-90af-4ca8-86b6-23a41840645b", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + " \"\"\"\n", + " A utility class to represent a Website that we have scraped, now with links\n", + " \"\"\"\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " if soup.body:\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + " else:\n", + " self.text = \"\"\n", + " links = [link.get('href') for link in soup.find_all('a')]\n", + " self.links = [link for link in links if link]\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['https://edwarddonner.com/',\n", + " 'https://edwarddonner.com/outsmart/',\n", + " 'https://edwarddonner.com/about-me-and-about-nebula/',\n", + " 'https://edwarddonner.com/posts/',\n", + " 'https://edwarddonner.com/',\n", + " 'https://news.ycombinator.com',\n", + " 'https://nebula.io/?utm_source=ed&utm_medium=referral',\n", + " 
'https://www.prnewswire.com/news-releases/wynden-stark-group-acquires-nyc-venture-backed-tech-startup-untapt-301269512.html',\n", + " 'https://patents.google.com/patent/US20210049536A1/',\n", + " 'https://www.linkedin.com/in/eddonner/',\n", + " 'https://edwarddonner.com/2024/11/13/llm-engineering-resources/',\n", + " 'https://edwarddonner.com/2024/11/13/llm-engineering-resources/',\n", + " 'https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/',\n", + " 'https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/',\n", + " 'https://edwarddonner.com/2024/08/06/outsmart/',\n", + " 'https://edwarddonner.com/2024/08/06/outsmart/',\n", + " 'https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/',\n", + " 'https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/',\n", + " 'https://edwarddonner.com/',\n", + " 'https://edwarddonner.com/outsmart/',\n", + " 'https://edwarddonner.com/about-me-and-about-nebula/',\n", + " 'https://edwarddonner.com/posts/',\n", + " 'mailto:hello@mygroovydomain.com',\n", + " 'https://www.linkedin.com/in/eddonner/',\n", + " 'https://twitter.com/edwarddonner',\n", + " 'https://www.facebook.com/edward.donner.52']" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ed = Website(\"https://edwarddonner.com\")\n", + "ed.links" + ] + }, + { + "cell_type": "markdown", + "id": "1771af9c-717a-4fca-bbbe-8a95893312c3", + "metadata": {}, + "source": [ + "## First step: Have GPT-4o-mini figure out which links are relevant\n", + "\n", + "### Use a call to gpt-4o-mini to read the links on a webpage, and respond in structured JSON. \n", + "It should decide which links are relevant, and replace relative links such as \"/about\" with \"https://company.com/about\". 
\n", + "We will use \"one shot prompting\" in which we provide an example of how it should respond in the prompt.\n", + "\n", + "This is an excellent use case for an LLM, because it requires nuanced understanding. Imagine trying to code this without LLMs by parsing and analyzing the webpage - it would be very hard!\n", + "\n", + "Sidenote: there is a more advanced technique called \"Structured Outputs\" in which we require the model to respond according to a spec. We cover this technique in Week 8 during our autonomous Agentic AI project." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "6957b079-0d96-45f7-a26a-3487510e9b35", + "metadata": {}, + "outputs": [], + "source": [ + "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n", + "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n", + "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "b97e4068-97ed-4120-beae-c42105e4d59a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are provided with a list of links found on a webpage. 
You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n", + "You should respond in JSON as in this example:\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "\n" + ] + } + ], + "source": [ + "print(link_system_prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links_user_prompt(website):\n", + " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", + "Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"\\n\".join(website.links)\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "6bcbfa78-6395-4685-b92c-22d592050fd7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Here is the list of links on the website of https://edwarddonner.com - please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
Do not include Terms of Service, Privacy, email links.\n", + "Links (some might be relative links):\n", + "https://edwarddonner.com/\n", + "https://edwarddonner.com/outsmart/\n", + "https://edwarddonner.com/about-me-and-about-nebula/\n", + "https://edwarddonner.com/posts/\n", + "https://edwarddonner.com/\n", + "https://news.ycombinator.com\n", + "https://nebula.io/?utm_source=ed&utm_medium=referral\n", + "https://www.prnewswire.com/news-releases/wynden-stark-group-acquires-nyc-venture-backed-tech-startup-untapt-301269512.html\n", + "https://patents.google.com/patent/US20210049536A1/\n", + "https://www.linkedin.com/in/eddonner/\n", + "https://edwarddonner.com/2024/11/13/llm-engineering-resources/\n", + "https://edwarddonner.com/2024/11/13/llm-engineering-resources/\n", + "https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/\n", + "https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/\n", + "https://edwarddonner.com/2024/08/06/outsmart/\n", + "https://edwarddonner.com/2024/08/06/outsmart/\n", + "https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/\n", + "https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/\n", + "https://edwarddonner.com/\n", + "https://edwarddonner.com/outsmart/\n", + "https://edwarddonner.com/about-me-and-about-nebula/\n", + "https://edwarddonner.com/posts/\n", + "mailto:hello@mygroovydomain.com\n", + "https://www.linkedin.com/in/eddonner/\n", + "https://twitter.com/edwarddonner\n", + "https://www.facebook.com/edward.donner.52\n" + ] + } + ], + "source": [ + "print(get_links_user_prompt(ed))" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "a29aca19-ca13-471c-a4b4-5abbfa813f69", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links(url):\n", + " website = Website(url)\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": 
link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n", + " ],\n", + " response_format={\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['/',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/posts',\n", + " '/docs',\n", + " '/enterprise',\n", + " '/pricing',\n", + " '/login',\n", + " '/join',\n", + " '/IamCreateAI/Ruyi-Mini-7B',\n", + " '/Datou1111/shou_xin',\n", + " '/answerdotai/ModernBERT-base',\n", + " '/meta-llama/Llama-3.3-70B-Instruct',\n", + " '/tencent/HunyuanVideo',\n", + " '/models',\n", + " '/spaces/JeffreyXiang/TRELLIS',\n", + " '/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute',\n", + " '/spaces/multimodalart/flux-style-shaping',\n", + " '/spaces/Kwai-Kolors/Kolors-Virtual-Try-On',\n", + " '/spaces/lllyasviel/iclight-v2',\n", + " '/spaces',\n", + " '/datasets/fka/awesome-chatgpt-prompts',\n", + " '/datasets/O1-OPEN/OpenO1-SFT',\n", + " '/datasets/HuggingFaceFW/fineweb-2',\n", + " '/datasets/HuggingFaceTB/finemath',\n", + " '/datasets/amphora/QwQ-LongCoT-130K',\n", + " '/datasets',\n", + " '/join',\n", + " '/pricing#endpoints',\n", + " '/pricing#spaces',\n", + " '/pricing',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/allenai',\n", + " '/facebook',\n", + " '/amazon',\n", + " '/google',\n", + " '/Intel',\n", + " '/microsoft',\n", + " '/grammarly',\n", + " '/Writer',\n", + " '/docs/transformers',\n", + " '/docs/diffusers',\n", + " '/docs/safetensors',\n", + " '/docs/huggingface_hub',\n", + " '/docs/tokenizers',\n", + " '/docs/peft',\n", + " '/docs/transformers.js',\n", + " '/docs/timm',\n", + " 
'/docs/trl',\n", + " '/docs/datasets',\n", + " '/docs/text-generation-inference',\n", + " '/docs/accelerate',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/tasks',\n", + " 'https://ui.endpoints.huggingface.co',\n", + " '/chat',\n", + " '/huggingface',\n", + " '/brand',\n", + " '/terms-of-service',\n", + " '/privacy',\n", + " 'https://apply.workable.com/huggingface/',\n", + " 'mailto:press@huggingface.co',\n", + " '/learn',\n", + " '/docs',\n", + " '/blog',\n", + " 'https://discuss.huggingface.co',\n", + " 'https://status.huggingface.co/',\n", + " 'https://github.com/huggingface',\n", + " 'https://twitter.com/huggingface',\n", + " 'https://www.linkedin.com/company/huggingface/',\n", + " '/join/discord']" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Anthropic has made their site harder to scrape, so I'm using HuggingFace..\n", + "\n", + "huggingface = Website(\"https://huggingface.co\")\n", + "huggingface.links" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'links': [{'type': 'homepage', 'url': 'https://huggingface.co/'},\n", + " {'type': 'about page', 'url': 'https://huggingface.co/huggingface'},\n", + " {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'},\n", + " {'type': 'blog', 'url': 'https://huggingface.co/blog'},\n", + " {'type': 'github page', 'url': 'https://github.com/huggingface'},\n", + " {'type': 'twitter page', 'url': 'https://twitter.com/huggingface'},\n", + " {'type': 'linkedin page',\n", + " 'url': 'https://www.linkedin.com/company/huggingface/'}]}" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_links(\"https://huggingface.co\")" + ] + }, + { + "cell_type": "markdown", + "id": "0d74128e-dfb6-47ec-9549-288b621c838c", + "metadata": {}, + 
"source": [ + "## Second step: make the brochure!\n", + "\n", + "Assemble all the details into another prompt to GPT4-o" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5", + "metadata": {}, + "outputs": [], + "source": [ + "def get_all_details(url):\n", + " result = \"Landing page:\\n\"\n", + " result += Website(url).get_contents()\n", + " links = get_links(url)\n", + " print(\"Found links:\", links)\n", + " for link in links[\"links\"]:\n", + " result += f\"\\n\\n{link['type']}\\n\"\n", + " result += Website(link[\"url\"]).get_contents()\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://huggingface.co/about'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'company page', 'url': 'https://huggingface.co/huggingface'}, {'type': 'community discussions', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n", + "Landing page:\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Trending on\n", + "this week\n", + "Models\n", + "IamCreateAI/Ruyi-Mini-7B\n", + "Updated\n", + "4 days ago\n", + "•\n", + "8.17k\n", + 
"•\n", + "352\n", + "Datou1111/shou_xin\n", + "Updated\n", + "12 days ago\n", + "•\n", + "28.3k\n", + "•\n", + "672\n", + "answerdotai/ModernBERT-base\n", + "Updated\n", + "1 day ago\n", + "•\n", + "6.24k\n", + "•\n", + "236\n", + "meta-llama/Llama-3.3-70B-Instruct\n", + "Updated\n", + "11 days ago\n", + "•\n", + "236k\n", + "•\n", + "1.21k\n", + "tencent/HunyuanVideo\n", + "Updated\n", + "3 days ago\n", + "•\n", + "6.01k\n", + "•\n", + "1.2k\n", + "Browse 400k+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "1.79k\n", + "🏢\n", + "TRELLIS\n", + "Scalable and Versatile 3D Generation from images\n", + "Running\n", + "306\n", + "📝\n", + "Scaling test-time compute\n", + "Running\n", + "on\n", + "Zero\n", + "470\n", + "🚀\n", + "Flux Style Shaping\n", + "Optical illusions and style transfer with FLUX\n", + "Running\n", + "on\n", + "CPU Upgrade\n", + "6.11k\n", + "👕\n", + "Kolors Virtual Try-On\n", + "Running\n", + "on\n", + "Zero\n", + "965\n", + "📈\n", + "IC Light V2\n", + "Browse 150k+ applications\n", + "Datasets\n", + "fka/awesome-chatgpt-prompts\n", + "Updated\n", + "Sep 3\n", + "•\n", + "6.83k\n", + "•\n", + "6.58k\n", + "O1-OPEN/OpenO1-SFT\n", + "Updated\n", + "4 days ago\n", + "•\n", + "1.86k\n", + "•\n", + "234\n", + "HuggingFaceFW/fineweb-2\n", + "Updated\n", + "13 days ago\n", + "•\n", + "77.7k\n", + "•\n", + "342\n", + "HuggingFaceTB/finemath\n", + "Updated\n", + "1 day ago\n", + "•\n", + "1.86k\n", + "•\n", + "43\n", + "amphora/QwQ-LongCoT-130K\n", + "Updated\n", + "16 days ago\n", + "•\n", + "1.34k\n", + "•\n", + "85\n", + "Browse 100k+ datasets\n", + "The Home of Machine Learning\n", + "Create, discover and collaborate on ML better.\n", + "The collaboration platform\n", + "Host and collaborate on unlimited public models, datasets and applications.\n", + "Move faster\n", + "With the HF Open source stack.\n", + "Explore all modalities\n", + "Text, image, video, audio or even 3D.\n", + "Build your portfolio\n", + "Share your work with the 
world and build your ML profile.\n", + "Sign Up\n", + "Accelerate your ML\n", + "We provide paid Compute and Enterprise solutions.\n", + "Compute\n", + "Deploy on optimized\n", + "Inference Endpoints\n", + "or update your\n", + "Spaces applications\n", + "to a GPU in a few clicks.\n", + "View pricing\n", + "Starting at $0.60/hour for GPU\n", + "Enterprise\n", + "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", + "\t\t\tdedicated support.\n", + "Getting started\n", + "Starting at $20/user/month\n", + "Single Sign-On\n", + "Regions\n", + "Priority Support\n", + "Audit Logs\n", + "Resource Groups\n", + "Private Datasets Viewer\n", + "More than 50,000 organizations are using Hugging Face\n", + "Ai2\n", + "Enterprise\n", + "non-profit\n", + "•\n", + "366 models\n", + "•\n", + "1.76k followers\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.05k models\n", + "•\n", + "3.83k followers\n", + "Amazon Web Services\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "2.45k followers\n", + "Google\n", + "company\n", + "•\n", + "911 models\n", + "•\n", + "5.76k followers\n", + "Intel\n", + "company\n", + "•\n", + "217 models\n", + "•\n", + "2.07k followers\n", + "Microsoft\n", + "company\n", + "•\n", + "351 models\n", + "•\n", + "6.29k followers\n", + "Grammarly\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "102 followers\n", + "Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "17 models\n", + "•\n", + "186 followers\n", + "Our Open Source\n", + "We are building the foundation of ML tooling with the community.\n", + "Transformers\n", + "136,571\n", + "State-of-the-art ML for Pytorch, TensorFlow, and JAX.\n", + "Diffusers\n", + "26,740\n", + "State-of-the-art diffusion models for image and audio generation in PyTorch.\n", + "Safetensors\n", + "2,960\n", + "Simple, safe way to store and distribute neural networks weights safely and quickly.\n", + "Hub Python Library\n", + 
"2,177\n", + "Client library for the HF Hub: manage repositories from your Python runtime.\n", + "Tokenizers\n", + "9,165\n", + "Fast tokenizers, optimized for both research and production.\n", + "PEFT\n", + "16,767\n", + "Parameter efficient finetuning methods for large models.\n", + "Transformers.js\n", + "12,421\n", + "State-of-the-art Machine Learning for the web. Run Transformers directly in your browser, with no need for a server.\n", + "timm\n", + "32,668\n", + "State-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\n", + "TRL\n", + "10,382\n", + "Train transformer language models with reinforcement learning.\n", + "Datasets\n", + "19,378\n", + "Access and share datasets for computer vision, audio, and NLP tasks.\n", + "Text Generation Inference\n", + "9,484\n", + "Toolkit to serve Large Language Models.\n", + "Accelerate\n", + "8,082\n", + "Easily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n", + "System theme\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "about page\n", + "Webpage Title:\n", + "about (Sergei)\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Sergei\n", + "about\n", + "Follow\n", + "Kalaipriya's profile picture\n", + "selvivincent's profile picture\n", + "Renumathi's profile picture\n", + "3\n", + "\t\t\t\t\tfollowers\n", + "·\n", + "0 following\n", + "AI & ML interests\n", + "None yet\n", + "Organizations\n", + "None yet\n", + "models\n", + "None public yet\n", + 
"datasets\n", + "None public yet\n", + "System theme\n", + "Company\n", + "TOS\n", + "Privacy\n", + "About\n", + "Jobs\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Pricing\n", + "Docs\n", + "\n", + "\n", + "\n", + "careers page\n", + "Webpage Title:\n", + "Hugging Face - Current Openings\n", + "Webpage Contents:\n", + "\n", + "\n", + "\n", + "\n", + "blog page\n", + "Webpage Title:\n", + "Hugging Face – Blog\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Blog, Articles, and discussions\n", + "New Article\n", + "Everything\n", + "community\n", + "guide\n", + "open source collab\n", + "partnerships\n", + "research\n", + "NLP\n", + "Audio\n", + "CV\n", + "RL\n", + "ethics\n", + "Diffusion\n", + "Game Development\n", + "RLHF\n", + "Leaderboard\n", + "Case Studies\n", + "Evaluating Audio Reasoning with Big Bench Audio\n", + "By\n", + "mhillsmith\n", + "December 20, 2024\n", + "guest\n", + "•\n", + "8\n", + "Community Articles\n", + "view all\n", + "20+ Free and Paid Digital Marketing Strategies to Automate Repetitive Tasks\n", + "By\n", + "Markets\n", + "•\n", + "about 3 hours ago\n", + "•\n", + "1\n", + "🧠 Tags generation dataset\n", + "By\n", + "zino36\n", + "•\n", + "about 16 hours ago\n", + "•\n", + "1\n", + "AI Agents in Action: Managing GitHub Issues with KaibanJS\n", + "By\n", + "darielnoel\n", + "•\n", + "1 day ago\n", + "**Intelligence Potentiation: An Evolutionary Perspective on AI Agent Designs**\n", + "By\n", + "KnutJaegersberg\n", + "•\n", + "1 day ago\n", + "•\n", + "3\n", + "MINERVA: A Multi-Agent LLM System for Digital Scam Protection\n", + "By\n", + "dcarpintero\n", + "•\n", + "2 days ago\n", + "Mastering Iterative Prompting for Optimized AI Code Generation\n", + "By\n", + "luigi12345\n", + "•\n", + "3 days ago\n", + "•\n", + "1\n", + "SILMA RAGQA V1.0: A Comprehensive Benchmark for Evaluating LLMs 
on RAG QA Use-Cases\n", + "By\n", + "karimouda\n", + "•\n", + "3 days ago\n", + "•\n", + "1\n", + "FuseChat-3.0: Preference Optimization for Implicit Model Fusion\n", + "By\n", + "Wanfq\n", + "•\n", + "3 days ago\n", + "•\n", + "2\n", + "Tutorial: Quantizing Llama 3+ Models for Efficient Deployment\n", + "By\n", + "theeseus-ai\n", + "•\n", + "6 days ago\n", + "•\n", + "3\n", + "How to Expand Your AI Music Generations of 30 Seconds to Several Minutes\n", + "By\n", + "theeseus-ai\n", + "•\n", + "8 days ago\n", + "•\n", + "1\n", + "🇪🇺✍️ EU AI Act: Systemic Risks in the First CoP Draft Comments ✍️🇪🇺\n", + "By\n", + "yjernite\n", + "•\n", + "9 days ago\n", + "•\n", + "11\n", + "Building an AI-powered search engine from scratch\n", + "By\n", + "as-cle-bert\n", + "•\n", + "10 days ago\n", + "•\n", + "8\n", + "MotionLCM-V2: Improved Compression Rate for Multi-Latent-Token Diffusion\n", + "By\n", + "wxDai\n", + "•\n", + "10 days ago\n", + "•\n", + "12\n", + "RLHF 101: A Technical Dive into RLHF\n", + "By\n", + "GitBag\n", + "•\n", + "10 days ago\n", + "•\n", + "4\n", + "[Talk Arena](https://talkarena.org)\n", + "By\n", + "WillHeld\n", + "•\n", + "11 days ago\n", + "•\n", + "1\n", + "Multimodal RAG with Colpali, Milvus and VLMs\n", + "By\n", + "saumitras\n", + "•\n", + "11 days ago\n", + "•\n", + "2\n", + "In Honour of This Year's NeurIPs Test of Time Paper Awardees\n", + "By\n", + "Jaward\n", + "•\n", + "11 days ago\n", + "•\n", + "2\n", + "Power steering: Squeeze massive power from small LLMs\n", + "By\n", + "ucheog\n", + "•\n", + "12 days ago\n", + "•\n", + "4\n", + "Exploring the Power of KaibanJS v0.11.0 🚀\n", + "By\n", + "darielnoel\n", + "•\n", + "12 days ago\n", + "•\n", + "1\n", + "**Building a Custom Retrieval System with Motoko and Node.js**\n", + "By\n", + "theeseus-ai\n", + "•\n", + "12 days ago\n", + "•\n", + "1\n", + "Finally, a Replacement for BERT: Introducing ModernBERT\n", + "By\n", + "bwarner\n", + "December 19, 2024\n", + "guest\n", + "•\n", + "289\n", + 
"Bamba: Inference-Efficient Hybrid Mamba2 Model\n", + "By\n", + "Linsong-C\n", + "December 18, 2024\n", + "guest\n", + "•\n", + "30\n", + "Welcome the Falcon 3 Family of Open Models!\n", + "By\n", + "FalconLLM\n", + "December 17, 2024\n", + "•\n", + "98\n", + "Benchmarking Language Model Performance on 5th Gen Xeon at GCP\n", + "By\n", + "MatrixYao\n", + "December 17, 2024\n", + "•\n", + "2\n", + "Introducing the Synthetic Data Generator - Build Datasets with Natural Language\n", + "By\n", + "davidberenstein1957\n", + "December 16, 2024\n", + "•\n", + "55\n", + "LeMaterial: an open source initiative to accelerate materials discovery and research\n", + "By\n", + "AlexDuvalinho\n", + "December 10, 2024\n", + "guest\n", + "•\n", + "30\n", + "Hugging Face models in Amazon Bedrock\n", + "By\n", + "pagezyhf\n", + "December 9, 2024\n", + "•\n", + "8\n", + "Open Preference Dataset for Text-to-Image Generation by the 🤗 Community\n", + "By\n", + "davidberenstein1957\n", + "December 9, 2024\n", + "•\n", + "47\n", + "Welcome PaliGemma 2 – New vision language models by Google\n", + "By\n", + "merve\n", + "December 5, 2024\n", + "•\n", + "117\n", + "“How good are LLMs at fixing their mistakes? 
A chatbot arena experiment with Keras and TPUs\n", + "By\n", + "martin-gorner\n", + "December 5, 2024\n", + "•\n", + "12\n", + "Rethinking LLM Evaluation with 3C3H: AraGen Benchmark and Leaderboard\n", + "By\n", + "alielfilali01\n", + "December 4, 2024\n", + "guest\n", + "•\n", + "26\n", + "Investing in Performance: Fine-tune small models with LLM insights - a CFM case study\n", + "By\n", + "oahouzi\n", + "December 3, 2024\n", + "•\n", + "25\n", + "Rearchitecting Hugging Face Uploads and Downloads\n", + "By\n", + "port8080\n", + "November 26, 2024\n", + "•\n", + "37\n", + "SmolVLM - small yet mighty Vision Language Model\n", + "By\n", + "andito\n", + "November 26, 2024\n", + "•\n", + "142\n", + "Previous\n", + "1\n", + "2\n", + "3\n", + "...\n", + "36\n", + "Next\n", + "Community Articles\n", + "view all\n", + "20+ Free and Paid Digital Marketing Strategies to Automate Repetitive Tasks\n", + "By\n", + "Markets\n", + "•\n", + "about 3 hours ago\n", + "•\n", + "1\n", + "🧠 Tags generation dataset\n", + "By\n", + "zino36\n", + "•\n", + "about 16 hours ago\n", + "•\n", + "1\n", + "AI Agents in Action: Managing GitHub Issues with KaibanJS\n", + "By\n", + "darielnoel\n", + "•\n", + "1 day ago\n", + "**Intelligence Potentiation: An Evolutionary Perspective on AI Agent Designs**\n", + "By\n", + "KnutJaegersberg\n", + "•\n", + "1 day ago\n", + "•\n", + "3\n", + "MINERVA: A Multi-Agent LLM System for Digital Scam Protection\n", + "By\n", + "dcarpintero\n", + "•\n", + "2 days ago\n", + "Mastering Iterative Prompting for Optimized AI Code Generation\n", + "By\n", + "luigi12345\n", + "•\n", + "3 days ago\n", + "•\n", + "1\n", + "SILMA RAGQA V1.0: A Comprehensive Benchmark for Evaluating LLMs on RAG QA Use-Cases\n", + "By\n", + "karimouda\n", + "•\n", + "3 days ago\n", + "•\n", + "1\n", + "FuseChat-3.0: Preference Optimization for Implicit Model Fusion\n", + "By\n", + "Wanfq\n", + "•\n", + "3 days ago\n", + "•\n", + "2\n", + "Tutorial: Quantizing Llama 3+ Models for Efficient 
Deployment\n", + "By\n", + "theeseus-ai\n", + "•\n", + "6 days ago\n", + "•\n", + "3\n", + "How to Expand Your AI Music Generations of 30 Seconds to Several Minutes\n", + "By\n", + "theeseus-ai\n", + "•\n", + "8 days ago\n", + "•\n", + "1\n", + "🇪🇺✍️ EU AI Act: Systemic Risks in the First CoP Draft Comments ✍️🇪🇺\n", + "By\n", + "yjernite\n", + "•\n", + "9 days ago\n", + "•\n", + "11\n", + "Building an AI-powered search engine from scratch\n", + "By\n", + "as-cle-bert\n", + "•\n", + "10 days ago\n", + "•\n", + "8\n", + "MotionLCM-V2: Improved Compression Rate for Multi-Latent-Token Diffusion\n", + "By\n", + "wxDai\n", + "•\n", + "10 days ago\n", + "•\n", + "12\n", + "RLHF 101: A Technical Dive into RLHF\n", + "By\n", + "GitBag\n", + "•\n", + "10 days ago\n", + "•\n", + "4\n", + "[Talk Arena](https://talkarena.org)\n", + "By\n", + "WillHeld\n", + "•\n", + "11 days ago\n", + "•\n", + "1\n", + "Multimodal RAG with Colpali, Milvus and VLMs\n", + "By\n", + "saumitras\n", + "•\n", + "11 days ago\n", + "•\n", + "2\n", + "In Honour of This Year's NeurIPs Test of Time Paper Awardees\n", + "By\n", + "Jaward\n", + "•\n", + "11 days ago\n", + "•\n", + "2\n", + "Power steering: Squeeze massive power from small LLMs\n", + "By\n", + "ucheog\n", + "•\n", + "12 days ago\n", + "•\n", + "4\n", + "Exploring the Power of KaibanJS v0.11.0 🚀\n", + "By\n", + "darielnoel\n", + "•\n", + "12 days ago\n", + "•\n", + "1\n", + "**Building a Custom Retrieval System with Motoko and Node.js**\n", + "By\n", + "theeseus-ai\n", + "•\n", + "12 days ago\n", + "•\n", + "1\n", + "System theme\n", + "Company\n", + "TOS\n", + "Privacy\n", + "About\n", + "Jobs\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Pricing\n", + "Docs\n", + "\n", + "\n", + "\n", + "company page\n", + "Webpage Title:\n", + "huggingface (Hugging Face)\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + 
"Sign Up\n", + "Hugging Face\n", + "Enterprise\n", + "company\n", + "Verified\n", + "https://huggingface.co\n", + "huggingface\n", + "huggingface\n", + "Activity Feed\n", + "Follow\n", + "8,542\n", + "AI & ML interests\n", + "The AI community building the future.\n", + "Recent Activity\n", + "IAMJB\n", + "updated\n", + "a dataset\n", + "9 minutes ago\n", + "huggingface/community-science-paper-v2\n", + "IAMJB\n", + "updated\n", + "a dataset\n", + "about 6 hours ago\n", + "huggingface/paper-central-data\n", + "fdaudens\n", + "updated\n", + "a Space\n", + "about 19 hours ago\n", + "huggingface/open-source-ai-year-in-review-2024\n", + "View all activity\n", + "Team members\n", + "224\n", + "+190\n", + "+177\n", + "+156\n", + "+146\n", + "+126\n", + "Organization Card\n", + "Community\n", + "About org cards\n", + "👋 Hi!\n", + "We are on a mission to democratize\n", + "good\n", + "machine learning, one commit at a time.\n", + "If that sounds like something you should be doing, why don't you\n", + "join us\n", + "!\n", + "For press enquiries, you can\n", + "✉️ contact our team here\n", + ".\n", + "Collections\n", + "1\n", + "DistilBERT release\n", + "Original DistilBERT model, checkpoints obtained from using teacher-student learning from the original BERT checkpoints.\n", + "distilbert/distilbert-base-cased\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "May 6\n", + "•\n", + "358k\n", + "•\n", + "35\n", + "distilbert/distilbert-base-uncased\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "May 6\n", + "•\n", + "14.8M\n", + "•\n", + "577\n", + "distilbert/distilbert-base-multilingual-cased\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "May 6\n", + "•\n", + "472k\n", + "•\n", + "148\n", + "distilbert/distilbert-base-uncased-finetuned-sst-2-english\n", + "Text Classification\n", + "•\n", + "Updated\n", + "Dec 19, 2023\n", + "•\n", + "6.96M\n", + "•\n", + "•\n", + "645\n", + "spaces\n", + "23\n", + "Sort: \n", + "\t\tRecently updated\n", + "pinned\n", + "Running\n", + "52\n", 
+ "📈\n", + "Number Tokenization Blog\n", + "Running\n", + "395\n", + "😻\n", + "Open Source Ai Year In Review 2024\n", + "What happened in open-source AI this year, and what’s next?\n", + "Running\n", + "42\n", + "🔋\n", + "Inference Playground\n", + "Running\n", + "196\n", + "⚡\n", + "paper-central\n", + "Running\n", + "on\n", + "TPU v5e\n", + "6\n", + "💬\n", + "Keras Chatbot Battle\n", + "Running\n", + "101\n", + "⚡\n", + "Modelcard Creator\n", + "Expand 23\n", + "\t\t\t\t\t\t\tspaces\n", + "models\n", + "18\n", + "Sort: \n", + "\t\tRecently updated\n", + "huggingface/test-gating-group-2\n", + "Updated\n", + "4 days ago\n", + "huggingface/test-gating-group-1\n", + "Updated\n", + "4 days ago\n", + "huggingface/timesfm-tourism-monthly\n", + "Updated\n", + "12 days ago\n", + "•\n", + "29\n", + "•\n", + "1\n", + "huggingface/CodeBERTa-language-id\n", + "Text Classification\n", + "•\n", + "Updated\n", + "Mar 29\n", + "•\n", + "1.14k\n", + "•\n", + "54\n", + "huggingface/falcon-40b-gptq\n", + "Text Generation\n", + "•\n", + "Updated\n", + "Jun 14, 2023\n", + "•\n", + "19\n", + "•\n", + "12\n", + "huggingface/autoformer-tourism-monthly\n", + "Updated\n", + "May 24, 2023\n", + "•\n", + "1.5k\n", + "•\n", + "9\n", + "huggingface/distilbert-base-uncased-finetuned-mnli\n", + "Text Classification\n", + "•\n", + "Updated\n", + "Mar 22, 2023\n", + "•\n", + "1.37k\n", + "•\n", + "2\n", + "huggingface/informer-tourism-monthly\n", + "Updated\n", + "Feb 24, 2023\n", + "•\n", + "1.12k\n", + "•\n", + "5\n", + "huggingface/time-series-transformer-tourism-monthly\n", + "Updated\n", + "Feb 23, 2023\n", + "•\n", + "2.16k\n", + "•\n", + "18\n", + "huggingface/the-no-branch-repo\n", + "Text-to-Image\n", + "•\n", + "Updated\n", + "Feb 10, 2023\n", + "•\n", + "7\n", + "•\n", + "3\n", + "Expand 18\n", + "\t\t\t\t\t\t\tmodels\n", + "datasets\n", + "31\n", + "Sort: \n", + "\t\tRecently updated\n", + "huggingface/community-science-paper-v2\n", + "Viewer\n", + "•\n", + "Updated\n", + "9 minutes 
ago\n", + "•\n", + "5.03k\n", + "•\n", + "404\n", + "•\n", + "7\n", + "huggingface/paper-central-data\n", + "Viewer\n", + "•\n", + "Updated\n", + "about 6 hours ago\n", + "•\n", + "119k\n", + "•\n", + "553\n", + "•\n", + "8\n", + "huggingface/documentation-images\n", + "Viewer\n", + "•\n", + "Updated\n", + "1 day ago\n", + "•\n", + "44\n", + "•\n", + "2.43M\n", + "•\n", + "43\n", + "huggingface/transformers-metadata\n", + "Viewer\n", + "•\n", + "Updated\n", + "2 days ago\n", + "•\n", + "1.52k\n", + "•\n", + "559\n", + "•\n", + "14\n", + "huggingface/diffusers-metadata\n", + "Viewer\n", + "•\n", + "Updated\n", + "2 days ago\n", + "•\n", + "62\n", + "•\n", + "442\n", + "•\n", + "4\n", + "huggingface/policy-docs\n", + "Updated\n", + "3 days ago\n", + "•\n", + "898\n", + "•\n", + "6\n", + "huggingface/my-distiset-3f5a230e\n", + "Updated\n", + "30 days ago\n", + "•\n", + "17\n", + "huggingface/cookbook-images\n", + "Viewer\n", + "•\n", + "Updated\n", + "Nov 14\n", + "•\n", + "1\n", + "•\n", + "40.1k\n", + "•\n", + "6\n", + "huggingface/vllm-metadata\n", + "Updated\n", + "Oct 8\n", + "•\n", + "12\n", + "huggingface/paper-central-data-2\n", + "Viewer\n", + "•\n", + "Updated\n", + "Oct 4\n", + "•\n", + "58.3k\n", + "•\n", + "68\n", + "•\n", + "2\n", + "Expand 31\n", + "\t\t\t\t\t\t\tdatasets\n", + "System theme\n", + "Company\n", + "TOS\n", + "Privacy\n", + "About\n", + "Jobs\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Pricing\n", + "Docs\n", + "\n", + "\n", + "\n", + "community discussions\n", + "Webpage Title:\n", + "Hugging Face Forums - Hugging Face Community Discussion\n", + "Webpage Contents:\n", + "Loading\n", + "Hugging Face Forums\n", + "Topic\n", + "Replies\n", + "Views\n", + "Activity\n", + "List of `size_categories`\n", + "🤗Datasets\n", + "3\n", + "5\n", + "December 21, 2024\n", + "Feature request - maintain list of favorite hf pages reachable from my hom epage\n", + "Site Feedback\n", + "4\n", + "886\n", + "December 21, 2024\n", + "404 
error on carbon emission calculation\n", + "Site Feedback\n", + "1\n", + "7\n", + "December 21, 2024\n", + "Cannot connect gRPC Server Hosted on HuggingFace Spaces\n", + "Spaces\n", + "0\n", + "8\n", + "December 21, 2024\n", + "Hide system prompt or system instruction\n", + "Beginners\n", + "3\n", + "15\n", + "December 21, 2024\n", + "ModuleNotFoundError: No module named 'huggingface_hub.inference._types'\n", + "🤗Hub\n", + "0\n", + "5\n", + "December 21, 2024\n", + "Understanding State Management with Gradio and LangGraph\n", + "Beginners\n", + "1\n", + "11\n", + "December 21, 2024\n", + "Dimension problem\n", + "Beginners\n", + "25\n", + "21\n", + "December 21, 2024\n", + "Fine-tuning whisper on sound-event-detection dataset\n", + "🤗Transformers\n", + "0\n", + "4\n", + "December 20, 2024\n", + "Model that can generate both text and image as output\n", + "Research\n", + "4\n", + "42\n", + "December 21, 2024\n", + "Lm studio and Chat ui doesn't work with module\n", + "Beginners\n", + "11\n", + "33\n", + "December 21, 2024\n", + "Inference API Context Window and TOS\n", + "Beginners\n", + "0\n", + "12\n", + "December 20, 2024\n", + "Talkie AI got remove from app store -any alternative ai chat?\n", + "Beginners\n", + "4\n", + "1151\n", + "December 18, 2024\n", + "Inference Text Generation API issue\n", + "Intermediate\n", + "0\n", + "7\n", + "December 20, 2024\n", + "From Pandas Dataframe to Huggingface Dataset\n", + "Beginners\n", + "9\n", + "60459\n", + "December 20, 2024\n", + "\"Load Diffusion Model\" and \"Unet Loader (GGUF)\" null/undefined\n", + "Beginners\n", + "6\n", + "200\n", + "December 20, 2024\n", + "Timeout Issue with DeepSpeed on Multiple GPUs\n", + "DeepSpeed\n", + "0\n", + "8\n", + "December 20, 2024\n", + "Spaces dedicated gpu limit\n", + "Spaces\n", + "1\n", + "14\n", + "December 19, 2024\n", + "Chatbot PDF - using flan-t5-large model\n", + "Models\n", + "0\n", + "7\n", + "December 20, 2024\n", + "Gateway Problem\n", + "Beginners\n", + "0\n", + 
"8\n", + "December 20, 2024\n", + "RT-DETR attention map dimension - PekingU/rtdetr_r50vd\n", + "Models\n", + "0\n", + "5\n", + "December 20, 2024\n", + "Extending the tokenizer affects model generation\n", + "Intermediate\n", + "3\n", + "9\n", + "December 19, 2024\n", + "How to Ensure Each Process Reads Its Own Dataset and Trains Correctly When Using Trainer?\n", + "🤗Transformers\n", + "0\n", + "5\n", + "December 20, 2024\n", + "Can't save the tensorflow model of nvidia/mit-b5\n", + "Intermediate\n", + "3\n", + "127\n", + "December 19, 2024\n", + "# Audio course Unit 4. sample code not working. Can anyone check for me? Thanks\n", + "Course\n", + "0\n", + "6\n", + "December 20, 2024\n", + "Host Models on Hugging face and Perform Inference on Hugging Face Infrastructure\n", + "Beginners\n", + "0\n", + "6\n", + "December 20, 2024\n", + "Torchrun, trainer, dataset setup\n", + "Intermediate\n", + "4\n", + "71\n", + "December 20, 2024\n", + "Training fails on multiple GPUs with RuntimeError 'chuck expects at least a 1-dimensional array'\n", + "Beginners\n", + "2\n", + "108\n", + "December 19, 2024\n", + "How do you know whether the model is merged and uploaded?\n", + "Intermediate\n", + "0\n", + "11\n", + "December 20, 2024\n", + "Qwen based AI assistant randomly having an absolute, utter, complete 'mental breakdowns'?? 
(Inference API)\n", + "🤗Transformers\n", + "2\n", + "23\n", + "December 17, 2024\n", + "next page →\n", + "Home\n", + "Categories\n", + "Guidelines\n", + "Terms of Service\n", + "Privacy Policy\n", + "Powered by\n", + "Discourse\n", + ", best viewed with JavaScript enabled\n", + "\n", + "\n", + "\n", + "GitHub page\n", + "Webpage Title:\n", + "Hugging Face · GitHub\n", + "Webpage Contents:\n", + "Skip to content\n", + "Navigation Menu\n", + "Toggle navigation\n", + "Sign in\n", + "huggingface\n", + "Product\n", + "GitHub Copilot\n", + "Write better code with AI\n", + "Security\n", + "Find and fix vulnerabilities\n", + "Actions\n", + "Automate any workflow\n", + "Codespaces\n", + "Instant dev environments\n", + "Issues\n", + "Plan and track work\n", + "Code Review\n", + "Manage code changes\n", + "Discussions\n", + "Collaborate outside of code\n", + "Code Search\n", + "Find more, search less\n", + "Explore\n", + "All features\n", + "Documentation\n", + "GitHub Skills\n", + "Blog\n", + "Solutions\n", + "By company size\n", + "Enterprises\n", + "Small and medium teams\n", + "Startups\n", + "By use case\n", + "DevSecOps\n", + "DevOps\n", + "CI/CD\n", + "View all use cases\n", + "By industry\n", + "Healthcare\n", + "Financial services\n", + "Manufacturing\n", + "Government\n", + "View all industries\n", + "View all solutions\n", + "Resources\n", + "Topics\n", + "AI\n", + "DevOps\n", + "Security\n", + "Software Development\n", + "View all\n", + "Explore\n", + "Learning Pathways\n", + "White papers, Ebooks, Webinars\n", + "Customer Stories\n", + "Partners\n", + "Executive Insights\n", + "Open Source\n", + "GitHub Sponsors\n", + "Fund open source developers\n", + "The ReadME Project\n", + "GitHub community articles\n", + "Repositories\n", + "Topics\n", + "Trending\n", + "Collections\n", + "Enterprise\n", + "Enterprise platform\n", + "AI-powered developer platform\n", + "Available add-ons\n", + "Advanced Security\n", + "Enterprise-grade security features\n", + "GitHub 
Copilot\n", + "Enterprise-grade AI features\n", + "Premium Support\n", + "Enterprise-grade 24/7 support\n", + "Pricing\n", + "Search or jump to...\n", + "Search code, repositories, users, issues, pull requests...\n", + "Search\n", + "Clear\n", + "Search syntax tips\n", + "Provide feedback\n", + "We read every piece of feedback, and take your input very seriously.\n", + "Include my email address so I can be contacted\n", + "Cancel\n", + "Submit feedback\n", + "Saved searches\n", + "Use saved searches to filter your results more quickly\n", + "Cancel\n", + "Create saved search\n", + "Sign in\n", + "Sign up\n", + "Reseting focus\n", + "You signed in with another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You signed out in another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You switched accounts on another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "Dismiss alert\n", + "Hugging Face\n", + "The AI community building the future.\n", + "Verified\n", + "We've verified that the organization\n", + "huggingface\n", + "controls the domain:\n", + "huggingface.co\n", + "Learn more about verified organizations\n", + "40.1k\n", + "followers\n", + "NYC + Paris\n", + "https://huggingface.co/\n", + "X\n", + "@huggingface\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "Pinned\n", + "Loading\n", + "transformers\n", + "transformers\n", + "Public\n", + "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", + "Python\n", + "137k\n", + "27.3k\n", + "diffusers\n", + "diffusers\n", + "Public\n", + "🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.\n", + "Python\n", + "26.7k\n", + "5.5k\n", + "datasets\n", + "datasets\n", + "Public\n", + "🤗 The largest hub of 
ready-to-use datasets for ML models with fast, easy-to-use and efficient data manipulation tools\n", + "Python\n", + "19.4k\n", + "2.7k\n", + "peft\n", + "peft\n", + "Public\n", + "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.\n", + "Python\n", + "16.8k\n", + "1.7k\n", + "accelerate\n", + "accelerate\n", + "Public\n", + "🚀 A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support\n", + "Python\n", + "8.1k\n", + "995\n", + "optimum\n", + "optimum\n", + "Public\n", + "🚀 Accelerate inference and training of 🤗 Transformers, Diffusers, TIMM and Sentence Transformers with easy to use hardware optimization tools\n", + "Python\n", + "2.6k\n", + "486\n", + "Repositories\n", + "Loading\n", + "Type\n", + "Select type\n", + "Forks\n", + "Archived\n", + "Mirrors\n", + "Templates\n", + "Language\n", + "Select language\n", + "All\n", + "C\n", + "C#\n", + "C++\n", + "Cuda\n", + "Dockerfile\n", + "Go\n", + "Handlebars\n", + "HTML\n", + "Java\n", + "JavaScript\n", + "Jupyter Notebook\n", + "Kotlin\n", + "Lua\n", + "MDX\n", + "Mustache\n", + "Nix\n", + "Python\n", + "Rust\n", + "Shell\n", + "Smarty\n", + "Swift\n", + "TypeScript\n", + "Sort\n", + "Select order\n", + "Last updated\n", + "Name\n", + "Stars\n", + "Showing 10 of 275 repositories\n", + "trl\n", + "Public\n", + "Train transformer language models with reinforcement learning.\n", + "huggingface/trl’s past year of commit activity\n", + "Python\n", + "10,382\n", + "Apache-2.0\n", + "1,337\n", + "106\n", + "46\n", + "Updated\n", + "Dec 21, 2024\n", + "transformers.js\n", + "Public\n", + "State-of-the-art Machine Learning for the web. 
Run 🤗 Transformers directly in your browser, with no need for a server!\n", + "huggingface/transformers.js’s past year of commit activity\n", + "JavaScript\n", + "12,421\n", + "Apache-2.0\n", + "790\n", + "274\n", + "(3 issues need help)\n", + "48\n", + "Updated\n", + "Dec 21, 2024\n", + "diffusers\n", + "Public\n", + "🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.\n", + "huggingface/diffusers’s past year of commit activity\n", + "Python\n", + "26,740\n", + "Apache-2.0\n", + "5,504\n", + "379\n", + "(10 issues need help)\n", + "169\n", + "Updated\n", + "Dec 21, 2024\n", + "text-generation-inference\n", + "Public\n", + "Large Language Model Text Generation Inference\n", + "huggingface/text-generation-inference’s past year of commit activity\n", + "Python\n", + "9,484\n", + "Apache-2.0\n", + "1,106\n", + "152\n", + "21\n", + "Updated\n", + "Dec 21, 2024\n", + "candle\n", + "Public\n", + "Minimalist ML framework for Rust\n", + "huggingface/candle’s past year of commit activity\n", + "Rust\n", + "16,103\n", + "Apache-2.0\n", + "980\n", + "344\n", + "(5 issues need help)\n", + "86\n", + "Updated\n", + "Dec 21, 2024\n", + "autotrain-advanced\n", + "Public\n", + "🤗 AutoTrain Advanced\n", + "huggingface/autotrain-advanced’s past year of commit activity\n", + "Python\n", + "4,157\n", + "Apache-2.0\n", + "505\n", + "16\n", + "2\n", + "Updated\n", + "Dec 21, 2024\n", + "transformers\n", + "Public\n", + "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", + "huggingface/transformers’s past year of commit activity\n", + "Python\n", + "136,571\n", + "Apache-2.0\n", + "27,342\n", + "1,003\n", + "(2 issues need help)\n", + "526\n", + "Updated\n", + "Dec 21, 2024\n", + "lighteval\n", + "Public\n", + "Lighteval is your all-in-one toolkit for evaluating LLMs across multiple backends\n", + "huggingface/lighteval’s past year of commit activity\n", + "Python\n", + "889\n", + "MIT\n", + "109\n", + 
"62\n", + "(1 issue needs help)\n", + "15\n", + "Updated\n", + "Dec 21, 2024\n", + "hub-docs\n", + "Public\n", + "Docs of the Hugging Face Hub\n", + "huggingface/hub-docs’s past year of commit activity\n", + "Handlebars\n", + "309\n", + "Apache-2.0\n", + "259\n", + "90\n", + "25\n", + "Updated\n", + "Dec 21, 2024\n", + "optimum-habana\n", + "Public\n", + "Easy and lightning fast training of 🤗 Transformers on Habana Gaudi processor (HPU)\n", + "huggingface/optimum-habana’s past year of commit activity\n", + "Python\n", + "162\n", + "Apache-2.0\n", + "219\n", + "11\n", + "(1 issue needs help)\n", + "40\n", + "Updated\n", + "Dec 21, 2024\n", + "View all repositories\n", + "People\n", + "View all\n", + "Top languages\n", + "Python\n", + "Jupyter Notebook\n", + "Rust\n", + "TypeScript\n", + "JavaScript\n", + "Most used topics\n", + "pytorch\n", + "machine-learning\n", + "nlp\n", + "deep-learning\n", + "transformers\n", + "Footer\n", + "© 2024 GitHub, Inc.\n", + "Footer navigation\n", + "Terms\n", + "Privacy\n", + "Security\n", + "Status\n", + "Docs\n", + "Contact\n", + "Manage cookies\n", + "Do not share my personal information\n", + "You can’t perform that action at this time.\n", + "\n", + "\n", + "\n", + "Twitter page\n", + "Webpage Title:\n", + "x.com\n", + "Webpage Contents:\n", + "\n", + "\n", + "\n", + "\n", + "LinkedIn page\n", + "Webpage Title:\n", + "Hugging Face | LinkedIn\n", + "Webpage Contents:\n", + "Skip to main content\n", + "LinkedIn\n", + "Articles\n", + "People\n", + "Learning\n", + "Jobs\n", + "Games\n", + "Get the app\n", + "Join now\n", + "Sign in\n", + "Hugging Face\n", + "Software Development\n", + "The AI community building the future.\n", + "See jobs\n", + "Follow\n", + "Discover all 472 employees\n", + "Report this company\n", + "About us\n", + "The AI community building the future.\n", + "Website\n", + "https://huggingface.co\n", + "External link for Hugging Face\n", + "Industry\n", + "Software Development\n", + "Company size\n", + "51-200 
employees\n", + "Type\n", + "Privately Held\n", + "Founded\n", + "2016\n", + "Specialties\n", + "machine learning, natural language processing, and deep learning\n", + "Products\n", + "Hugging Face\n", + "Hugging Face\n", + "Natural Language Processing (NLP) Software\n", + "We‚Äôre on a journey to solve and democratize artificial intelligence through natural language.\n", + "Locations\n", + "Primary\n", + "Get directions\n", + "Paris, FR\n", + "Get directions\n", + "Employees at Hugging Face\n", + "Ludovic Huraux\n", + "Bassem ASSEH\n", + "Rajat Arya\n", + "Tech Lead & Software Engineer @ HF | prev: co-founder XetHub, Apple, Turi, AWS, Microsoft\n", + "Jeff Boudier\n", + "Product + Growth at Hugging Face\n", + "See all employees\n", + "Updates\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "47,326 followers\n", + "7h\n", + "Report this post\n", + "NOW you can add AI to your Slack, Discord in just few steps with Gradio!ü§©\n", + "\n", + "üî•Create Slack apps, Discord bots, or Intercom-style website widgets in ANY modality (Text, image, Video, Audio, Omni etc)! Keep reading to learn how ‚¨áÔ∏è\n", + "\n", + "Guide: üöÄ Creating a Slack Bot from a Gradio App üöÄ\n", + "Read here:\n", + "https://lnkd.in/g2_Bydrj\n", + "ü§éDo you love building stuff with Gradio? Support us on GitHub:\n", + "Gradio.dev\n", + "‚Ķmore\n", + "50\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Daniel V.\n", + "Machine Learning Librarian@ü§ó | Championing Open Science & Machine Learning\n", + "21h\n", + "Report this post\n", + "Introducing FineWeb-C üåêüéì, a community-built dataset for improving language models in ALL languages. \n", + "\n", + "Inspired by FineWeb-Edu the community is labelling the educational quality of texts for many languages. 
\n", + "\n", + "318 annotators, 32K+ annotations, 12 languages - and growing!üåç\n", + "57\n", + "2 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Merve Noyan\n", + "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", + "22h\n", + "Report this post\n", + "Fine-tune ColPali for your multimodal RAG use case üî•\n", + "\n", + "ColPali just landed to\n", + "Hugging Face\n", + "transformers and I have built a simple fine-tuning tutorial with QLoRA ü§ó\n", + "You can fine-tune the model with 32 GB VRAM with batch size of 4 (which can run on Colab A100)\n", + "Link in comments üí¨\n", + "267\n", + "4 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "ü§ñ Avthar Sewrathan\n", + "AI and Developer Product Leader | I talk about using AI and building AI apps\n", + "1d\n", + "Report this post\n", + "TIL: You can now load any\n", + "Hugging Face\n", + "dataset into PostgreSQL with just 1 line of SQL ü§Ø\n", + "\n", + "All thanks to the pgai PostgreSQL extension. \n", + "\n", + "Shoutout to\n", + "Matvey Arye\n", + "from the\n", + "Timescale\n", + "AI engineering team for implementing this.\n", + "\n", + "Learn more about using PostgreSQL with HuggingFace datasets in the HuggingFace docs:\n", + "https://lnkd.in/eS4hqSDq\n", + "#postgresql\n", + "#huggingface\n", + "#opensource\n", + "180\n", + "14 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Argilla\n", + "10,266 followers\n", + "1d\n", + "Report this post\n", + "üé¢ Push to Hub: Export your dataset to the Hugging Face Hub directly from the Argilla UI.\n", + "\n", + "We‚Äôre super excited to announce that we've closed the loop: now you can load a dataset from the Hub, open it on\n", + "Argilla\n", + "UI, label it, and push the annotated dataset to the Hub. 
All this without a line of code!\n", + "\n", + "\n", + "ùó™ùóµùòÜ ùòÄùóµùóºùòÇùóπùó± ùòÜùóºùòÇ ùòÇùòÄùó≤ ùó∂ùòÅ?\n", + "\n", + "Your AI project's impact depends heavily on the effort and care you put into your data. This new feature enables you to iterate faster and make annotated data available in the right format for training and evaluation.\n", + "\n", + "\n", + "ùóõùóºùòÑ ùó±ùóºùó≤ùòÄ ùó∂ùòÅ ùòÑùóºùóøùó∏?\n", + "\n", + "1Ô∏è‚É£ Import initial data from a CSV or any format to Hugging Face\n", + "2Ô∏è‚É£ Load it into the Argilla UI and configure the annotation task\n", + "3Ô∏è‚É£ Annotate your dataset\n", + "üöÄ Click on ‚ÄúPush to Hub‚Äù and share the dataset with your team (or the entire world)\n", + "\n", + "üëâ ùó•ùó≤ùóÆùó±ùòÜ ùòÅùóº ùòÅùóøùòÜ ùó∂ùòÅ ùóºùòÇùòÅ?\n", + "\n", + "Get started here:\n", + "https://lnkd.in/dhA-swR5\n", + "Release highlights:\n", + "https://lnkd.in/dbdQXG-W\n", + "35\n", + "3 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Daniel V.\n", + "Machine Learning Librarian@ü§ó | Championing Open Science & Machine Learning\n", + "1d\n", + "Report this post\n", + "Hot take: shipping BERT-sized models in 2025 will benefit far more people than sharing an LLM overfitted to some saturated leaderboards \n", + "\n", + "We're already seeing ModernBERT finetunes on the\n", + "Hugging Face\n", + "Hub. 
My guess is we'll see hundreds of these by the end of 2025.\n", + "80\n", + "4 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "47,326 followers\n", + "1d\n", + "Edited\n", + "Report this post\n", + "ü§Øüî•LEARN HOW TO CREATE interactive agentic chatbots using Gradio that are capable of showcasing the Thoughts, Tasks, and interim responses of Multiple Agents as you await the final answer from your AI assistant.\n", + "\n", + "üéØ Customer Support multi-agents with\n", + "CrewAI\n", + "and\n", + "Gradio\n", + "Showcasing here, a user-friendly, high-performing multi-agent gradio app. TO operate it, simply enter a webpage URL along with your questions related to that page, and in turn receive a high-quality response from the CrewAI Multi-Agent setup.\n", + "\n", + "üöÄAccess this app on\n", + "Hugging Face\n", + "Spaces:\n", + "https://lnkd.in/g6kXp_D2\n", + "‚Ķmore\n", + "72\n", + "1 Comment\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Clem Delangue ü§ó\n", + "Clem Delangue ü§ó is an Influencer\n", + "Co-founder & CEO at Hugging Face\n", + "2d\n", + "Report this post\n", + "In the past few months, we've invested a lot of efforts in improving the user management features of the Hugging Face hub that more than 5M AI builder are now using. It helps not only for easier organization collaboration but also for security (for example to make sure ex team members don't still have access to private models). 
\n", + "\n", + "If your manager, VP AI or admin/CISO is not aware, mention them below so that we can connect if they have any questions or feedback as most of these features are part of the Enterprise hub subscriptions:\n", + "https://lnkd.in/e-RY-3vs\n", + ")\n", + "\n", + "Cheers!\n", + "47\n", + "3 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Clem Delangue ü§ó\n", + "Clem Delangue ü§ó is an Influencer\n", + "Co-founder & CEO at Hugging Face\n", + "4d\n", + "Report this post\n", + "Just 10 days after o1's public debut, we‚Äôre thrilled to unveil the open-source version of the groundbreaking technique behind its success: scaling test-time compute ü߆üí° \n", + "\n", + "By giving models more \"time to think,\" Llama 1B outperforms Llama 8B in math‚Äîbeating a model 8x its size. The full recipe is open-sourceü§Ø \n", + "\n", + "This is the power of open science and open-source AI! üåç‚ú®\n", + "5,292\n", + "125 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Philipp Schmid\n", + "Technical Lead & LLMs at Hugging Face ü§ó | AWS ML HERO ü¶∏ü誂ôÇÔ∏è\n", + "1d\n", + "Report this post\n", + "ModernBERT, BERT revisited in the age of LLMs and Generative AI!\n", + "LightOn\n", + "and\n", + "Answer.ai\n", + "modernized BERT! Improved architecture with 8192 context length, flash attention, and trained on 2T tokens. ModernBERT outperforms version BERT and RoBERTa versions! 
üëÄ\n", + "\n", + "TL;DR;\n", + "2Ô∏è‚É£¬†Comes in 2 sizes base (139M) and large (395M)\n", + "üöĬ†Better performance across all metrics than the original BERT\n", + "üìè 8,192 token context length (16x longer than BERT)\n", + "‚ö° Modern architecture with Flash Attention 2, RoPE embeddings, and alternating attention\n", + "üìö Trained on 2 trillion tokens, primarily English and Code\n", + "üí® 2-4x faster than other models with mixed-length inputs\n", + "üî쬆Released under Apache 2.0\n", + "ü§ó¬†Available on\n", + "Hugging Face\n", + "and Transformers (main)\n", + "\n", + "Models:\n", + "https://lnkd.in/ethiJ2xh\n", + "Blog:\n", + "https://lnkd.in/ebiEzb4P\n", + "Paper:\n", + "https://lnkd.in/ezR8MUBF\n", + "1,844\n", + "67 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Join now to see what you are missing\n", + "Find people you know at Hugging Face\n", + "Browse recommended jobs for you\n", + "View all updates, news, and articles\n", + "Join now\n", + "Similar pages\n", + "Anthropic\n", + "Research Services\n", + "Mistral AI\n", + "Technology, Information and Internet\n", + "Paris, France\n", + "OpenAI\n", + "Research Services\n", + "San Francisco, CA\n", + "LangChain\n", + "Technology, Information and Internet\n", + "Perplexity\n", + "Software Development\n", + "San Francisco, California\n", + "Generative AI\n", + "Technology, Information and Internet\n", + "Google DeepMind\n", + "Research Services\n", + "London, London\n", + "LlamaIndex\n", + "Technology, Information and Internet\n", + "San Francisco, California\n", + "DeepLearning.AI\n", + "Software Development\n", + "Palo Alto, California\n", + "Cohere\n", + "Software Development\n", + "Toronto, Ontario\n", + "Show more similar pages\n", + "Show fewer similar pages\n", + "Browse jobs\n", + "Engineer jobs\n", + "555,845 open jobs\n", + "Machine Learning Engineer jobs\n", + "148,937 open jobs\n", + "Scientist jobs\n", + "48,969 open jobs\n", + "Software Engineer jobs\n", + "300,699 open jobs\n", 
+ "Intern jobs\n", + "71,196 open jobs\n", + "Developer jobs\n", + "258,935 open jobs\n", + "Analyst jobs\n", + "694,057 open jobs\n", + "Intelligence Specialist jobs\n", + "7,156 open jobs\n", + "Manager jobs\n", + "1,880,925 open jobs\n", + "Data Scientist jobs\n", + "264,158 open jobs\n", + "Director jobs\n", + "1,220,357 open jobs\n", + "Associate jobs\n", + "1,091,945 open jobs\n", + "Python Developer jobs\n", + "46,642 open jobs\n", + "Evangelist jobs\n", + "5,068 open jobs\n", + "Data Engineer jobs\n", + "192,126 open jobs\n", + "Vice President jobs\n", + "235,270 open jobs\n", + "Quantitative Analyst jobs\n", + "19,570 open jobs\n", + "Program Manager jobs\n", + "243,900 open jobs\n", + "Data Science Specialist jobs\n", + "2,441 open jobs\n", + "Lead Software Engineer jobs\n", + "68,215 open jobs\n", + "Show more jobs like this\n", + "Show fewer jobs like this\n", + "Funding\n", + "Hugging Face\n", + "7 total rounds\n", + "Last Round\n", + "Series D\n", + "Feb 16, 2024\n", + "External Crunchbase Link for last round of funding\n", + "See more info on\n", + "crunchbase\n", + "More searches\n", + "More searches\n", + "Engineer jobs\n", + "Intern jobs\n", + "Machine Learning Engineer jobs\n", + "Software Engineer jobs\n", + "Scientist jobs\n", + "Developer jobs\n", + "Research Intern jobs\n", + "Analyst jobs\n", + "Intelligence Specialist jobs\n", + "Quantitative Analyst jobs\n", + "Technician jobs\n", + "Data Science Specialist jobs\n", + "Project Manager jobs\n", + "Summer Intern jobs\n", + "Manager jobs\n", + "Senior Staff Engineer jobs\n", + "PHD jobs\n", + "Trader jobs\n", + "Researcher jobs\n", + "Data Scientist jobs\n", + "Writer jobs\n", + "Data Analyst jobs\n", + "Product Designer jobs\n", + "Back End Developer jobs\n", + "Spring Intern jobs\n", + "Program Manager jobs\n", + "Technology Officer jobs\n", + "Software Intern jobs\n", + "Security Professional jobs\n", + "Senior Software Engineer jobs\n", + "Python Developer jobs\n", + "Engineering Manager 
jobs\n", + "Web Developer jobs\n", + "Graduate jobs\n", + "Full Stack Engineer jobs\n", + "Professor jobs\n", + "Head jobs\n", + "Verification Manager jobs\n", + "User Experience Designer jobs\n", + "Recruiter jobs\n", + "Chief Executive Officer jobs\n", + "Associate jobs\n", + "Support Developer jobs\n", + "Senior Firmware Engineer jobs\n", + "Marketing Manager jobs\n", + "Modeling Engineer jobs\n", + "Designer jobs\n", + "Automation Lead jobs\n", + "Options Trader jobs\n", + "Agile Coach jobs\n", + "Research Engineer jobs\n", + "Software Quality Assurance Analyst jobs\n", + "User Experience Manager jobs\n", + "Technical Intern jobs\n", + "Junior Network Engineer jobs\n", + "Information Technology Recruiter jobs\n", + "User Researcher jobs\n", + "Player jobs\n", + "Engineering Project Manager jobs\n", + "Digital Strategist jobs\n", + "LinkedIn\n", + "© 2024\n", + "About\n", + "Accessibility\n", + "User Agreement\n", + "Privacy Policy\n", + "Cookie Policy\n", + "Copyright Policy\n", + "Brand Policy\n", + "Guest Controls\n", + "Community Guidelines\n", + "ÿߟÑÿπÿ±ÿ®Ÿäÿ© (Arabic)\n", + "‡¶¨‡¶æ‡¶Ç‡¶≤‡¶æ (Bangla)\n", + "ƒåe≈°tina (Czech)\n", + "Dansk (Danish)\n", + "Deutsch (German)\n", + "ŒïŒªŒªŒ∑ŒΩŒπŒ∫Œ¨ (Greek)\n", + "English (English)\n", + "Espa√±ol (Spanish)\n", + "ŸÅÿßÿ±ÿ≥€å (Persian)\n", + "Suomi (Finnish)\n", + "Fran√ßais (French)\n", + "‡§π‡§ø‡§Ç‡§¶‡•Ä (Hindi)\n", + "Magyar (Hungarian)\n", + "Bahasa Indonesia (Indonesian)\n", + "Italiano (Italian)\n", + "◊¢◊ë◊®◊ô◊™ (Hebrew)\n", + "Êó•Êú¨Ë™û (Japanese)\n", + "Ìïú͵≠Ïñ¥ (Korean)\n", + "‡§Æ‡§∞‡§æ‡§†‡•Ä (Marathi)\n", + "Bahasa Malaysia (Malay)\n", + "Nederlands (Dutch)\n", + "Norsk (Norwegian)\n", + "‡®™‡©∞‡®ú‡®æ‡®¨‡©Ä (Punjabi)\n", + "Polski (Polish)\n", + "Portugu√™s (Portuguese)\n", + "Rom√¢nƒÉ (Romanian)\n", + "–†—É—Å—Å–∫–∏–π (Russian)\n", + "Svenska (Swedish)\n", + "‡∞§‡±Ü‡∞≤‡±Å‡∞ó‡±Å (Telugu)\n", + "‡∏†‡∏≤‡∏©‡∏≤‡πч∏ó‡∏¢ (Thai)\n", + "Tagalog (Tagalog)\n", + "T√ºrk√ße (Turkish)\n", + "–£–∫—Ä–∞—ó–Ω—Å—å–∫–∞ 
(Ukrainian)\n", + "Ti·∫øng Vi·ªát (Vietnamese)\n", + "ÁÆÄ‰Ωì‰∏≠Êñá (Chinese (Simplified))\n", + "Ê≠£È´î‰∏≠Êñá (Chinese (Traditional))\n", + "Language\n", + "Agree & Join LinkedIn\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "Sign in to see who you already know at Hugging Face\n", + "Sign in\n", + "Welcome back\n", + "Email or phone\n", + "Password\n", + "Show\n", + "Forgot password?\n", + "Sign in\n", + "or\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "New to LinkedIn?\n", + "Join now\n", + "or\n", + "New to LinkedIn?\n", + "Join now\n", + "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "LinkedIn\n", + "LinkedIn is better on the app\n", + "Don‚Äôt have the app? Get it in the Microsoft Store.\n", + "Open the app\n", + "\n", + "\n" + ] + } + ], + "source": [ + "print(get_all_details(\"https://huggingface.co\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "and creates a short brochure about the company for prospective customers, investors and recruits. 
Respond in markdown.\\\n", + "Include details of company culture, customers and careers/jobs if you have the information.\"\n", + "\n", + "# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n", + "\n", + "# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n", + "# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n", + "# Include details of company culture, customers and careers/jobs if you have the information.\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23", + "metadata": {}, + "outputs": [], + "source": [ + "def get_brochure_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " user_prompt += get_all_details(url)\n", + " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "cd909e0b-1312-4ce2-a553-821e795d7572", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://huggingface.co/'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog', 'url': 'https://huggingface.co/blog'}, {'type': 'company page', 'url': 'https://huggingface.co/enterprise'}]}\n", + "You are looking at a company called: HuggingFace\n", + "Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n", + "Landing 
page:\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Trending on\n", + "this week\n", + "Models\n", + "IamCreateAI/Ruyi-Mini-7B\n", + "Updated\n", + "4 days ago\n", + "•\n", + "8.17k\n", + "•\n", + "352\n", + "Datou1111/shou_xin\n", + "Updated\n", + "12 days ago\n", + "•\n", + "28.3k\n", + "•\n", + "672\n", + "answerdotai/ModernBERT-base\n", + "Updated\n", + "1 day ago\n", + "•\n", + "6.24k\n", + "•\n", + "236\n", + "meta-llama/Llama-3.3-70B-Instruct\n", + "Updated\n", + "11 days ago\n", + "•\n", + "236k\n", + "•\n", + "1.21k\n", + "tencent/HunyuanVideo\n", + "Updated\n", + "3 days ago\n", + "•\n", + "6.01k\n", + "•\n", + "1.2k\n", + "Browse 400k+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "1.79k\n", + "🏢\n", + "TRELLIS\n", + "Scalable and Versatile 3D Generation from images\n", + "Running\n", + "306\n", + "📝\n", + "Scaling test-time compute\n", + "Running\n", + "on\n", + "Zero\n", + "470\n", + "🚀\n", + "Flux Style Shaping\n", + "Optical illusions and style transfer with FLUX\n", + "Running\n", + "on\n", + "CPU Upgrade\n", + "6.11k\n", + "👕\n", + "Kolors Virtual Try-On\n", + "Running\n", + "on\n", + "Zero\n", + "965\n", + "📈\n", + "IC Light V2\n", + "Browse 150k+ applications\n", + "Datasets\n", + "fka/awesome-chatgpt-prompts\n", + "Updated\n", + "Sep 3\n", + "•\n", + "6.83k\n", + "•\n", + "6.58k\n", + "O1-OPEN/OpenO1-SFT\n", + "Updated\n", + "4 days ago\n", + "•\n", + "1.86k\n", + "•\n", + "234\n", + "HuggingFaceFW/fineweb-2\n", + "Updated\n", + "13 days ago\n", + "•\n", + "77.7k\n", + "•\n", + "342\n", + "HuggingFaceTB/finemath\n", + "Updated\n", + "1 day ago\n", + "•\n", + 
"1.86k\n", + "•\n", + "43\n", + "amphora/QwQ-LongCoT-130K\n", + "Updated\n", + "16 days ago\n", + "•\n", + "1.34k\n", + "•\n", + "85\n", + "Browse 100k+ datasets\n", + "The Home of Machine Learning\n", + "Create, discover and collaborate on ML better.\n", + "The collaboration platform\n", + "Host and collaborate on unlimited public models, datasets and applications.\n", + "Move faster\n", + "With the HF Open source stack.\n", + "Explore all modalities\n", + "Text, image, video, audio or even 3D.\n", + "Build your portfolio\n", + "Share your work with the world and build your ML profile.\n", + "Sign Up\n", + "Accelerate your ML\n", + "We provide paid Compute and Enterprise solutions.\n", + "Compute\n", + "Deploy on optimized\n", + "Inference Endpoints\n", + "or update your\n", + "Spaces applications\n", + "to a GPU in a few clicks.\n", + "View pricing\n", + "Starting at $0.60/hour for GPU\n", + "Enterprise\n", + "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", + "\t\t\tdedicated support.\n", + "Getting started\n", + "Starting at $20/user/month\n", + "Single Sign-On\n", + "Regions\n", + "Priority Support\n", + "Audit Logs\n", + "Resource Groups\n", + "Private Datasets Viewer\n", + "More than 50,000 organizations are using Hugging Face\n", + "Ai2\n", + "Enterprise\n", + "non-profit\n", + "•\n", + "366 models\n", + "•\n", + "1.76k followers\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.05k models\n", + "•\n", + "3.83k followers\n", + "Amazon Web Services\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "2.45k followers\n", + "Google\n", + "company\n", + "•\n", + "911 models\n", + "•\n", + "5.76k followers\n", + "Intel\n", + "company\n", + "•\n", + "217 models\n", + "•\n", + "2.07k followers\n", + "Microsoft\n", + "company\n", + "•\n", + "351 models\n", + "•\n", + "6.29k followers\n", + "Grammarly\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "102 followers\n", + 
"Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "17 models\n", + "•\n", + "186 followers\n", + "Our Open Source\n", + "We are building the foundation of ML tooling with the community.\n", + "Transformers\n", + "136,571\n", + "State-of-the-art ML for Pytorch, TensorFlow, and JAX.\n", + "Diffusers\n", + "26,740\n", + "State-of-the-art diffusion models for image and audio generation in PyTorch.\n", + "Safetensors\n", + "2,960\n", + "Simple, safe way to store and distribute neural networks weights safely and quickly.\n", + "Hub Python Library\n", + "2,177\n", + "Client library for the HF Hub: manage repositories from your Python runtime.\n", + "Tokenizers\n", + "9,165\n", + "Fast tokenizers, optimized for both research and production.\n", + "PEFT\n", + "16,767\n", + "Parameter efficient finetuning methods for large models.\n", + "Transformers.js\n", + "12,421\n", + "State-of-the-art Machine Learning for the web. Run Transformers directly in your browser, with no need for a server.\n", + "timm\n", + "32,668\n", + "State-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\n", + "TRL\n", + "10,382\n", + "Train transformer language models with reinforcement learning.\n", + "Datasets\n", + "19,378\n", + "Access and share datasets for computer vision, audio, and NLP tasks.\n", + "Text Generation Inference\n", + "9,484\n", + "Toolkit to serve Large Language Models.\n", + "Accelerate\n", + "8,082\n", + "Easily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n", + "System theme\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "about 
page\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Trending on\n", + "this week\n", + "Models\n", + "IamCreateAI/Ruyi-Mini-7B\n", + "Updated\n", + "4 days ago\n", + "•\n", + "8.17k\n", + "•\n", + "352\n", + "Datou1111/shou_xin\n", + "Updated\n", + "12 days ago\n", + "•\n", + "28.3k\n", + "•\n", + "672\n", + "answerdotai/ModernBERT-base\n", + "Updated\n", + "1 day ago\n", + "•\n", + "6.24k\n", + "•\n", + "236\n", + "meta-llama/Llama-3.3-70B-Instruct\n", + "Updated\n", + "11 days ago\n", + "•\n", + "236k\n", + "•\n", + "1.21k\n", + "tencent/HunyuanVideo\n", + "Updated\n", + "3 days ago\n", + "•\n", + "6.01k\n", + "•\n", + "1.2k\n", + "Browse 400k+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "1.79k\n", + "🏢\n", + "TRELLIS\n", + "Scalable and Versatile 3D Generation from images\n", + "\n" + ] + } + ], + "source": [ + "print(get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46", + "metadata": {}, + "outputs": [], + "source": [ + "def create_brochure(company_name, url):\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " )\n", + " result = response.choices[0].message.content\n", + " display(Markdown(result))" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "e093444a-9407-42ae-924a-145730591a39", + "metadata": {}, + "outputs": [ + { + 
"name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'home page', 'url': 'https://huggingface.com/'}, {'type': 'about page', 'url': 'https://huggingface.com/huggingface'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'enterprise page', 'url': 'https://huggingface.com/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.com/pricing'}, {'type': 'blog page', 'url': 'https://huggingface.com/blog'}, {'type': 'community page', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" + ] + }, + { + "data": { + "text/markdown": [ + "# Hugging Face Brochure\n", + "\n", + "**Hugging Face** \n", + "*The AI community building the future.*\n", + "\n", + "---\n", + "\n", + "## About Us\n", + "Hugging Face is a pioneering platform where the machine learning community comes together to collaborate on models, datasets, and applications. With over 400,000 models and 100,000 datasets available, we empower users to create, discover, and innovate in the field of machine learning.\n", + "\n", + "### Our Mission\n", + "To accelerate the development and deployment of machine learning applications, making cutting-edge technology accessible to everyone.\n", + "\n", + "---\n", + "\n", + "## Company Culture\n", + "At Hugging Face, we believe in the power of collaboration and open-source technology. We foster an inclusive environment where every team member's input is valued, allowing for diverse ideas and perspectives. 
Our culture emphasizes continuous learning, innovation, and a commitment to advancing AI for the greater good.\n", + "\n", + "---\n", + "\n", + "## Customers\n", + "Hugging Face serves more than 50,000 organizations, including industry leaders such as:\n", + "\n", + "- **Amazon Web Services**\n", + "- **Meta**\n", + "- **Google**\n", + "- **Microsoft**\n", + "- **Intel**\n", + " \n", + "These organizations utilize our platform for various machine learning tasks, enhancing their workflows and outputs.\n", + "\n", + "---\n", + "\n", + "## Careers at Hugging Face\n", + "We are always on the lookout for talented individuals who are passionate about AI and machine learning. Career opportunities at Hugging Face offer:\n", + "\n", + "- A collaborative work environment\n", + "- Remote work flexibility\n", + "- Continuing education and mentorship\n", + "- Opportunities to work on impactful projects\n", + "\n", + "**Join us and help shape the future of AI!**\n", + "\n", + "---\n", + "\n", + "## Our Offerings\n", + "### Models\n", + "- Access over 400,000 machine learning models, covering a variety of tasks and technologies.\n", + "\n", + "### Datasets\n", + "- Discover and share 100,000+ datasets tailored for computer vision, audio, and NLP tasks.\n", + "\n", + "### Spaces\n", + "- Utilize our application space to run various applications including real-time projects and demonstrations.\n", + "\n", + "### Enterprise Solutions\n", + "- With dedicated support and industry-grade security, our Enterprise solutions are designed for organizations looking to implement AI at scale.\n", + "\n", + "---\n", + "\n", + "## Get Started Today!\n", + "**Sign up now** to become part of the Hugging Face community and access an array of tools to accelerate your machine learning journey. 
\n", + "[Sign Up Here](#)\n", + "\n", + "---\n", + "\n", + "**Stay Connected** \n", + "Follow us on our social media platforms:\n", + "- [GitHub](#)\n", + "- [Twitter](#)\n", + "- [LinkedIn](#)\n", + "- [Discord](#)\n", + "\n", + "**Hugging Face – Building the Future of AI**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "create_brochure(\"HuggingFace\", \"https://huggingface.com\")" + ] + }, + { + "cell_type": "markdown", + "id": "61eaaab7-0b47-4b29-82d4-75d474ad8d18", + "metadata": {}, + "source": [ + "## Finally - a minor improvement\n", + "\n", + "With a small adjustment, we can change this so that the results stream back from OpenAI,\n", + "with the familiar typewriter animation" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "51db0e49-f261-4137-aabe-92dd601f7725", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_brochure(company_name, url):\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " stream=True\n", + " )\n", + " \n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://huggingface.co'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'enterprise page', 'url': 
'https://huggingface.co/enterprise'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'community discussion', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" + ] + }, + { + "data": { + "text/markdown": [ + "# Welcome to Hugging Face\n", + "\n", + "## The AI Community Building the Future\n", + "\n", + "At Hugging Face, we bring together the machine learning community to collaborate on groundbreaking models, datasets, and applications. Our platform is a vibrant hub where innovation meets practicality, empowering developers and researchers to create state-of-the-art AI solutions.\n", + "\n", + "---\n", + "\n", + "### 🏆 What We Offer\n", + "\n", + "- **Models**: Access and discover over **400k+ models** including the latest advancements in AI.\n", + "- **Datasets**: A rich collection of **100k+ datasets** tailored for various machine learning tasks.\n", + "- **Spaces**: Collaborate on applications and projects seamlessly within our community’s creative workspace.\n", + "\n", + "---\n", + "\n", + "### 🌏 Our Customers\n", + "\n", + "Join the ranks of **50,000+ organizations** leveraging Hugging Face’s offerings, including industry giants like:\n", + "- **Meta**\n", + "- **Amazon Web Services**\n", + "- **Google**\n", + "- **Microsoft**\n", + "- **Grammarly**\n", + "\n", + "These companies trust us to accelerate their machine learning initiatives and foster innovation.\n", + "\n", + "---\n", + "\n", + "### 🌱 Company Culture\n", + "\n", + "At Hugging Face, we embrace an open-source ethos, encouraging collaboration and contribution from the community. Our culture is centered around creativity, innovation, and inclusivity. 
We believe in empowering individuals and teams by providing the right tools and support to shape the future of AI.\n", + "\n", + "---\n", + "\n", + "### 🚀 Careers at Hugging Face\n", + "\n", + "We are on the lookout for passionate individuals to join our team! If you share our vision of an accessible AI landscape, explore the career opportunities we offer. We provide an environment that supports academic growth, teamwork, and professional development while making a meaningful impact in the machine learning realm.\n", + "\n", + "#### Current Openings Include:\n", + "- Machine Learning Engineers\n", + "- Data Scientists\n", + "- Software Developers\n", + "- Community Managers\n", + "\n", + "---\n", + "\n", + "### 💡 Join Us\n", + "\n", + "Are you ready to be part of a revolution in AI? **[Sign Up](#)** today to explore the possibilities with Hugging Face or **[Log In](#)** if you’re already part of our community.\n", + "\n", + "Let’s build the future of AI together!\n", + "\n", + "---\n", + "\n", + "*For inquiries about our enterprise solutions, pricing, or community involvement, feel free to reach out through our website.* \n", + "\n", + "**Connect with us:** \n", + "[Twitter](#) | [LinkedIn](#) | [GitHub](#) | [Forum](#)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "87bd1188", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'homepage', 'url': 'https://huggingface.co/'}, {'type': 'about page', 'url': 'https://huggingface.co/huggingface'}, {'type': 'enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.co/pricing'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog page', 'url': 
'https://huggingface.co/blog'}, {'type': 'discussion forum', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" + ] + }, + { + "data": { + "text/markdown": [ + "\n", + "# Hugging Face: The AI Community Building the Future\n", + "\n", + "Welcome to Hugging Face, the leading collaborative platform for the machine learning community. With a robust environment designed for creating, discovering, and deploying machine learning models, datasets, and applications, Hugging Face is at the frontier of artificial intelligence innovation. \n", + "\n", + "---\n", + "\n", + "## About Us\n", + "At Hugging Face, we believe in the power of collaboration. Our platform enables users to work together on projects that range from machine-learning models to expansive datasets. With over 400,000 models and 100,000 datasets available, we provide the tools necessary to help researchers, developers, and organizations accelerate their machine learning projects.\n", + "\n", + "- **Trending Models This Week:**\n", + " - **IamCreateAI/Ruyi-Mini-7B** | 8.17k | Updated 4 days ago\n", + " - **Datou1111/shou_xin** | 28.3k | Updated 12 days ago\n", + " - **meta-llama/Llama-3.3-70B-Instruct** | 236k | Updated 11 days ago\n", + "\n", + "Explore our community-driven approach that integrates state-of-the-art tools like Transformers, DiffUsers, and PEFT (Parameter Efficient Finetuning).\n", + "\n", + "---\n", + "\n", + "## Company Culture\n", + "Hugging Face fosters a vibrant and inclusive company culture, aiming to empower individuals and teams through transparent practices and open-source methodologies. We believe in “AI for everyone,” promoting accessibility and co-creation within the AI community. 
\n", + "\n", + "### Why Work With Us?\n", + "- **Collaborative Environment**: Join a diverse team of experts and enthusiasts dedicated to pushing the boundaries of AI and machine learning.\n", + "- **Open Source Commitment**: Contribute to freely accessible tools that serve the global community.\n", + "- **Flexible Work**: We support remote work and provide a range of job opportunities tailored to different areas of expertise.\n", + "\n", + "---\n", + "\n", + "## Customers & Organizations\n", + "Over 50,000 organizations utilize Hugging Face in various industries, including notable names such as:\n", + "- **Meta AI**\n", + "- **Amazon Web Services**\n", + "- **Google**\n", + "- **Microsoft**\n", + "\n", + "Our enterprise solutions offer seamless integration with advanced security features, making us a trusted partner for both startups and established corporations.\n", + "\n", + "---\n", + "\n", + "## Careers at Hugging Face\n", + "We are always on the lookout for passionate individuals to join our team. Explore our open positions in areas such as software development, research, marketing, and customer support.\n", + "\n", + "- **Open Positions**: \n", + " - Machine Learning Engineer\n", + " - Data Scientist\n", + " - Community Manager\n", + "\n", + "Join us in shaping the future of AI. 
\n", + "\n", + "**[Explore Careers](#)**\n", + "\n", + "---\n", + "\n", + "## Join the Hugging Face Community\n", + "Whether you're looking to develop cutting-edge AI models, contribute to open-source projects, or advance your career in this dynamic field, Hugging Face is your gateway to innovation.\n", + "\n", + "**[Learn More](#)** | **[Sign Up Today](#)**\n", + "\n", + "Together, let's build the future of AI!\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" + ] + }, + { + "cell_type": "markdown", + "id": "a9e7375d", + "metadata": {}, + "source": [ + "## **Multi-lingual with Desire Format**\n" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "af5c959f", + "metadata": {}, + "outputs": [], + "source": [ + "def multi_lingual_stream_brochure(company_name, url, language, tone):\n", + "\n", + " system_prompt = f\"\"\"\n", + "You are an assistant that analyzes the contents of several relevant pages from a company website and creates a visually appealing and professional short brochure for prospective customers, investors, and recruits. \n", + "The brochure should be written in {language} and use a {tone.lower()} tone throughout.\n", + "\n", + "The brochure should follow this structure (in {language}):\n", + "\n", + "1. **Front Cover**:\n", + " - Prominently display the company name as Title.\n", + " - Include a compelling headline or tagline.\n", + " - Add something engaging relevant to the company’s mission.\n", + "\n", + "2. **About Us**:\n", + " - Provide a brief introduction to the company.\n", + " - State the company’s core mission and vision.\n", + " - Mention the founding story or key milestones.\n", + "\n", + "3. 
**What We Offer**:\n", + " - Summarize the company's products, services, or solutions.\n", + " - Highlight benefits or unique selling points.\n", + " - Include testimonials or case studies if available.\n", + "\n", + "4. **Our Culture**:\n", + " - Outline the company’s key values or guiding principles.\n", + " - Describe the workplace environment (e.g., innovation-driven, inclusive, collaborative).\n", + " - Highlight community engagement or CSR initiatives.\n", + "\n", + "5. **Who We Serve**:\n", + " - Describe the target customers or industries served.\n", + " - Mention notable clients or partners.\n", + " - Include testimonials or endorsements from customers.\n", + "\n", + "6. **Join Us**:\n", + " - Detail career or internship opportunities.\n", + " - Highlight benefits, career growth, or training opportunities.\n", + " - Provide direct links or steps to apply.\n", + "\n", + "7. **Contact Us**:\n", + " - Provide the company’s address, phone number, and email.\n", + " - Include links to social media platforms.\n", + " - Add a link to the company’s website.\n", + "\n", + "8. **Closing Note**:\n", + " - End with a thank-you message or an inspirational note for the reader.\n", + " - Add a call-to-action (e.g., “Get in touch today!” or “Explore more on our website”).\n", + "\n", + "Ensure the content is concise, engaging, visually clear, and tailored to the target audience. Use headings and subheadings to make the brochure easy to navigate. 
Include links and contact information wherever applicable.\n", + "\"\"\"\n", + "\n", + "\n", + " \n", + " stream = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " stream=True\n", + " )\n", + " \n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "744bfc05", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links: {'links': [{'type': 'about page', 'url': 'https://openai.com/about'}, {'type': 'careers page', 'url': 'https://openai.com/careers'}]}\n" + ] + }, + { + "data": { + "text/markdown": [ + "It seems that the landing and related pages for OpenAI did not yield any specific content. However, I can create a creative and engaging brochure based on general knowledge about OpenAI. Here's a humorous and entertaining brochure written in Urdu:\n", + "\n", + "\n", + "# 🎉 اوپن اے آئی: ہوشیار robots کا دوست! 🎉\n", + "\n", + "---\n", + "\n", + "## About Us - ہمارے بارے میں:\n", + "\n", + "ہماری کمپنی اوپن اے آئی، 2015 میں بنی۔ ہم نے سوچا کہ \"کیوں نہ ایک ایسا انٹیلیجنٹ سسٹم بنائیں جو انسانوں کی مدد کرے؟\" تو ہم نے کام شروع کیا اور دیکھیں! ہم نے ایک نئی دنیا کی بنیاد رکھی۔ ہماری مشن ہے \"تمام لوگوں کے لئے AI کی طاقت کو قابل رسائی بنانا\"۔ آفاقی طاقت کو ڈھونڈتے ہیں، جیسے آپ کے فرج میں چھپے ہوئے برگر!\n", + "\n", + "---\n", + "\n", + "## What We Offer - ہم کیا پیش کرتے ہیں:\n", + "\n", + "ہم AI کے شوقین ہیں! 
🤖 ہم مختلف پروڈکٹس اور سروسز پیش کرتے ہیں، جیسے کہ:\n", + "\n", + "- **GPT-3**: آپ کے سوالات کے جواب دینے کے لئے تیار!\n", + "- **تخلیقی تحریر**: جنریٹنگ آئیڈیاز جب آپ کی تخلیقیت بریک ہو جائے!\n", + "- **AI ٹولز**: آپ کی زندگی کو مزید آسان بنانے کے لئے!\n", + "\n", + "ہمارے صارفین کہتے ہیں، \"اپنی زندگی میں اوپن اے آئی کی ضرورت ہے، جیسے موٹیویشن کی ضرورت ہوتی ہے!\"\n", + "\n", + "---\n", + "\n", + "## Our Culture - ہماری ثقافت:\n", + "\n", + "ہماری کمپنی میں، ہمارا بنیادی اصول ہے: \"پیار اور انوکھا خیالات!\" 🤗 ہم نے انوکھے، تعاون پر مبنی ماحول کی بنیاد رکھی، جہاں ہر کوئی اپنی بات کہہ سکتا ہے، یہاں تک کہ ونڈو کے باہر کھڑا درخت بھی! ہم کمیونٹی کی خدمت کیلئے ہمیشہ تیار رہتے ہیں، وہ بھی سوشل میڈٰیا پر۔\n", + "\n", + "---\n", + "\n", + "## Who We Serve - ہم کس کی خدمت کرتے ہیں:\n", + "\n", + "ہم ہر اُس شخص کی خدمت کرتے ہیں جو سوپر ہیرومنٹ کی تلاش میں ہے۔ ہمارے وزیٹر، محققین، اور ٹیکنالوجی کے شوقین ہیں، اور ہمارے بہترین کلائنٹس include شامل ہیں \"بڑا دماغی جیسا سوچنے والے!\" 💡\n", + "\n", + "---\n", + "\n", + "## Join Us - ہمارے ساتھ شامل ہوں:\n", + "\n", + "آپ کو ترقی کی تلاش ہے؟ تو ہماری ٹیم کا حصہ بنیں! 🚀 ہم ہمیشہ نئے امریکی جاموں کی تلاش میں ہیں۔ آپ کو ٹریننگ، ترقی کے مواقع، اور سہولیات فراہم کریں گے۔\n", + "\n", + "📩 **درخواست دینے کے مرحلے:** ہماری ویب سائٹ پر جائیں، کیونکہ ہم جانتے ہیں کہ آپ کا خواب آپ کے قریب ہے!\n", + "\n", + "---\n", + "\n", + "## Contact Us - ہم سے رابطہ کریں:\n", + "\n", + "**پتہ:** نیٹ ورک کی دنیا \n", + "**فون:** 123-456-789 \n", + "**ایمیل:** info@openai.com \n", + "**سوشل میڈیا:** [فیس بک](#) | [ٹویٹر](#) | [لنکڈ ان](#) \n", + "**ویب سائٹ:** [openai.com](#)\n", + "\n", + "---\n", + "\n", + "## Closing Note - اختتامی نوٹ:\n", + "\n", + "ہماری کمپنی اوپن اے آئی کی طرف سے ایک شکریہ! اے آئی کی دنیا میں قدم رکھنے کا وقت آ گیا ہے! 
\n", + "\n", + "🖱️ **آج ہی رابطہ کریں یا ہماری ویب سائٹ کا دورہ کریں!**\n", + "\n", + "\n", + "**نوٹ:** واقعی ویب سائٹ کے مخصوص روابط، ای میل اور نمبر تخلیقی مقصد کے لئے ہیں۔ اس کو حقیقی معلومات کے ساتھ تبدیل کیا جا سکتا ہے۔" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "multi_lingual_stream_brochure(\"OpenAI\", \"https://openai.com/\", \"Urdu\", \"humorous, entertaining, jokey\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b6f1e8d9", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4fb86dc6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llm_env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 1c7c9e35e02b6322a9681f9fcf9b120235887cda Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 01:20:22 +1100 Subject: [PATCH 21/26] Day 5 Challend one with multilingual aloing with multitone --- .../day5-multi-lingual-desire-format.ipynb | 3112 +---------------- 1 file changed, 24 insertions(+), 3088 deletions(-) diff --git a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb index 3f1b3ad..b17c402 100644 --- a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb +++ b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb @@ -42,18 +42,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", "metadata": {}, - "outputs": [ - { - "name": 
"stdout", - "output_type": "stream", - "text": [ - "API key looks good so far\n" - ] - } - ], + "outputs": [], "source": [ "# Initialize and constants\n", "\n", @@ -109,46 +101,10 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['https://edwarddonner.com/',\n", - " 'https://edwarddonner.com/outsmart/',\n", - " 'https://edwarddonner.com/about-me-and-about-nebula/',\n", - " 'https://edwarddonner.com/posts/',\n", - " 'https://edwarddonner.com/',\n", - " 'https://news.ycombinator.com',\n", - " 'https://nebula.io/?utm_source=ed&utm_medium=referral',\n", - " 'https://www.prnewswire.com/news-releases/wynden-stark-group-acquires-nyc-venture-backed-tech-startup-untapt-301269512.html',\n", - " 'https://patents.google.com/patent/US20210049536A1/',\n", - " 'https://www.linkedin.com/in/eddonner/',\n", - " 'https://edwarddonner.com/2024/11/13/llm-engineering-resources/',\n", - " 'https://edwarddonner.com/2024/11/13/llm-engineering-resources/',\n", - " 'https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/',\n", - " 'https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/',\n", - " 'https://edwarddonner.com/2024/08/06/outsmart/',\n", - " 'https://edwarddonner.com/2024/08/06/outsmart/',\n", - " 'https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/',\n", - " 'https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/',\n", - " 'https://edwarddonner.com/',\n", - " 'https://edwarddonner.com/outsmart/',\n", - " 'https://edwarddonner.com/about-me-and-about-nebula/',\n", - " 'https://edwarddonner.com/posts/',\n", - " 'mailto:hello@mygroovydomain.com',\n", - " 'https://www.linkedin.com/in/eddonner/',\n", - " 'https://twitter.com/edwarddonner',\n", - " 'https://www.facebook.com/edward.donner.52']" - ] - }, - "execution_count": 4, - "metadata": {}, 
- "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "ed = Website(\"https://edwarddonner.com\")\n", "ed.links" @@ -193,26 +149,10 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "b97e4068-97ed-4120-beae-c42105e4d59a", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n", - "You should respond in JSON as in this example:\n", - "{\n", - " \"links\": [\n", - " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", - " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", - " ]\n", - "}\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "print(link_system_prompt)" ] @@ -235,45 +175,10 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "6bcbfa78-6395-4685-b92c-22d592050fd7", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Here is the list of links on the website of https://edwarddonner.com - please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
Do not include Terms of Service, Privacy, email links.\n", - "Links (some might be relative links):\n", - "https://edwarddonner.com/\n", - "https://edwarddonner.com/outsmart/\n", - "https://edwarddonner.com/about-me-and-about-nebula/\n", - "https://edwarddonner.com/posts/\n", - "https://edwarddonner.com/\n", - "https://news.ycombinator.com\n", - "https://nebula.io/?utm_source=ed&utm_medium=referral\n", - "https://www.prnewswire.com/news-releases/wynden-stark-group-acquires-nyc-venture-backed-tech-startup-untapt-301269512.html\n", - "https://patents.google.com/patent/US20210049536A1/\n", - "https://www.linkedin.com/in/eddonner/\n", - "https://edwarddonner.com/2024/11/13/llm-engineering-resources/\n", - "https://edwarddonner.com/2024/11/13/llm-engineering-resources/\n", - "https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/\n", - "https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/\n", - "https://edwarddonner.com/2024/08/06/outsmart/\n", - "https://edwarddonner.com/2024/08/06/outsmart/\n", - "https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/\n", - "https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/\n", - "https://edwarddonner.com/\n", - "https://edwarddonner.com/outsmart/\n", - "https://edwarddonner.com/about-me-and-about-nebula/\n", - "https://edwarddonner.com/posts/\n", - "mailto:hello@mygroovydomain.com\n", - "https://www.linkedin.com/in/eddonner/\n", - "https://twitter.com/edwarddonner\n", - "https://www.facebook.com/edward.donner.52\n" - ] - } - ], + "outputs": [], "source": [ "print(get_links_user_prompt(ed))" ] @@ -301,100 +206,10 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['/',\n", - " '/models',\n", - " '/datasets',\n", - " '/spaces',\n", - " '/posts',\n", - " '/docs',\n", - " 
'/enterprise',\n", - " '/pricing',\n", - " '/login',\n", - " '/join',\n", - " '/IamCreateAI/Ruyi-Mini-7B',\n", - " '/Datou1111/shou_xin',\n", - " '/answerdotai/ModernBERT-base',\n", - " '/meta-llama/Llama-3.3-70B-Instruct',\n", - " '/tencent/HunyuanVideo',\n", - " '/models',\n", - " '/spaces/JeffreyXiang/TRELLIS',\n", - " '/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute',\n", - " '/spaces/multimodalart/flux-style-shaping',\n", - " '/spaces/Kwai-Kolors/Kolors-Virtual-Try-On',\n", - " '/spaces/lllyasviel/iclight-v2',\n", - " '/spaces',\n", - " '/datasets/fka/awesome-chatgpt-prompts',\n", - " '/datasets/O1-OPEN/OpenO1-SFT',\n", - " '/datasets/HuggingFaceFW/fineweb-2',\n", - " '/datasets/HuggingFaceTB/finemath',\n", - " '/datasets/amphora/QwQ-LongCoT-130K',\n", - " '/datasets',\n", - " '/join',\n", - " '/pricing#endpoints',\n", - " '/pricing#spaces',\n", - " '/pricing',\n", - " '/enterprise',\n", - " '/enterprise',\n", - " '/enterprise',\n", - " '/enterprise',\n", - " '/enterprise',\n", - " '/enterprise',\n", - " '/enterprise',\n", - " '/allenai',\n", - " '/facebook',\n", - " '/amazon',\n", - " '/google',\n", - " '/Intel',\n", - " '/microsoft',\n", - " '/grammarly',\n", - " '/Writer',\n", - " '/docs/transformers',\n", - " '/docs/diffusers',\n", - " '/docs/safetensors',\n", - " '/docs/huggingface_hub',\n", - " '/docs/tokenizers',\n", - " '/docs/peft',\n", - " '/docs/transformers.js',\n", - " '/docs/timm',\n", - " '/docs/trl',\n", - " '/docs/datasets',\n", - " '/docs/text-generation-inference',\n", - " '/docs/accelerate',\n", - " '/models',\n", - " '/datasets',\n", - " '/spaces',\n", - " '/tasks',\n", - " 'https://ui.endpoints.huggingface.co',\n", - " '/chat',\n", - " '/huggingface',\n", - " '/brand',\n", - " '/terms-of-service',\n", - " '/privacy',\n", - " 'https://apply.workable.com/huggingface/',\n", - " 'mailto:press@huggingface.co',\n", - " '/learn',\n", - " '/docs',\n", - " '/blog',\n", - " 'https://discuss.huggingface.co',\n", - " 
'https://status.huggingface.co/',\n", - " 'https://github.com/huggingface',\n", - " 'https://twitter.com/huggingface',\n", - " 'https://www.linkedin.com/company/huggingface/',\n", - " '/join/discord']" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Anthropic has made their site harder to scrape, so I'm using HuggingFace..\n", "\n", @@ -404,28 +219,10 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'links': [{'type': 'homepage', 'url': 'https://huggingface.co/'},\n", - " {'type': 'about page', 'url': 'https://huggingface.co/huggingface'},\n", - " {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'},\n", - " {'type': 'blog', 'url': 'https://huggingface.co/blog'},\n", - " {'type': 'github page', 'url': 'https://github.com/huggingface'},\n", - " {'type': 'twitter page', 'url': 'https://twitter.com/huggingface'},\n", - " {'type': 'linkedin page',\n", - " 'url': 'https://www.linkedin.com/company/huggingface/'}]}" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "get_links(\"https://huggingface.co\")" ] @@ -460,2181 +257,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found links: {'links': [{'type': 'about page', 'url': 'https://huggingface.co/about'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'company page', 'url': 'https://huggingface.co/huggingface'}, {'type': 'community discussions', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 
'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n", - "Landing page:\n", - "Webpage Title:\n", - "Hugging Face – The AI community building the future.\n", - "Webpage Contents:\n", - "Hugging Face\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Posts\n", - "Docs\n", - "Enterprise\n", - "Pricing\n", - "Log In\n", - "Sign Up\n", - "The AI community building the future.\n", - "The platform where the machine learning community collaborates on models, datasets, and applications.\n", - "Trending on\n", - "this week\n", - "Models\n", - "IamCreateAI/Ruyi-Mini-7B\n", - "Updated\n", - "4 days ago\n", - "•\n", - "8.17k\n", - "•\n", - "352\n", - "Datou1111/shou_xin\n", - "Updated\n", - "12 days ago\n", - "•\n", - "28.3k\n", - "•\n", - "672\n", - "answerdotai/ModernBERT-base\n", - "Updated\n", - "1 day ago\n", - "•\n", - "6.24k\n", - "•\n", - "236\n", - "meta-llama/Llama-3.3-70B-Instruct\n", - "Updated\n", - "11 days ago\n", - "•\n", - "236k\n", - "•\n", - "1.21k\n", - "tencent/HunyuanVideo\n", - "Updated\n", - "3 days ago\n", - "•\n", - "6.01k\n", - "•\n", - "1.2k\n", - "Browse 400k+ models\n", - "Spaces\n", - "Running\n", - "on\n", - "Zero\n", - "1.79k\n", - "🏢\n", - "TRELLIS\n", - "Scalable and Versatile 3D Generation from images\n", - "Running\n", - "306\n", - "📝\n", - "Scaling test-time compute\n", - "Running\n", - "on\n", - "Zero\n", - "470\n", - "🚀\n", - "Flux Style Shaping\n", - "Optical illusions and style transfer with FLUX\n", - "Running\n", - "on\n", - "CPU Upgrade\n", - "6.11k\n", - "👕\n", - "Kolors Virtual Try-On\n", - "Running\n", - "on\n", - "Zero\n", - "965\n", - "📈\n", - "IC Light V2\n", - "Browse 150k+ applications\n", - "Datasets\n", - "fka/awesome-chatgpt-prompts\n", - "Updated\n", - "Sep 3\n", - "•\n", - "6.83k\n", - "•\n", - "6.58k\n", - "O1-OPEN/OpenO1-SFT\n", - "Updated\n", - "4 days ago\n", - "•\n", - "1.86k\n", - 
"•\n", - "234\n", - "HuggingFaceFW/fineweb-2\n", - "Updated\n", - "13 days ago\n", - "•\n", - "77.7k\n", - "•\n", - "342\n", - "HuggingFaceTB/finemath\n", - "Updated\n", - "1 day ago\n", - "•\n", - "1.86k\n", - "•\n", - "43\n", - "amphora/QwQ-LongCoT-130K\n", - "Updated\n", - "16 days ago\n", - "•\n", - "1.34k\n", - "•\n", - "85\n", - "Browse 100k+ datasets\n", - "The Home of Machine Learning\n", - "Create, discover and collaborate on ML better.\n", - "The collaboration platform\n", - "Host and collaborate on unlimited public models, datasets and applications.\n", - "Move faster\n", - "With the HF Open source stack.\n", - "Explore all modalities\n", - "Text, image, video, audio or even 3D.\n", - "Build your portfolio\n", - "Share your work with the world and build your ML profile.\n", - "Sign Up\n", - "Accelerate your ML\n", - "We provide paid Compute and Enterprise solutions.\n", - "Compute\n", - "Deploy on optimized\n", - "Inference Endpoints\n", - "or update your\n", - "Spaces applications\n", - "to a GPU in a few clicks.\n", - "View pricing\n", - "Starting at $0.60/hour for GPU\n", - "Enterprise\n", - "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", - "\t\t\tdedicated support.\n", - "Getting started\n", - "Starting at $20/user/month\n", - "Single Sign-On\n", - "Regions\n", - "Priority Support\n", - "Audit Logs\n", - "Resource Groups\n", - "Private Datasets Viewer\n", - "More than 50,000 organizations are using Hugging Face\n", - "Ai2\n", - "Enterprise\n", - "non-profit\n", - "•\n", - "366 models\n", - "•\n", - "1.76k followers\n", - "AI at Meta\n", - "Enterprise\n", - "company\n", - "•\n", - "2.05k models\n", - "•\n", - "3.83k followers\n", - "Amazon Web Services\n", - "company\n", - "•\n", - "21 models\n", - "•\n", - "2.45k followers\n", - "Google\n", - "company\n", - "•\n", - "911 models\n", - "•\n", - "5.76k followers\n", - "Intel\n", - "company\n", - "•\n", - "217 models\n", - "•\n", - "2.07k 
followers\n", - "Microsoft\n", - "company\n", - "•\n", - "351 models\n", - "•\n", - "6.29k followers\n", - "Grammarly\n", - "company\n", - "•\n", - "10 models\n", - "•\n", - "102 followers\n", - "Writer\n", - "Enterprise\n", - "company\n", - "•\n", - "17 models\n", - "•\n", - "186 followers\n", - "Our Open Source\n", - "We are building the foundation of ML tooling with the community.\n", - "Transformers\n", - "136,571\n", - "State-of-the-art ML for Pytorch, TensorFlow, and JAX.\n", - "Diffusers\n", - "26,740\n", - "State-of-the-art diffusion models for image and audio generation in PyTorch.\n", - "Safetensors\n", - "2,960\n", - "Simple, safe way to store and distribute neural networks weights safely and quickly.\n", - "Hub Python Library\n", - "2,177\n", - "Client library for the HF Hub: manage repositories from your Python runtime.\n", - "Tokenizers\n", - "9,165\n", - "Fast tokenizers, optimized for both research and production.\n", - "PEFT\n", - "16,767\n", - "Parameter efficient finetuning methods for large models.\n", - "Transformers.js\n", - "12,421\n", - "State-of-the-art Machine Learning for the web. 
Run Transformers directly in your browser, with no need for a server.\n", - "timm\n", - "32,668\n", - "State-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\n", - "TRL\n", - "10,382\n", - "Train transformer language models with reinforcement learning.\n", - "Datasets\n", - "19,378\n", - "Access and share datasets for computer vision, audio, and NLP tasks.\n", - "Text Generation Inference\n", - "9,484\n", - "Toolkit to serve Large Language Models.\n", - "Accelerate\n", - "8,082\n", - "Easily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n", - "System theme\n", - "Website\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Tasks\n", - "Inference Endpoints\n", - "HuggingChat\n", - "Company\n", - "About\n", - "Brand assets\n", - "Terms of service\n", - "Privacy\n", - "Jobs\n", - "Press\n", - "Resources\n", - "Learn\n", - "Documentation\n", - "Blog\n", - "Forum\n", - "Service Status\n", - "Social\n", - "GitHub\n", - "Twitter\n", - "LinkedIn\n", - "Discord\n", - "\n", - "\n", - "\n", - "about page\n", - "Webpage Title:\n", - "about (Sergei)\n", - "Webpage Contents:\n", - "Hugging Face\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Posts\n", - "Docs\n", - "Enterprise\n", - "Pricing\n", - "Log In\n", - "Sign Up\n", - "Sergei\n", - "about\n", - "Follow\n", - "Kalaipriya's profile picture\n", - "selvivincent's profile picture\n", - "Renumathi's profile picture\n", - "3\n", - "\t\t\t\t\tfollowers\n", - "·\n", - "0 following\n", - "AI & ML interests\n", - "None yet\n", - "Organizations\n", - "None yet\n", - "models\n", - "None public yet\n", - "datasets\n", - "None public yet\n", - "System theme\n", - "Company\n", - "TOS\n", - "Privacy\n", - "About\n", - "Jobs\n", - "Website\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Pricing\n", - "Docs\n", - "\n", - "\n", - "\n", - "careers page\n", - "Webpage Title:\n", - "Hugging Face - Current Openings\n", - "Webpage Contents:\n", - "\n", - "\n", - "\n", - "\n", - 
"blog page\n", - "Webpage Title:\n", - "Hugging Face – Blog\n", - "Webpage Contents:\n", - "Hugging Face\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Posts\n", - "Docs\n", - "Enterprise\n", - "Pricing\n", - "Log In\n", - "Sign Up\n", - "Blog, Articles, and discussions\n", - "New Article\n", - "Everything\n", - "community\n", - "guide\n", - "open source collab\n", - "partnerships\n", - "research\n", - "NLP\n", - "Audio\n", - "CV\n", - "RL\n", - "ethics\n", - "Diffusion\n", - "Game Development\n", - "RLHF\n", - "Leaderboard\n", - "Case Studies\n", - "Evaluating Audio Reasoning with Big Bench Audio\n", - "By\n", - "mhillsmith\n", - "December 20, 2024\n", - "guest\n", - "•\n", - "8\n", - "Community Articles\n", - "view all\n", - "20+ Free and Paid Digital Marketing Strategies to Automate Repetitive Tasks\n", - "By\n", - "Markets\n", - "•\n", - "about 3 hours ago\n", - "•\n", - "1\n", - "🧠 Tags generation dataset\n", - "By\n", - "zino36\n", - "•\n", - "about 16 hours ago\n", - "•\n", - "1\n", - "AI Agents in Action: Managing GitHub Issues with KaibanJS\n", - "By\n", - "darielnoel\n", - "•\n", - "1 day ago\n", - "**Intelligence Potentiation: An Evolutionary Perspective on AI Agent Designs**\n", - "By\n", - "KnutJaegersberg\n", - "•\n", - "1 day ago\n", - "•\n", - "3\n", - "MINERVA: A Multi-Agent LLM System for Digital Scam Protection\n", - "By\n", - "dcarpintero\n", - "•\n", - "2 days ago\n", - "Mastering Iterative Prompting for Optimized AI Code Generation\n", - "By\n", - "luigi12345\n", - "•\n", - "3 days ago\n", - "•\n", - "1\n", - "SILMA RAGQA V1.0: A Comprehensive Benchmark for Evaluating LLMs on RAG QA Use-Cases\n", - "By\n", - "karimouda\n", - "•\n", - "3 days ago\n", - "•\n", - "1\n", - "FuseChat-3.0: Preference Optimization for Implicit Model Fusion\n", - "By\n", - "Wanfq\n", - "•\n", - "3 days ago\n", - "•\n", - "2\n", - "Tutorial: Quantizing Llama 3+ Models for Efficient Deployment\n", - "By\n", - "theeseus-ai\n", - "•\n", - "6 days ago\n", - "•\n", - 
"3\n", - "How to Expand Your AI Music Generations of 30 Seconds to Several Minutes\n", - "By\n", - "theeseus-ai\n", - "•\n", - "8 days ago\n", - "•\n", - "1\n", - "🇪🇺✍️ EU AI Act: Systemic Risks in the First CoP Draft Comments ✍️🇪🇺\n", - "By\n", - "yjernite\n", - "•\n", - "9 days ago\n", - "•\n", - "11\n", - "Building an AI-powered search engine from scratch\n", - "By\n", - "as-cle-bert\n", - "•\n", - "10 days ago\n", - "•\n", - "8\n", - "MotionLCM-V2: Improved Compression Rate for Multi-Latent-Token Diffusion\n", - "By\n", - "wxDai\n", - "•\n", - "10 days ago\n", - "•\n", - "12\n", - "RLHF 101: A Technical Dive into RLHF\n", - "By\n", - "GitBag\n", - "•\n", - "10 days ago\n", - "•\n", - "4\n", - "[Talk Arena](https://talkarena.org)\n", - "By\n", - "WillHeld\n", - "•\n", - "11 days ago\n", - "•\n", - "1\n", - "Multimodal RAG with Colpali, Milvus and VLMs\n", - "By\n", - "saumitras\n", - "•\n", - "11 days ago\n", - "•\n", - "2\n", - "In Honour of This Year's NeurIPs Test of Time Paper Awardees\n", - "By\n", - "Jaward\n", - "•\n", - "11 days ago\n", - "•\n", - "2\n", - "Power steering: Squeeze massive power from small LLMs\n", - "By\n", - "ucheog\n", - "•\n", - "12 days ago\n", - "•\n", - "4\n", - "Exploring the Power of KaibanJS v0.11.0 🚀\n", - "By\n", - "darielnoel\n", - "•\n", - "12 days ago\n", - "•\n", - "1\n", - "**Building a Custom Retrieval System with Motoko and Node.js**\n", - "By\n", - "theeseus-ai\n", - "•\n", - "12 days ago\n", - "•\n", - "1\n", - "Finally, a Replacement for BERT: Introducing ModernBERT\n", - "By\n", - "bwarner\n", - "December 19, 2024\n", - "guest\n", - "•\n", - "289\n", - "Bamba: Inference-Efficient Hybrid Mamba2 Model\n", - "By\n", - "Linsong-C\n", - "December 18, 2024\n", - "guest\n", - "•\n", - "30\n", - "Welcome the Falcon 3 Family of Open Models!\n", - "By\n", - "FalconLLM\n", - "December 17, 2024\n", - "•\n", - "98\n", - "Benchmarking Language Model Performance on 5th Gen Xeon at GCP\n", - "By\n", - "MatrixYao\n", - "December 17, 
2024\n", - "•\n", - "2\n", - "Introducing the Synthetic Data Generator - Build Datasets with Natural Language\n", - "By\n", - "davidberenstein1957\n", - "December 16, 2024\n", - "•\n", - "55\n", - "LeMaterial: an open source initiative to accelerate materials discovery and research\n", - "By\n", - "AlexDuvalinho\n", - "December 10, 2024\n", - "guest\n", - "•\n", - "30\n", - "Hugging Face models in Amazon Bedrock\n", - "By\n", - "pagezyhf\n", - "December 9, 2024\n", - "•\n", - "8\n", - "Open Preference Dataset for Text-to-Image Generation by the 🤗 Community\n", - "By\n", - "davidberenstein1957\n", - "December 9, 2024\n", - "•\n", - "47\n", - "Welcome PaliGemma 2 – New vision language models by Google\n", - "By\n", - "merve\n", - "December 5, 2024\n", - "•\n", - "117\n", - "“How good are LLMs at fixing their mistakes? A chatbot arena experiment with Keras and TPUs\n", - "By\n", - "martin-gorner\n", - "December 5, 2024\n", - "•\n", - "12\n", - "Rethinking LLM Evaluation with 3C3H: AraGen Benchmark and Leaderboard\n", - "By\n", - "alielfilali01\n", - "December 4, 2024\n", - "guest\n", - "•\n", - "26\n", - "Investing in Performance: Fine-tune small models with LLM insights - a CFM case study\n", - "By\n", - "oahouzi\n", - "December 3, 2024\n", - "•\n", - "25\n", - "Rearchitecting Hugging Face Uploads and Downloads\n", - "By\n", - "port8080\n", - "November 26, 2024\n", - "•\n", - "37\n", - "SmolVLM - small yet mighty Vision Language Model\n", - "By\n", - "andito\n", - "November 26, 2024\n", - "•\n", - "142\n", - "Previous\n", - "1\n", - "2\n", - "3\n", - "...\n", - "36\n", - "Next\n", - "Community Articles\n", - "view all\n", - "20+ Free and Paid Digital Marketing Strategies to Automate Repetitive Tasks\n", - "By\n", - "Markets\n", - "•\n", - "about 3 hours ago\n", - "•\n", - "1\n", - "🧠 Tags generation dataset\n", - "By\n", - "zino36\n", - "•\n", - "about 16 hours ago\n", - "•\n", - "1\n", - "AI Agents in Action: Managing GitHub Issues with KaibanJS\n", - "By\n", - 
"darielnoel\n", - "•\n", - "1 day ago\n", - "**Intelligence Potentiation: An Evolutionary Perspective on AI Agent Designs**\n", - "By\n", - "KnutJaegersberg\n", - "•\n", - "1 day ago\n", - "•\n", - "3\n", - "MINERVA: A Multi-Agent LLM System for Digital Scam Protection\n", - "By\n", - "dcarpintero\n", - "•\n", - "2 days ago\n", - "Mastering Iterative Prompting for Optimized AI Code Generation\n", - "By\n", - "luigi12345\n", - "•\n", - "3 days ago\n", - "•\n", - "1\n", - "SILMA RAGQA V1.0: A Comprehensive Benchmark for Evaluating LLMs on RAG QA Use-Cases\n", - "By\n", - "karimouda\n", - "•\n", - "3 days ago\n", - "•\n", - "1\n", - "FuseChat-3.0: Preference Optimization for Implicit Model Fusion\n", - "By\n", - "Wanfq\n", - "•\n", - "3 days ago\n", - "•\n", - "2\n", - "Tutorial: Quantizing Llama 3+ Models for Efficient Deployment\n", - "By\n", - "theeseus-ai\n", - "•\n", - "6 days ago\n", - "•\n", - "3\n", - "How to Expand Your AI Music Generations of 30 Seconds to Several Minutes\n", - "By\n", - "theeseus-ai\n", - "•\n", - "8 days ago\n", - "•\n", - "1\n", - "🇪🇺✍️ EU AI Act: Systemic Risks in the First CoP Draft Comments ✍️🇪🇺\n", - "By\n", - "yjernite\n", - "•\n", - "9 days ago\n", - "•\n", - "11\n", - "Building an AI-powered search engine from scratch\n", - "By\n", - "as-cle-bert\n", - "•\n", - "10 days ago\n", - "•\n", - "8\n", - "MotionLCM-V2: Improved Compression Rate for Multi-Latent-Token Diffusion\n", - "By\n", - "wxDai\n", - "•\n", - "10 days ago\n", - "•\n", - "12\n", - "RLHF 101: A Technical Dive into RLHF\n", - "By\n", - "GitBag\n", - "•\n", - "10 days ago\n", - "•\n", - "4\n", - "[Talk Arena](https://talkarena.org)\n", - "By\n", - "WillHeld\n", - "•\n", - "11 days ago\n", - "•\n", - "1\n", - "Multimodal RAG with Colpali, Milvus and VLMs\n", - "By\n", - "saumitras\n", - "•\n", - "11 days ago\n", - "•\n", - "2\n", - "In Honour of This Year's NeurIPs Test of Time Paper Awardees\n", - "By\n", - "Jaward\n", - "•\n", - "11 days ago\n", - "•\n", - "2\n", - 
"Power steering: Squeeze massive power from small LLMs\n", - "By\n", - "ucheog\n", - "•\n", - "12 days ago\n", - "•\n", - "4\n", - "Exploring the Power of KaibanJS v0.11.0 🚀\n", - "By\n", - "darielnoel\n", - "•\n", - "12 days ago\n", - "•\n", - "1\n", - "**Building a Custom Retrieval System with Motoko and Node.js**\n", - "By\n", - "theeseus-ai\n", - "•\n", - "12 days ago\n", - "•\n", - "1\n", - "System theme\n", - "Company\n", - "TOS\n", - "Privacy\n", - "About\n", - "Jobs\n", - "Website\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Pricing\n", - "Docs\n", - "\n", - "\n", - "\n", - "company page\n", - "Webpage Title:\n", - "huggingface (Hugging Face)\n", - "Webpage Contents:\n", - "Hugging Face\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Posts\n", - "Docs\n", - "Enterprise\n", - "Pricing\n", - "Log In\n", - "Sign Up\n", - "Hugging Face\n", - "Enterprise\n", - "company\n", - "Verified\n", - "https://huggingface.co\n", - "huggingface\n", - "huggingface\n", - "Activity Feed\n", - "Follow\n", - "8,542\n", - "AI & ML interests\n", - "The AI community building the future.\n", - "Recent Activity\n", - "IAMJB\n", - "updated\n", - "a dataset\n", - "9 minutes ago\n", - "huggingface/community-science-paper-v2\n", - "IAMJB\n", - "updated\n", - "a dataset\n", - "about 6 hours ago\n", - "huggingface/paper-central-data\n", - "fdaudens\n", - "updated\n", - "a Space\n", - "about 19 hours ago\n", - "huggingface/open-source-ai-year-in-review-2024\n", - "View all activity\n", - "Team members\n", - "224\n", - "+190\n", - "+177\n", - "+156\n", - "+146\n", - "+126\n", - "Organization Card\n", - "Community\n", - "About org cards\n", - "👋 Hi!\n", - "We are on a mission to democratize\n", - "good\n", - "machine learning, one commit at a time.\n", - "If that sounds like something you should be doing, why don't you\n", - "join us\n", - "!\n", - "For press enquiries, you can\n", - "✉️ contact our team here\n", - ".\n", - "Collections\n", - "1\n", - "DistilBERT release\n", - 
"Original DistilBERT model, checkpoints obtained from using teacher-student learning from the original BERT checkpoints.\n", - "distilbert/distilbert-base-cased\n", - "Fill-Mask\n", - "•\n", - "Updated\n", - "May 6\n", - "•\n", - "358k\n", - "•\n", - "35\n", - "distilbert/distilbert-base-uncased\n", - "Fill-Mask\n", - "•\n", - "Updated\n", - "May 6\n", - "•\n", - "14.8M\n", - "•\n", - "577\n", - "distilbert/distilbert-base-multilingual-cased\n", - "Fill-Mask\n", - "•\n", - "Updated\n", - "May 6\n", - "•\n", - "472k\n", - "•\n", - "148\n", - "distilbert/distilbert-base-uncased-finetuned-sst-2-english\n", - "Text Classification\n", - "•\n", - "Updated\n", - "Dec 19, 2023\n", - "•\n", - "6.96M\n", - "•\n", - "•\n", - "645\n", - "spaces\n", - "23\n", - "Sort: \n", - "\t\tRecently updated\n", - "pinned\n", - "Running\n", - "52\n", - "📈\n", - "Number Tokenization Blog\n", - "Running\n", - "395\n", - "😻\n", - "Open Source Ai Year In Review 2024\n", - "What happened in open-source AI this year, and what’s next?\n", - "Running\n", - "42\n", - "🔋\n", - "Inference Playground\n", - "Running\n", - "196\n", - "⚡\n", - "paper-central\n", - "Running\n", - "on\n", - "TPU v5e\n", - "6\n", - "💬\n", - "Keras Chatbot Battle\n", - "Running\n", - "101\n", - "⚡\n", - "Modelcard Creator\n", - "Expand 23\n", - "\t\t\t\t\t\t\tspaces\n", - "models\n", - "18\n", - "Sort: \n", - "\t\tRecently updated\n", - "huggingface/test-gating-group-2\n", - "Updated\n", - "4 days ago\n", - "huggingface/test-gating-group-1\n", - "Updated\n", - "4 days ago\n", - "huggingface/timesfm-tourism-monthly\n", - "Updated\n", - "12 days ago\n", - "•\n", - "29\n", - "•\n", - "1\n", - "huggingface/CodeBERTa-language-id\n", - "Text Classification\n", - "•\n", - "Updated\n", - "Mar 29\n", - "•\n", - "1.14k\n", - "•\n", - "54\n", - "huggingface/falcon-40b-gptq\n", - "Text Generation\n", - "•\n", - "Updated\n", - "Jun 14, 2023\n", - "•\n", - "19\n", - "•\n", - "12\n", - "huggingface/autoformer-tourism-monthly\n", - 
"Updated\n", - "May 24, 2023\n", - "•\n", - "1.5k\n", - "•\n", - "9\n", - "huggingface/distilbert-base-uncased-finetuned-mnli\n", - "Text Classification\n", - "•\n", - "Updated\n", - "Mar 22, 2023\n", - "•\n", - "1.37k\n", - "•\n", - "2\n", - "huggingface/informer-tourism-monthly\n", - "Updated\n", - "Feb 24, 2023\n", - "•\n", - "1.12k\n", - "•\n", - "5\n", - "huggingface/time-series-transformer-tourism-monthly\n", - "Updated\n", - "Feb 23, 2023\n", - "•\n", - "2.16k\n", - "•\n", - "18\n", - "huggingface/the-no-branch-repo\n", - "Text-to-Image\n", - "•\n", - "Updated\n", - "Feb 10, 2023\n", - "•\n", - "7\n", - "•\n", - "3\n", - "Expand 18\n", - "\t\t\t\t\t\t\tmodels\n", - "datasets\n", - "31\n", - "Sort: \n", - "\t\tRecently updated\n", - "huggingface/community-science-paper-v2\n", - "Viewer\n", - "•\n", - "Updated\n", - "9 minutes ago\n", - "•\n", - "5.03k\n", - "•\n", - "404\n", - "•\n", - "7\n", - "huggingface/paper-central-data\n", - "Viewer\n", - "•\n", - "Updated\n", - "about 6 hours ago\n", - "•\n", - "119k\n", - "•\n", - "553\n", - "•\n", - "8\n", - "huggingface/documentation-images\n", - "Viewer\n", - "•\n", - "Updated\n", - "1 day ago\n", - "•\n", - "44\n", - "•\n", - "2.43M\n", - "•\n", - "43\n", - "huggingface/transformers-metadata\n", - "Viewer\n", - "•\n", - "Updated\n", - "2 days ago\n", - "•\n", - "1.52k\n", - "•\n", - "559\n", - "•\n", - "14\n", - "huggingface/diffusers-metadata\n", - "Viewer\n", - "•\n", - "Updated\n", - "2 days ago\n", - "•\n", - "62\n", - "•\n", - "442\n", - "•\n", - "4\n", - "huggingface/policy-docs\n", - "Updated\n", - "3 days ago\n", - "•\n", - "898\n", - "•\n", - "6\n", - "huggingface/my-distiset-3f5a230e\n", - "Updated\n", - "30 days ago\n", - "•\n", - "17\n", - "huggingface/cookbook-images\n", - "Viewer\n", - "•\n", - "Updated\n", - "Nov 14\n", - "•\n", - "1\n", - "•\n", - "40.1k\n", - "•\n", - "6\n", - "huggingface/vllm-metadata\n", - "Updated\n", - "Oct 8\n", - "•\n", - "12\n", - "huggingface/paper-central-data-2\n", - 
"Viewer\n", - "•\n", - "Updated\n", - "Oct 4\n", - "•\n", - "58.3k\n", - "•\n", - "68\n", - "•\n", - "2\n", - "Expand 31\n", - "\t\t\t\t\t\t\tdatasets\n", - "System theme\n", - "Company\n", - "TOS\n", - "Privacy\n", - "About\n", - "Jobs\n", - "Website\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Pricing\n", - "Docs\n", - "\n", - "\n", - "\n", - "community discussions\n", - "Webpage Title:\n", - "Hugging Face Forums - Hugging Face Community Discussion\n", - "Webpage Contents:\n", - "Loading\n", - "Hugging Face Forums\n", - "Topic\n", - "Replies\n", - "Views\n", - "Activity\n", - "List of `size_categories`\n", - "🤗Datasets\n", - "3\n", - "5\n", - "December 21, 2024\n", - "Feature request - maintain list of favorite hf pages reachable from my hom epage\n", - "Site Feedback\n", - "4\n", - "886\n", - "December 21, 2024\n", - "404 error on carbon emission calculation\n", - "Site Feedback\n", - "1\n", - "7\n", - "December 21, 2024\n", - "Cannot connect gRPC Server Hosted on HuggingFace Spaces\n", - "Spaces\n", - "0\n", - "8\n", - "December 21, 2024\n", - "Hide system prompt or system instruction\n", - "Beginners\n", - "3\n", - "15\n", - "December 21, 2024\n", - "ModuleNotFoundError: No module named 'huggingface_hub.inference._types'\n", - "🤗Hub\n", - "0\n", - "5\n", - "December 21, 2024\n", - "Understanding State Management with Gradio and LangGraph\n", - "Beginners\n", - "1\n", - "11\n", - "December 21, 2024\n", - "Dimension problem\n", - "Beginners\n", - "25\n", - "21\n", - "December 21, 2024\n", - "Fine-tuning whisper on sound-event-detection dataset\n", - "🤗Transformers\n", - "0\n", - "4\n", - "December 20, 2024\n", - "Model that can generate both text and image as output\n", - "Research\n", - "4\n", - "42\n", - "December 21, 2024\n", - "Lm studio and Chat ui doesn't work with module\n", - "Beginners\n", - "11\n", - "33\n", - "December 21, 2024\n", - "Inference API Context Window and TOS\n", - "Beginners\n", - "0\n", - "12\n", - "December 20, 2024\n", - "Talkie 
AI got remove from app store -any alternative ai chat?\n", - "Beginners\n", - "4\n", - "1151\n", - "December 18, 2024\n", - "Inference Text Generation API issue\n", - "Intermediate\n", - "0\n", - "7\n", - "December 20, 2024\n", - "From Pandas Dataframe to Huggingface Dataset\n", - "Beginners\n", - "9\n", - "60459\n", - "December 20, 2024\n", - "\"Load Diffusion Model\" and \"Unet Loader (GGUF)\" null/undefined\n", - "Beginners\n", - "6\n", - "200\n", - "December 20, 2024\n", - "Timeout Issue with DeepSpeed on Multiple GPUs\n", - "DeepSpeed\n", - "0\n", - "8\n", - "December 20, 2024\n", - "Spaces dedicated gpu limit\n", - "Spaces\n", - "1\n", - "14\n", - "December 19, 2024\n", - "Chatbot PDF - using flan-t5-large model\n", - "Models\n", - "0\n", - "7\n", - "December 20, 2024\n", - "Gateway Problem\n", - "Beginners\n", - "0\n", - "8\n", - "December 20, 2024\n", - "RT-DETR attention map dimension - PekingU/rtdetr_r50vd\n", - "Models\n", - "0\n", - "5\n", - "December 20, 2024\n", - "Extending the tokenizer affects model generation\n", - "Intermediate\n", - "3\n", - "9\n", - "December 19, 2024\n", - "How to Ensure Each Process Reads Its Own Dataset and Trains Correctly When Using Trainer?\n", - "🤗Transformers\n", - "0\n", - "5\n", - "December 20, 2024\n", - "Can't save the tensorflow model of nvidia/mit-b5\n", - "Intermediate\n", - "3\n", - "127\n", - "December 19, 2024\n", - "# Audio course Unit 4. sample code not working. Can anyone check for me? 
Thanks\n", - "Course\n", - "0\n", - "6\n", - "December 20, 2024\n", - "Host Models on Hugging face and Perform Inference on Hugging Face Infrastructure\n", - "Beginners\n", - "0\n", - "6\n", - "December 20, 2024\n", - "Torchrun, trainer, dataset setup\n", - "Intermediate\n", - "4\n", - "71\n", - "December 20, 2024\n", - "Training fails on multiple GPUs with RuntimeError 'chuck expects at least a 1-dimensional array'\n", - "Beginners\n", - "2\n", - "108\n", - "December 19, 2024\n", - "How do you know whether the model is merged and uploaded?\n", - "Intermediate\n", - "0\n", - "11\n", - "December 20, 2024\n", - "Qwen based AI assistant randomly having an absolute, utter, complete 'mental breakdowns'?? (Inference API)\n", - "🤗Transformers\n", - "2\n", - "23\n", - "December 17, 2024\n", - "next page →\n", - "Home\n", - "Categories\n", - "Guidelines\n", - "Terms of Service\n", - "Privacy Policy\n", - "Powered by\n", - "Discourse\n", - ", best viewed with JavaScript enabled\n", - "\n", - "\n", - "\n", - "GitHub page\n", - "Webpage Title:\n", - "Hugging Face · GitHub\n", - "Webpage Contents:\n", - "Skip to content\n", - "Navigation Menu\n", - "Toggle navigation\n", - "Sign in\n", - "huggingface\n", - "Product\n", - "GitHub Copilot\n", - "Write better code with AI\n", - "Security\n", - "Find and fix vulnerabilities\n", - "Actions\n", - "Automate any workflow\n", - "Codespaces\n", - "Instant dev environments\n", - "Issues\n", - "Plan and track work\n", - "Code Review\n", - "Manage code changes\n", - "Discussions\n", - "Collaborate outside of code\n", - "Code Search\n", - "Find more, search less\n", - "Explore\n", - "All features\n", - "Documentation\n", - "GitHub Skills\n", - "Blog\n", - "Solutions\n", - "By company size\n", - "Enterprises\n", - "Small and medium teams\n", - "Startups\n", - "By use case\n", - "DevSecOps\n", - "DevOps\n", - "CI/CD\n", - "View all use cases\n", - "By industry\n", - "Healthcare\n", - "Financial services\n", - "Manufacturing\n", - 
"Government\n", - "View all industries\n", - "View all solutions\n", - "Resources\n", - "Topics\n", - "AI\n", - "DevOps\n", - "Security\n", - "Software Development\n", - "View all\n", - "Explore\n", - "Learning Pathways\n", - "White papers, Ebooks, Webinars\n", - "Customer Stories\n", - "Partners\n", - "Executive Insights\n", - "Open Source\n", - "GitHub Sponsors\n", - "Fund open source developers\n", - "The ReadME Project\n", - "GitHub community articles\n", - "Repositories\n", - "Topics\n", - "Trending\n", - "Collections\n", - "Enterprise\n", - "Enterprise platform\n", - "AI-powered developer platform\n", - "Available add-ons\n", - "Advanced Security\n", - "Enterprise-grade security features\n", - "GitHub Copilot\n", - "Enterprise-grade AI features\n", - "Premium Support\n", - "Enterprise-grade 24/7 support\n", - "Pricing\n", - "Search or jump to...\n", - "Search code, repositories, users, issues, pull requests...\n", - "Search\n", - "Clear\n", - "Search syntax tips\n", - "Provide feedback\n", - "We read every piece of feedback, and take your input very seriously.\n", - "Include my email address so I can be contacted\n", - "Cancel\n", - "Submit feedback\n", - "Saved searches\n", - "Use saved searches to filter your results more quickly\n", - "Cancel\n", - "Create saved search\n", - "Sign in\n", - "Sign up\n", - "Reseting focus\n", - "You signed in with another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "You signed out in another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "You switched accounts on another tab or window.\n", - "Reload\n", - "to refresh your session.\n", - "Dismiss alert\n", - "Hugging Face\n", - "The AI community building the future.\n", - "Verified\n", - "We've verified that the organization\n", - "huggingface\n", - "controls the domain:\n", - "huggingface.co\n", - "Learn more about verified organizations\n", - "40.1k\n", - "followers\n", - "NYC + Paris\n", - "https://huggingface.co/\n", - "X\n", - 
"@huggingface\n", - "Overview\n", - "Repositories\n", - "Projects\n", - "Packages\n", - "People\n", - "Sponsoring\n", - "0\n", - "More\n", - "Overview\n", - "Repositories\n", - "Projects\n", - "Packages\n", - "People\n", - "Sponsoring\n", - "Pinned\n", - "Loading\n", - "transformers\n", - "transformers\n", - "Public\n", - "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", - "Python\n", - "137k\n", - "27.3k\n", - "diffusers\n", - "diffusers\n", - "Public\n", - "🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.\n", - "Python\n", - "26.7k\n", - "5.5k\n", - "datasets\n", - "datasets\n", - "Public\n", - "🤗 The largest hub of ready-to-use datasets for ML models with fast, easy-to-use and efficient data manipulation tools\n", - "Python\n", - "19.4k\n", - "2.7k\n", - "peft\n", - "peft\n", - "Public\n", - "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.\n", - "Python\n", - "16.8k\n", - "1.7k\n", - "accelerate\n", - "accelerate\n", - "Public\n", - "🚀 A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support\n", - "Python\n", - "8.1k\n", - "995\n", - "optimum\n", - "optimum\n", - "Public\n", - "🚀 Accelerate inference and training of 🤗 Transformers, Diffusers, TIMM and Sentence Transformers with easy to use hardware optimization tools\n", - "Python\n", - "2.6k\n", - "486\n", - "Repositories\n", - "Loading\n", - "Type\n", - "Select type\n", - "Forks\n", - "Archived\n", - "Mirrors\n", - "Templates\n", - "Language\n", - "Select language\n", - "All\n", - "C\n", - "C#\n", - "C++\n", - "Cuda\n", - "Dockerfile\n", - "Go\n", - "Handlebars\n", - "HTML\n", - "Java\n", - "JavaScript\n", - "Jupyter Notebook\n", - "Kotlin\n", - "Lua\n", - "MDX\n", - "Mustache\n", - "Nix\n", - "Python\n", - "Rust\n", - "Shell\n", - "Smarty\n", - "Swift\n", - "TypeScript\n", 
- "Sort\n", - "Select order\n", - "Last updated\n", - "Name\n", - "Stars\n", - "Showing 10 of 275 repositories\n", - "trl\n", - "Public\n", - "Train transformer language models with reinforcement learning.\n", - "huggingface/trl’s past year of commit activity\n", - "Python\n", - "10,382\n", - "Apache-2.0\n", - "1,337\n", - "106\n", - "46\n", - "Updated\n", - "Dec 21, 2024\n", - "transformers.js\n", - "Public\n", - "State-of-the-art Machine Learning for the web. Run 🤗 Transformers directly in your browser, with no need for a server!\n", - "huggingface/transformers.js’s past year of commit activity\n", - "JavaScript\n", - "12,421\n", - "Apache-2.0\n", - "790\n", - "274\n", - "(3 issues need help)\n", - "48\n", - "Updated\n", - "Dec 21, 2024\n", - "diffusers\n", - "Public\n", - "🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.\n", - "huggingface/diffusers’s past year of commit activity\n", - "Python\n", - "26,740\n", - "Apache-2.0\n", - "5,504\n", - "379\n", - "(10 issues need help)\n", - "169\n", - "Updated\n", - "Dec 21, 2024\n", - "text-generation-inference\n", - "Public\n", - "Large Language Model Text Generation Inference\n", - "huggingface/text-generation-inference’s past year of commit activity\n", - "Python\n", - "9,484\n", - "Apache-2.0\n", - "1,106\n", - "152\n", - "21\n", - "Updated\n", - "Dec 21, 2024\n", - "candle\n", - "Public\n", - "Minimalist ML framework for Rust\n", - "huggingface/candle’s past year of commit activity\n", - "Rust\n", - "16,103\n", - "Apache-2.0\n", - "980\n", - "344\n", - "(5 issues need help)\n", - "86\n", - "Updated\n", - "Dec 21, 2024\n", - "autotrain-advanced\n", - "Public\n", - "🤗 AutoTrain Advanced\n", - "huggingface/autotrain-advanced’s past year of commit activity\n", - "Python\n", - "4,157\n", - "Apache-2.0\n", - "505\n", - "16\n", - "2\n", - "Updated\n", - "Dec 21, 2024\n", - "transformers\n", - "Public\n", - "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, 
TensorFlow, and JAX.\n", - "huggingface/transformers’s past year of commit activity\n", - "Python\n", - "136,571\n", - "Apache-2.0\n", - "27,342\n", - "1,003\n", - "(2 issues need help)\n", - "526\n", - "Updated\n", - "Dec 21, 2024\n", - "lighteval\n", - "Public\n", - "Lighteval is your all-in-one toolkit for evaluating LLMs across multiple backends\n", - "huggingface/lighteval’s past year of commit activity\n", - "Python\n", - "889\n", - "MIT\n", - "109\n", - "62\n", - "(1 issue needs help)\n", - "15\n", - "Updated\n", - "Dec 21, 2024\n", - "hub-docs\n", - "Public\n", - "Docs of the Hugging Face Hub\n", - "huggingface/hub-docs’s past year of commit activity\n", - "Handlebars\n", - "309\n", - "Apache-2.0\n", - "259\n", - "90\n", - "25\n", - "Updated\n", - "Dec 21, 2024\n", - "optimum-habana\n", - "Public\n", - "Easy and lightning fast training of 🤗 Transformers on Habana Gaudi processor (HPU)\n", - "huggingface/optimum-habana’s past year of commit activity\n", - "Python\n", - "162\n", - "Apache-2.0\n", - "219\n", - "11\n", - "(1 issue needs help)\n", - "40\n", - "Updated\n", - "Dec 21, 2024\n", - "View all repositories\n", - "People\n", - "View all\n", - "Top languages\n", - "Python\n", - "Jupyter Notebook\n", - "Rust\n", - "TypeScript\n", - "JavaScript\n", - "Most used topics\n", - "pytorch\n", - "machine-learning\n", - "nlp\n", - "deep-learning\n", - "transformers\n", - "Footer\n", - "© 2024 GitHub, Inc.\n", - "Footer navigation\n", - "Terms\n", - "Privacy\n", - "Security\n", - "Status\n", - "Docs\n", - "Contact\n", - "Manage cookies\n", - "Do not share my personal information\n", - "You can’t perform that action at this time.\n", - "\n", - "\n", - "\n", - "Twitter page\n", - "Webpage Title:\n", - "x.com\n", - "Webpage Contents:\n", - "\n", - "\n", - "\n", - "\n", - "LinkedIn page\n", - "Webpage Title:\n", - "Hugging Face | LinkedIn\n", - "Webpage Contents:\n", - "Skip to main content\n", - "LinkedIn\n", - "Articles\n", - "People\n", - "Learning\n", - "Jobs\n", - 
"Games\n", - "Get the app\n", - "Join now\n", - "Sign in\n", - "Hugging Face\n", - "Software Development\n", - "The AI community building the future.\n", - "See jobs\n", - "Follow\n", - "Discover all 472 employees\n", - "Report this company\n", - "About us\n", - "The AI community building the future.\n", - "Website\n", - "https://huggingface.co\n", - "External link for Hugging Face\n", - "Industry\n", - "Software Development\n", - "Company size\n", - "51-200 employees\n", - "Type\n", - "Privately Held\n", - "Founded\n", - "2016\n", - "Specialties\n", - "machine learning, natural language processing, and deep learning\n", - "Products\n", - "Hugging Face\n", - "Hugging Face\n", - "Natural Language Processing (NLP) Software\n", - "We‚Äôre on a journey to solve and democratize artificial intelligence through natural language.\n", - "Locations\n", - "Primary\n", - "Get directions\n", - "Paris, FR\n", - "Get directions\n", - "Employees at Hugging Face\n", - "Ludovic Huraux\n", - "Bassem ASSEH\n", - "Rajat Arya\n", - "Tech Lead & Software Engineer @ HF | prev: co-founder XetHub, Apple, Turi, AWS, Microsoft\n", - "Jeff Boudier\n", - "Product + Growth at Hugging Face\n", - "See all employees\n", - "Updates\n", - "Hugging Face\n", - "reposted this\n", - "Gradio\n", - "47,326 followers\n", - "7h\n", - "Report this post\n", - "NOW you can add AI to your Slack, Discord in just few steps with Gradio!ü§©\n", - "\n", - "üî•Create Slack apps, Discord bots, or Intercom-style website widgets in ANY modality (Text, image, Video, Audio, Omni etc)! Keep reading to learn how ‚¨áÔ∏è\n", - "\n", - "Guide: üöÄ Creating a Slack Bot from a Gradio App üöÄ\n", - "Read here:\n", - "https://lnkd.in/g2_Bydrj\n", - "ü§éDo you love building stuff with Gradio? 
Support us on GitHub:\n", - "Gradio.dev\n", - "‚Ķmore\n", - "50\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Daniel V.\n", - "Machine Learning Librarian@ü§ó | Championing Open Science & Machine Learning\n", - "21h\n", - "Report this post\n", - "Introducing FineWeb-C üåêüéì, a community-built dataset for improving language models in ALL languages. \n", - "\n", - "Inspired by FineWeb-Edu the community is labelling the educational quality of texts for many languages. \n", - "\n", - "318 annotators, 32K+ annotations, 12 languages - and growing!üåç\n", - "57\n", - "2 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Merve Noyan\n", - "open-sourceress at ü§ó | Google Developer Expert in Machine Learning, MSc Candidate in Data Science\n", - "22h\n", - "Report this post\n", - "Fine-tune ColPali for your multimodal RAG use case üî•\n", - "\n", - "ColPali just landed to\n", - "Hugging Face\n", - "transformers and I have built a simple fine-tuning tutorial with QLoRA ü§ó\n", - "You can fine-tune the model with 32 GB VRAM with batch size of 4 (which can run on Colab A100)\n", - "Link in comments üí¨\n", - "267\n", - "4 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "ü§ñ Avthar Sewrathan\n", - "AI and Developer Product Leader | I talk about using AI and building AI apps\n", - "1d\n", - "Report this post\n", - "TIL: You can now load any\n", - "Hugging Face\n", - "dataset into PostgreSQL with just 1 line of SQL ü§Ø\n", - "\n", - "All thanks to the pgai PostgreSQL extension. 
\n", - "\n", - "Shoutout to\n", - "Matvey Arye\n", - "from the\n", - "Timescale\n", - "AI engineering team for implementing this.\n", - "\n", - "Learn more about using PostgreSQL with HuggingFace datasets in the HuggingFace docs:\n", - "https://lnkd.in/eS4hqSDq\n", - "#postgresql\n", - "#huggingface\n", - "#opensource\n", - "180\n", - "14 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Argilla\n", - "10,266 followers\n", - "1d\n", - "Report this post\n", - "üé¢ Push to Hub: Export your dataset to the Hugging Face Hub directly from the Argilla UI.\n", - "\n", - "We‚Äôre super excited to announce that we've closed the loop: now you can load a dataset from the Hub, open it on\n", - "Argilla\n", - "UI, label it, and push the annotated dataset to the Hub. All this without a line of code!\n", - "\n", - "\n", - "ùó™ùóµùòÜ ùòÄùóµùóºùòÇùóπùó± ùòÜùóºùòÇ ùòÇùòÄùó≤ ùó∂ùòÅ?\n", - "\n", - "Your AI project's impact depends heavily on the effort and care you put into your data. 
This new feature enables you to iterate faster and make annotated data available in the right format for training and evaluation.\n", - "\n", - "\n", - "ùóõùóºùòÑ ùó±ùóºùó≤ùòÄ ùó∂ùòÅ ùòÑùóºùóøùó∏?\n", - "\n", - "1Ô∏è‚É£ Import initial data from a CSV or any format to Hugging Face\n", - "2Ô∏è‚É£ Load it into the Argilla UI and configure the annotation task\n", - "3Ô∏è‚É£ Annotate your dataset\n", - "üöÄ Click on ‚ÄúPush to Hub‚Äù and share the dataset with your team (or the entire world)\n", - "\n", - "üëâ ùó•ùó≤ùóÆùó±ùòÜ ùòÅùóº ùòÅùóøùòÜ ùó∂ùòÅ ùóºùòÇùòÅ?\n", - "\n", - "Get started here:\n", - "https://lnkd.in/dhA-swR5\n", - "Release highlights:\n", - "https://lnkd.in/dbdQXG-W\n", - "35\n", - "3 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Daniel V.\n", - "Machine Learning Librarian@ü§ó | Championing Open Science & Machine Learning\n", - "1d\n", - "Report this post\n", - "Hot take: shipping BERT-sized models in 2025 will benefit far more people than sharing an LLM overfitted to some saturated leaderboards \n", - "\n", - "We're already seeing ModernBERT finetunes on the\n", - "Hugging Face\n", - "Hub. My guess is we'll see hundreds of these by the end of 2025.\n", - "80\n", - "4 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Gradio\n", - "47,326 followers\n", - "1d\n", - "Edited\n", - "Report this post\n", - "ü§Øüî•LEARN HOW TO CREATE interactive agentic chatbots using Gradio that are capable of showcasing the Thoughts, Tasks, and interim responses of Multiple Agents as you await the final answer from your AI assistant.\n", - "\n", - "üéØ Customer Support multi-agents with\n", - "CrewAI\n", - "and\n", - "Gradio\n", - "Showcasing here, a user-friendly, high-performing multi-agent gradio app. 
TO operate it, simply enter a webpage URL along with your questions related to that page, and in turn receive a high-quality response from the CrewAI Multi-Agent setup.\n", - "\n", - "üöÄAccess this app on\n", - "Hugging Face\n", - "Spaces:\n", - "https://lnkd.in/g6kXp_D2\n", - "‚Ķmore\n", - "72\n", - "1 Comment\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Clem Delangue ü§ó\n", - "Clem Delangue ü§ó is an Influencer\n", - "Co-founder & CEO at Hugging Face\n", - "2d\n", - "Report this post\n", - "In the past few months, we've invested a lot of efforts in improving the user management features of the Hugging Face hub that more than 5M AI builder are now using. It helps not only for easier organization collaboration but also for security (for example to make sure ex team members don't still have access to private models). \n", - "\n", - "If your manager, VP AI or admin/CISO is not aware, mention them below so that we can connect if they have any questions or feedback as most of these features are part of the Enterprise hub subscriptions:\n", - "https://lnkd.in/e-RY-3vs\n", - ")\n", - "\n", - "Cheers!\n", - "47\n", - "3 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Clem Delangue ü§ó\n", - "Clem Delangue ü§ó is an Influencer\n", - "Co-founder & CEO at Hugging Face\n", - "4d\n", - "Report this post\n", - "Just 10 days after o1's public debut, we‚Äôre thrilled to unveil the open-source version of the groundbreaking technique behind its success: scaling test-time compute ü߆üí° \n", - "\n", - "By giving models more \"time to think,\" Llama 1B outperforms Llama 8B in math‚Äîbeating a model 8x its size. The full recipe is open-sourceü§Ø \n", - "\n", - "This is the power of open science and open-source AI! 
üåç‚ú®\n", - "5,292\n", - "125 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Hugging Face\n", - "reposted this\n", - "Philipp Schmid\n", - "Technical Lead & LLMs at Hugging Face ü§ó | AWS ML HERO ü¶∏ü誂ôÇÔ∏è\n", - "1d\n", - "Report this post\n", - "ModernBERT, BERT revisited in the age of LLMs and Generative AI!\n", - "LightOn\n", - "and\n", - "Answer.ai\n", - "modernized BERT! Improved architecture with 8192 context length, flash attention, and trained on 2T tokens. ModernBERT outperforms version BERT and RoBERTa versions! üëÄ\n", - "\n", - "TL;DR;\n", - "2Ô∏è‚É£¬†Comes in 2 sizes base (139M) and large (395M)\n", - "üöĬ†Better performance across all metrics than the original BERT\n", - "üìè 8,192 token context length (16x longer than BERT)\n", - "‚ö° Modern architecture with Flash Attention 2, RoPE embeddings, and alternating attention\n", - "üìö Trained on 2 trillion tokens, primarily English and Code\n", - "üí® 2-4x faster than other models with mixed-length inputs\n", - "üî쬆Released under Apache 2.0\n", - "ü§ó¬†Available on\n", - "Hugging Face\n", - "and Transformers (main)\n", - "\n", - "Models:\n", - "https://lnkd.in/ethiJ2xh\n", - "Blog:\n", - "https://lnkd.in/ebiEzb4P\n", - "Paper:\n", - "https://lnkd.in/ezR8MUBF\n", - "1,844\n", - "67 Comments\n", - "Like\n", - "Comment\n", - "Share\n", - "Join now to see what you are missing\n", - "Find people you know at Hugging Face\n", - "Browse recommended jobs for you\n", - "View all updates, news, and articles\n", - "Join now\n", - "Similar pages\n", - "Anthropic\n", - "Research Services\n", - "Mistral AI\n", - "Technology, Information and Internet\n", - "Paris, France\n", - "OpenAI\n", - "Research Services\n", - "San Francisco, CA\n", - "LangChain\n", - "Technology, Information and Internet\n", - "Perplexity\n", - "Software Development\n", - "San Francisco, California\n", - "Generative AI\n", - "Technology, Information and Internet\n", - "Google DeepMind\n", - "Research Services\n", - "London, 
London\n", - "LlamaIndex\n", - "Technology, Information and Internet\n", - "San Francisco, California\n", - "DeepLearning.AI\n", - "Software Development\n", - "Palo Alto, California\n", - "Cohere\n", - "Software Development\n", - "Toronto, Ontario\n", - "Show more similar pages\n", - "Show fewer similar pages\n", - "Browse jobs\n", - "Engineer jobs\n", - "555,845 open jobs\n", - "Machine Learning Engineer jobs\n", - "148,937 open jobs\n", - "Scientist jobs\n", - "48,969 open jobs\n", - "Software Engineer jobs\n", - "300,699 open jobs\n", - "Intern jobs\n", - "71,196 open jobs\n", - "Developer jobs\n", - "258,935 open jobs\n", - "Analyst jobs\n", - "694,057 open jobs\n", - "Intelligence Specialist jobs\n", - "7,156 open jobs\n", - "Manager jobs\n", - "1,880,925 open jobs\n", - "Data Scientist jobs\n", - "264,158 open jobs\n", - "Director jobs\n", - "1,220,357 open jobs\n", - "Associate jobs\n", - "1,091,945 open jobs\n", - "Python Developer jobs\n", - "46,642 open jobs\n", - "Evangelist jobs\n", - "5,068 open jobs\n", - "Data Engineer jobs\n", - "192,126 open jobs\n", - "Vice President jobs\n", - "235,270 open jobs\n", - "Quantitative Analyst jobs\n", - "19,570 open jobs\n", - "Program Manager jobs\n", - "243,900 open jobs\n", - "Data Science Specialist jobs\n", - "2,441 open jobs\n", - "Lead Software Engineer jobs\n", - "68,215 open jobs\n", - "Show more jobs like this\n", - "Show fewer jobs like this\n", - "Funding\n", - "Hugging Face\n", - "7 total rounds\n", - "Last Round\n", - "Series D\n", - "Feb 16, 2024\n", - "External Crunchbase Link for last round of funding\n", - "See more info on\n", - "crunchbase\n", - "More searches\n", - "More searches\n", - "Engineer jobs\n", - "Intern jobs\n", - "Machine Learning Engineer jobs\n", - "Software Engineer jobs\n", - "Scientist jobs\n", - "Developer jobs\n", - "Research Intern jobs\n", - "Analyst jobs\n", - "Intelligence Specialist jobs\n", - "Quantitative Analyst jobs\n", - "Technician jobs\n", - "Data Science 
Specialist jobs\n", - "Project Manager jobs\n", - "Summer Intern jobs\n", - "Manager jobs\n", - "Senior Staff Engineer jobs\n", - "PHD jobs\n", - "Trader jobs\n", - "Researcher jobs\n", - "Data Scientist jobs\n", - "Writer jobs\n", - "Data Analyst jobs\n", - "Product Designer jobs\n", - "Back End Developer jobs\n", - "Spring Intern jobs\n", - "Program Manager jobs\n", - "Technology Officer jobs\n", - "Software Intern jobs\n", - "Security Professional jobs\n", - "Senior Software Engineer jobs\n", - "Python Developer jobs\n", - "Engineering Manager jobs\n", - "Web Developer jobs\n", - "Graduate jobs\n", - "Full Stack Engineer jobs\n", - "Professor jobs\n", - "Head jobs\n", - "Verification Manager jobs\n", - "User Experience Designer jobs\n", - "Recruiter jobs\n", - "Chief Executive Officer jobs\n", - "Associate jobs\n", - "Support Developer jobs\n", - "Senior Firmware Engineer jobs\n", - "Marketing Manager jobs\n", - "Modeling Engineer jobs\n", - "Designer jobs\n", - "Automation Lead jobs\n", - "Options Trader jobs\n", - "Agile Coach jobs\n", - "Research Engineer jobs\n", - "Software Quality Assurance Analyst jobs\n", - "User Experience Manager jobs\n", - "Technical Intern jobs\n", - "Junior Network Engineer jobs\n", - "Information Technology Recruiter jobs\n", - "User Researcher jobs\n", - "Player jobs\n", - "Engineering Project Manager jobs\n", - "Digital Strategist jobs\n", - "LinkedIn\n", - "© 2024\n", - "About\n", - "Accessibility\n", - "User Agreement\n", - "Privacy Policy\n", - "Cookie Policy\n", - "Copyright Policy\n", - "Brand Policy\n", - "Guest Controls\n", - "Community Guidelines\n", - "ÿߟÑÿπÿ±ÿ®Ÿäÿ© (Arabic)\n", - "‡¶¨‡¶æ‡¶Ç‡¶≤‡¶æ (Bangla)\n", - "ƒåe≈°tina (Czech)\n", - "Dansk (Danish)\n", - "Deutsch (German)\n", - "ŒïŒªŒªŒ∑ŒΩŒπŒ∫Œ¨ (Greek)\n", - "English (English)\n", - "Espa√±ol (Spanish)\n", - "ŸÅÿßÿ±ÿ≥€å (Persian)\n", - "Suomi (Finnish)\n", - "Fran√ßais (French)\n", - "‡§π‡§ø‡§Ç‡§¶‡•Ä (Hindi)\n", - "Magyar (Hungarian)\n", - "Bahasa Indonesia 
(Indonesian)\n", - "Italiano (Italian)\n", - "◊¢◊ë◊®◊ô◊™ (Hebrew)\n", - "Êó•Êú¨Ë™û (Japanese)\n", - "Ìïú͵≠Ïñ¥ (Korean)\n", - "‡§Æ‡§∞‡§æ‡§†‡•Ä (Marathi)\n", - "Bahasa Malaysia (Malay)\n", - "Nederlands (Dutch)\n", - "Norsk (Norwegian)\n", - "‡®™‡©∞‡®ú‡®æ‡®¨‡©Ä (Punjabi)\n", - "Polski (Polish)\n", - "Portugu√™s (Portuguese)\n", - "Rom√¢nƒÉ (Romanian)\n", - "–†—É—Å—Å–∫–∏–π (Russian)\n", - "Svenska (Swedish)\n", - "‡∞§‡±Ü‡∞≤‡±Å‡∞ó‡±Å (Telugu)\n", - "‡∏†‡∏≤‡∏©‡∏≤‡πч∏ó‡∏¢ (Thai)\n", - "Tagalog (Tagalog)\n", - "T√ºrk√ße (Turkish)\n", - "–£–∫—Ä–∞—ó–Ω—Å—å–∫–∞ (Ukrainian)\n", - "Ti·∫øng Vi·ªát (Vietnamese)\n", - "ÁÆÄ‰Ωì‰∏≠Êñá (Chinese (Simplified))\n", - "Ê≠£È´î‰∏≠Êñá (Chinese (Traditional))\n", - "Language\n", - "Agree & Join LinkedIn\n", - "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", - "User Agreement\n", - ",\n", - "Privacy Policy\n", - ", and\n", - "Cookie Policy\n", - ".\n", - "Sign in to see who you already know at Hugging Face\n", - "Sign in\n", - "Welcome back\n", - "Email or phone\n", - "Password\n", - "Show\n", - "Forgot password?\n", - "Sign in\n", - "or\n", - "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", - "User Agreement\n", - ",\n", - "Privacy Policy\n", - ", and\n", - "Cookie Policy\n", - ".\n", - "New to LinkedIn?\n", - "Join now\n", - "or\n", - "New to LinkedIn?\n", - "Join now\n", - "By clicking Continue to join or sign in, you agree to LinkedIn‚Äôs\n", - "User Agreement\n", - ",\n", - "Privacy Policy\n", - ", and\n", - "Cookie Policy\n", - ".\n", - "LinkedIn\n", - "LinkedIn is better on the app\n", - "Don‚Äôt have the app? 
Get it in the Microsoft Store.\n", - "Open the app\n", - "\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "print(get_all_details(\"https://huggingface.co\"))" ] @@ -2674,359 +300,10 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "cd909e0b-1312-4ce2-a553-821e795d7572", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found links: {'links': [{'type': 'about page', 'url': 'https://huggingface.co/'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog', 'url': 'https://huggingface.co/blog'}, {'type': 'company page', 'url': 'https://huggingface.co/enterprise'}]}\n", - "You are looking at a company called: HuggingFace\n", - "Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n", - "Landing page:\n", - "Webpage Title:\n", - "Hugging Face – The AI community building the future.\n", - "Webpage Contents:\n", - "Hugging Face\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Posts\n", - "Docs\n", - "Enterprise\n", - "Pricing\n", - "Log In\n", - "Sign Up\n", - "The AI community building the future.\n", - "The platform where the machine learning community collaborates on models, datasets, and applications.\n", - "Trending on\n", - "this week\n", - "Models\n", - "IamCreateAI/Ruyi-Mini-7B\n", - "Updated\n", - "4 days ago\n", - "•\n", - "8.17k\n", - "•\n", - "352\n", - "Datou1111/shou_xin\n", - "Updated\n", - "12 days ago\n", - "•\n", - "28.3k\n", - "•\n", - "672\n", - "answerdotai/ModernBERT-base\n", - "Updated\n", - "1 day ago\n", - "•\n", - "6.24k\n", - "•\n", - "236\n", - "meta-llama/Llama-3.3-70B-Instruct\n", - "Updated\n", - "11 days ago\n", - "•\n", - "236k\n", - "•\n", - "1.21k\n", - "tencent/HunyuanVideo\n", - "Updated\n", - "3 days ago\n", - "•\n", - "6.01k\n", - "•\n", - "1.2k\n", - "Browse 400k+ models\n", - "Spaces\n", - "Running\n", - "on\n", - 
"Zero\n", - "1.79k\n", - "🏢\n", - "TRELLIS\n", - "Scalable and Versatile 3D Generation from images\n", - "Running\n", - "306\n", - "📝\n", - "Scaling test-time compute\n", - "Running\n", - "on\n", - "Zero\n", - "470\n", - "🚀\n", - "Flux Style Shaping\n", - "Optical illusions and style transfer with FLUX\n", - "Running\n", - "on\n", - "CPU Upgrade\n", - "6.11k\n", - "👕\n", - "Kolors Virtual Try-On\n", - "Running\n", - "on\n", - "Zero\n", - "965\n", - "📈\n", - "IC Light V2\n", - "Browse 150k+ applications\n", - "Datasets\n", - "fka/awesome-chatgpt-prompts\n", - "Updated\n", - "Sep 3\n", - "•\n", - "6.83k\n", - "•\n", - "6.58k\n", - "O1-OPEN/OpenO1-SFT\n", - "Updated\n", - "4 days ago\n", - "•\n", - "1.86k\n", - "•\n", - "234\n", - "HuggingFaceFW/fineweb-2\n", - "Updated\n", - "13 days ago\n", - "•\n", - "77.7k\n", - "•\n", - "342\n", - "HuggingFaceTB/finemath\n", - "Updated\n", - "1 day ago\n", - "•\n", - "1.86k\n", - "•\n", - "43\n", - "amphora/QwQ-LongCoT-130K\n", - "Updated\n", - "16 days ago\n", - "•\n", - "1.34k\n", - "•\n", - "85\n", - "Browse 100k+ datasets\n", - "The Home of Machine Learning\n", - "Create, discover and collaborate on ML better.\n", - "The collaboration platform\n", - "Host and collaborate on unlimited public models, datasets and applications.\n", - "Move faster\n", - "With the HF Open source stack.\n", - "Explore all modalities\n", - "Text, image, video, audio or even 3D.\n", - "Build your portfolio\n", - "Share your work with the world and build your ML profile.\n", - "Sign Up\n", - "Accelerate your ML\n", - "We provide paid Compute and Enterprise solutions.\n", - "Compute\n", - "Deploy on optimized\n", - "Inference Endpoints\n", - "or update your\n", - "Spaces applications\n", - "to a GPU in a few clicks.\n", - "View pricing\n", - "Starting at $0.60/hour for GPU\n", - "Enterprise\n", - "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", - "\t\t\tdedicated support.\n", - "Getting 
started\n", - "Starting at $20/user/month\n", - "Single Sign-On\n", - "Regions\n", - "Priority Support\n", - "Audit Logs\n", - "Resource Groups\n", - "Private Datasets Viewer\n", - "More than 50,000 organizations are using Hugging Face\n", - "Ai2\n", - "Enterprise\n", - "non-profit\n", - "•\n", - "366 models\n", - "•\n", - "1.76k followers\n", - "AI at Meta\n", - "Enterprise\n", - "company\n", - "•\n", - "2.05k models\n", - "•\n", - "3.83k followers\n", - "Amazon Web Services\n", - "company\n", - "•\n", - "21 models\n", - "•\n", - "2.45k followers\n", - "Google\n", - "company\n", - "•\n", - "911 models\n", - "•\n", - "5.76k followers\n", - "Intel\n", - "company\n", - "•\n", - "217 models\n", - "•\n", - "2.07k followers\n", - "Microsoft\n", - "company\n", - "•\n", - "351 models\n", - "•\n", - "6.29k followers\n", - "Grammarly\n", - "company\n", - "•\n", - "10 models\n", - "•\n", - "102 followers\n", - "Writer\n", - "Enterprise\n", - "company\n", - "•\n", - "17 models\n", - "•\n", - "186 followers\n", - "Our Open Source\n", - "We are building the foundation of ML tooling with the community.\n", - "Transformers\n", - "136,571\n", - "State-of-the-art ML for Pytorch, TensorFlow, and JAX.\n", - "Diffusers\n", - "26,740\n", - "State-of-the-art diffusion models for image and audio generation in PyTorch.\n", - "Safetensors\n", - "2,960\n", - "Simple, safe way to store and distribute neural networks weights safely and quickly.\n", - "Hub Python Library\n", - "2,177\n", - "Client library for the HF Hub: manage repositories from your Python runtime.\n", - "Tokenizers\n", - "9,165\n", - "Fast tokenizers, optimized for both research and production.\n", - "PEFT\n", - "16,767\n", - "Parameter efficient finetuning methods for large models.\n", - "Transformers.js\n", - "12,421\n", - "State-of-the-art Machine Learning for the web. 
Run Transformers directly in your browser, with no need for a server.\n", - "timm\n", - "32,668\n", - "State-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\n", - "TRL\n", - "10,382\n", - "Train transformer language models with reinforcement learning.\n", - "Datasets\n", - "19,378\n", - "Access and share datasets for computer vision, audio, and NLP tasks.\n", - "Text Generation Inference\n", - "9,484\n", - "Toolkit to serve Large Language Models.\n", - "Accelerate\n", - "8,082\n", - "Easily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n", - "System theme\n", - "Website\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Tasks\n", - "Inference Endpoints\n", - "HuggingChat\n", - "Company\n", - "About\n", - "Brand assets\n", - "Terms of service\n", - "Privacy\n", - "Jobs\n", - "Press\n", - "Resources\n", - "Learn\n", - "Documentation\n", - "Blog\n", - "Forum\n", - "Service Status\n", - "Social\n", - "GitHub\n", - "Twitter\n", - "LinkedIn\n", - "Discord\n", - "\n", - "\n", - "\n", - "about page\n", - "Webpage Title:\n", - "Hugging Face – The AI community building the future.\n", - "Webpage Contents:\n", - "Hugging Face\n", - "Models\n", - "Datasets\n", - "Spaces\n", - "Posts\n", - "Docs\n", - "Enterprise\n", - "Pricing\n", - "Log In\n", - "Sign Up\n", - "The AI community building the future.\n", - "The platform where the machine learning community collaborates on models, datasets, and applications.\n", - "Trending on\n", - "this week\n", - "Models\n", - "IamCreateAI/Ruyi-Mini-7B\n", - "Updated\n", - "4 days ago\n", - "•\n", - "8.17k\n", - "•\n", - "352\n", - "Datou1111/shou_xin\n", - "Updated\n", - "12 days ago\n", - "•\n", - "28.3k\n", - "•\n", - "672\n", - "answerdotai/ModernBERT-base\n", - "Updated\n", - "1 day ago\n", - "•\n", - "6.24k\n", - "•\n", - "236\n", - "meta-llama/Llama-3.3-70B-Instruct\n", - "Updated\n", - "11 days ago\n", - "•\n", - "236k\n", - "•\n", - "1.21k\n", - "tencent/HunyuanVideo\n", 
- "Updated\n", - "3 days ago\n", - "•\n", - "6.01k\n", - "•\n", - "1.2k\n", - "Browse 400k+ models\n", - "Spaces\n", - "Running\n", - "on\n", - "Zero\n", - "1.79k\n", - "🏢\n", - "TRELLIS\n", - "Scalable and Versatile 3D Generation from images\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "print(get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\"))" ] @@ -3052,103 +329,10 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "e093444a-9407-42ae-924a-145730591a39", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found links: {'links': [{'type': 'home page', 'url': 'https://huggingface.com/'}, {'type': 'about page', 'url': 'https://huggingface.com/huggingface'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'enterprise page', 'url': 'https://huggingface.com/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.com/pricing'}, {'type': 'blog page', 'url': 'https://huggingface.com/blog'}, {'type': 'community page', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" - ] - }, - { - "data": { - "text/markdown": [ - "# Hugging Face Brochure\n", - "\n", - "**Hugging Face** \n", - "*The AI community building the future.*\n", - "\n", - "---\n", - "\n", - "## About Us\n", - "Hugging Face is a pioneering platform where the machine learning community comes together to collaborate on models, datasets, and applications. 
With over 400,000 models and 100,000 datasets available, we empower users to create, discover, and innovate in the field of machine learning.\n", - "\n", - "### Our Mission\n", - "To accelerate the development and deployment of machine learning applications, making cutting-edge technology accessible to everyone.\n", - "\n", - "---\n", - "\n", - "## Company Culture\n", - "At Hugging Face, we believe in the power of collaboration and open-source technology. We foster an inclusive environment where every team member's input is valued, allowing for diverse ideas and perspectives. Our culture emphasizes continuous learning, innovation, and a commitment to advancing AI for the greater good.\n", - "\n", - "---\n", - "\n", - "## Customers\n", - "Hugging Face serves more than 50,000 organizations, including industry leaders such as:\n", - "\n", - "- **Amazon Web Services**\n", - "- **Meta**\n", - "- **Google**\n", - "- **Microsoft**\n", - "- **Intel**\n", - " \n", - "These organizations utilize our platform for various machine learning tasks, enhancing their workflows and outputs.\n", - "\n", - "---\n", - "\n", - "## Careers at Hugging Face\n", - "We are always on the lookout for talented individuals who are passionate about AI and machine learning. 
Career opportunities at Hugging Face offer:\n", - "\n", - "- A collaborative work environment\n", - "- Remote work flexibility\n", - "- Continuing education and mentorship\n", - "- Opportunities to work on impactful projects\n", - "\n", - "**Join us and help shape the future of AI!**\n", - "\n", - "---\n", - "\n", - "## Our Offerings\n", - "### Models\n", - "- Access over 400,000 machine learning models, covering a variety of tasks and technologies.\n", - "\n", - "### Datasets\n", - "- Discover and share 100,000+ datasets tailored for computer vision, audio, and NLP tasks.\n", - "\n", - "### Spaces\n", - "- Utilize our application space to run various applications including real-time projects and demonstrations.\n", - "\n", - "### Enterprise Solutions\n", - "- With dedicated support and industry-grade security, our Enterprise solutions are designed for organizations looking to implement AI at scale.\n", - "\n", - "---\n", - "\n", - "## Get Started Today!\n", - "**Sign up now** to become part of the Hugging Face community and access an array of tools to accelerate your machine learning journey. 
\n", - "[Sign Up Here](#)\n", - "\n", - "---\n", - "\n", - "**Stay Connected** \n", - "Follow us on our social media platforms:\n", - "- [GitHub](#)\n", - "- [Twitter](#)\n", - "- [LinkedIn](#)\n", - "- [Discord](#)\n", - "\n", - "**Hugging Face – Building the Future of AI**" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "create_brochure(\"HuggingFace\", \"https://huggingface.com\")" ] @@ -3191,178 +375,20 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found links: {'links': [{'type': 'about page', 'url': 'https://huggingface.co'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'community discussion', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" - ] - }, - { - "data": { - "text/markdown": [ - "# Welcome to Hugging Face\n", - "\n", - "## The AI Community Building the Future\n", - "\n", - "At Hugging Face, we bring together the machine learning community to collaborate on groundbreaking models, datasets, and applications. 
Our platform is a vibrant hub where innovation meets practicality, empowering developers and researchers to create state-of-the-art AI solutions.\n", - "\n", - "---\n", - "\n", - "### 🏆 What We Offer\n", - "\n", - "- **Models**: Access and discover over **400k+ models** including the latest advancements in AI.\n", - "- **Datasets**: A rich collection of **100k+ datasets** tailored for various machine learning tasks.\n", - "- **Spaces**: Collaborate on applications and projects seamlessly within our community’s creative workspace.\n", - "\n", - "---\n", - "\n", - "### 🌏 Our Customers\n", - "\n", - "Join the ranks of **50,000+ organizations** leveraging Hugging Face’s offerings, including industry giants like:\n", - "- **Meta**\n", - "- **Amazon Web Services**\n", - "- **Google**\n", - "- **Microsoft**\n", - "- **Grammarly**\n", - "\n", - "These companies trust us to accelerate their machine learning initiatives and foster innovation.\n", - "\n", - "---\n", - "\n", - "### 🌱 Company Culture\n", - "\n", - "At Hugging Face, we embrace an open-source ethos, encouraging collaboration and contribution from the community. Our culture is centered around creativity, innovation, and inclusivity. We believe in empowering individuals and teams by providing the right tools and support to shape the future of AI.\n", - "\n", - "---\n", - "\n", - "### 🚀 Careers at Hugging Face\n", - "\n", - "We are on the lookout for passionate individuals to join our team! If you share our vision of an accessible AI landscape, explore the career opportunities we offer. We provide an environment that supports academic growth, teamwork, and professional development while making a meaningful impact in the machine learning realm.\n", - "\n", - "#### Current Openings Include:\n", - "- Machine Learning Engineers\n", - "- Data Scientists\n", - "- Software Developers\n", - "- Community Managers\n", - "\n", - "---\n", - "\n", - "### 💡 Join Us\n", - "\n", - "Are you ready to be part of a revolution in AI? 
**[Sign Up](#)** today to explore the possibilities with Hugging Face or **[Log In](#)** if you’re already part of our community.\n", - "\n", - "Let’s build the future of AI together!\n", - "\n", - "---\n", - "\n", - "*For inquiries about our enterprise solutions, pricing, or community involvement, feel free to reach out through our website.* \n", - "\n", - "**Connect with us:** \n", - "[Twitter](#) | [LinkedIn](#) | [GitHub](#) | [Forum](#)" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "87bd1188", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found links: {'links': [{'type': 'homepage', 'url': 'https://huggingface.co/'}, {'type': 'about page', 'url': 'https://huggingface.co/huggingface'}, {'type': 'enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.co/pricing'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'discussion forum', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" - ] - }, - { - "data": { - "text/markdown": [ - "\n", - "# Hugging Face: The AI Community Building the Future\n", - "\n", - "Welcome to Hugging Face, the leading collaborative platform for the machine learning community. With a robust environment designed for creating, discovering, and deploying machine learning models, datasets, and applications, Hugging Face is at the frontier of artificial intelligence innovation. 
\n", - "\n", - "---\n", - "\n", - "## About Us\n", - "At Hugging Face, we believe in the power of collaboration. Our platform enables users to work together on projects that range from machine-learning models to expansive datasets. With over 400,000 models and 100,000 datasets available, we provide the tools necessary to help researchers, developers, and organizations accelerate their machine learning projects.\n", - "\n", - "- **Trending Models This Week:**\n", - " - **IamCreateAI/Ruyi-Mini-7B** | 8.17k | Updated 4 days ago\n", - " - **Datou1111/shou_xin** | 28.3k | Updated 12 days ago\n", - " - **meta-llama/Llama-3.3-70B-Instruct** | 236k | Updated 11 days ago\n", - "\n", - "Explore our community-driven approach that integrates state-of-the-art tools like Transformers, DiffUsers, and PEFT (Parameter Efficient Finetuning).\n", - "\n", - "---\n", - "\n", - "## Company Culture\n", - "Hugging Face fosters a vibrant and inclusive company culture, aiming to empower individuals and teams through transparent practices and open-source methodologies. We believe in “AI for everyone,” promoting accessibility and co-creation within the AI community. 
\n", - "\n", - "### Why Work With Us?\n", - "- **Collaborative Environment**: Join a diverse team of experts and enthusiasts dedicated to pushing the boundaries of AI and machine learning.\n", - "- **Open Source Commitment**: Contribute to freely accessible tools that serve the global community.\n", - "- **Flexible Work**: We support remote work and provide a range of job opportunities tailored to different areas of expertise.\n", - "\n", - "---\n", - "\n", - "## Customers & Organizations\n", - "Over 50,000 organizations utilize Hugging Face in various industries, including notable names such as:\n", - "- **Meta AI**\n", - "- **Amazon Web Services**\n", - "- **Google**\n", - "- **Microsoft**\n", - "\n", - "Our enterprise solutions offer seamless integration with advanced security features, making us a trusted partner for both startups and established corporations.\n", - "\n", - "---\n", - "\n", - "## Careers at Hugging Face\n", - "We are always on the lookout for passionate individuals to join our team. Explore our open positions in areas such as software development, research, marketing, and customer support.\n", - "\n", - "- **Open Positions**: \n", - " - Machine Learning Engineer\n", - " - Data Scientist\n", - " - Community Manager\n", - "\n", - "Join us in shaping the future of AI. 
\n", - "\n", - "**[Explore Careers](#)**\n", - "\n", - "---\n", - "\n", - "## Join the Hugging Face Community\n", - "Whether you're looking to develop cutting-edge AI models, contribute to open-source projects, or advance your career in this dynamic field, Hugging Face is your gateway to innovation.\n", - "\n", - "**[Learn More](#)** | **[Sign Up Today](#)**\n", - "\n", - "Together, let's build the future of AI!\n", - "\n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" ] @@ -3453,92 +479,10 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "744bfc05", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found links: {'links': [{'type': 'about page', 'url': 'https://openai.com/about'}, {'type': 'careers page', 'url': 'https://openai.com/careers'}]}\n" - ] - }, - { - "data": { - "text/markdown": [ - "It seems that the landing and related pages for OpenAI did not yield any specific content. However, I can create a creative and engaging brochure based on general knowledge about OpenAI. Here's a humorous and entertaining brochure written in Urdu:\n", - "\n", - "\n", - "# 🎉 اوپن اے آئی: ہوشیار robots کا دوست! 🎉\n", - "\n", - "---\n", - "\n", - "## About Us - ہمارے بارے میں:\n", - "\n", - "ہماری کمپنی اوپن اے آئی، 2015 میں بنی۔ ہم نے سوچا کہ \"کیوں نہ ایک ایسا انٹیلیجنٹ سسٹم بنائیں جو انسانوں کی مدد کرے؟\" تو ہم نے کام شروع کیا اور دیکھیں! ہم نے ایک نئی دنیا کی بنیاد رکھی۔ ہماری مشن ہے \"تمام لوگوں کے لئے AI کی طاقت کو قابل رسائی بنانا\"۔ آفاقی طاقت کو ڈھونڈتے ہیں، جیسے آپ کے فرج میں چھپے ہوئے برگر!\n", - "\n", - "---\n", - "\n", - "## What We Offer - ہم کیا پیش کرتے ہیں:\n", - "\n", - "ہم AI کے شوقین ہیں! 
🤖 ہم مختلف پروڈکٹس اور سروسز پیش کرتے ہیں، جیسے کہ:\n", - "\n", - "- **GPT-3**: آپ کے سوالات کے جواب دینے کے لئے تیار!\n", - "- **تخلیقی تحریر**: جنریٹنگ آئیڈیاز جب آپ کی تخلیقیت بریک ہو جائے!\n", - "- **AI ٹولز**: آپ کی زندگی کو مزید آسان بنانے کے لئے!\n", - "\n", - "ہمارے صارفین کہتے ہیں، \"اپنی زندگی میں اوپن اے آئی کی ضرورت ہے، جیسے موٹیویشن کی ضرورت ہوتی ہے!\"\n", - "\n", - "---\n", - "\n", - "## Our Culture - ہماری ثقافت:\n", - "\n", - "ہماری کمپنی میں، ہمارا بنیادی اصول ہے: \"پیار اور انوکھا خیالات!\" 🤗 ہم نے انوکھے، تعاون پر مبنی ماحول کی بنیاد رکھی، جہاں ہر کوئی اپنی بات کہہ سکتا ہے، یہاں تک کہ ونڈو کے باہر کھڑا درخت بھی! ہم کمیونٹی کی خدمت کیلئے ہمیشہ تیار رہتے ہیں، وہ بھی سوشل میڈٰیا پر۔\n", - "\n", - "---\n", - "\n", - "## Who We Serve - ہم کس کی خدمت کرتے ہیں:\n", - "\n", - "ہم ہر اُس شخص کی خدمت کرتے ہیں جو سوپر ہیرومنٹ کی تلاش میں ہے۔ ہمارے وزیٹر، محققین، اور ٹیکنالوجی کے شوقین ہیں، اور ہمارے بہترین کلائنٹس include شامل ہیں \"بڑا دماغی جیسا سوچنے والے!\" 💡\n", - "\n", - "---\n", - "\n", - "## Join Us - ہمارے ساتھ شامل ہوں:\n", - "\n", - "آپ کو ترقی کی تلاش ہے؟ تو ہماری ٹیم کا حصہ بنیں! 🚀 ہم ہمیشہ نئے امریکی جاموں کی تلاش میں ہیں۔ آپ کو ٹریننگ، ترقی کے مواقع، اور سہولیات فراہم کریں گے۔\n", - "\n", - "📩 **درخواست دینے کے مرحلے:** ہماری ویب سائٹ پر جائیں، کیونکہ ہم جانتے ہیں کہ آپ کا خواب آپ کے قریب ہے!\n", - "\n", - "---\n", - "\n", - "## Contact Us - ہم سے رابطہ کریں:\n", - "\n", - "**پتہ:** نیٹ ورک کی دنیا \n", - "**فون:** 123-456-789 \n", - "**ایمیل:** info@openai.com \n", - "**سوشل میڈیا:** [فیس بک](#) | [ٹویٹر](#) | [لنکڈ ان](#) \n", - "**ویب سائٹ:** [openai.com](#)\n", - "\n", - "---\n", - "\n", - "## Closing Note - اختتامی نوٹ:\n", - "\n", - "ہماری کمپنی اوپن اے آئی کی طرف سے ایک شکریہ! اے آئی کی دنیا میں قدم رکھنے کا وقت آ گیا ہے! 
\n", - "\n", - "🖱️ **آج ہی رابطہ کریں یا ہماری ویب سائٹ کا دورہ کریں!**\n", - "\n", - "\n", - "**نوٹ:** واقعی ویب سائٹ کے مخصوص روابط، ای میل اور نمبر تخلیقی مقصد کے لئے ہیں۔ اس کو حقیقی معلومات کے ساتھ تبدیل کیا جا سکتا ہے۔" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "\n", "multi_lingual_stream_brochure(\"OpenAI\", \"https://openai.com/\", \"Urdu\", \"humorous, entertaining, jokey\")" @@ -3551,14 +495,6 @@ "metadata": {}, "outputs": [], "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4fb86dc6", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { From db429806bc15946513cefa605bfcb7885b4a3571 Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 01:22:20 +1100 Subject: [PATCH 22/26] Day 5 Challend one with multilingual aloing with multitone --- .../day5-multi-lingual-desire-format.ipynb | 8 -------- 1 file changed, 8 deletions(-) diff --git a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb index b17c402..17e1094 100644 --- a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb +++ b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb @@ -487,14 +487,6 @@ "\n", "multi_lingual_stream_brochure(\"OpenAI\", \"https://openai.com/\", \"Urdu\", \"humorous, entertaining, jokey\")" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b6f1e8d9", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { From 7b38868ddee956bb89b85137e71e3df5820c4e14 Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 01:38:47 +1100 Subject: [PATCH 23/26] Rename accendently --- ...-format.ipynb => day5-multi-lingual-desire-format.ipynb.ipynb} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename week1/community-contributions/{day5-multi-lingual-desire-format.ipynb => 
day5-multi-lingual-desire-format.ipynb.ipynb} (100%) diff --git a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb b/week1/community-contributions/day5-multi-lingual-desire-format.ipynb.ipynb similarity index 100% rename from week1/community-contributions/day5-multi-lingual-desire-format.ipynb rename to week1/community-contributions/day5-multi-lingual-desire-format.ipynb.ipynb From 8b52eab336c56c64ef96eef44f841d487cb6563f Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 01:43:47 +1100 Subject: [PATCH 24/26] Renamed day5-multi-lingual-desire-format.ipynb to day5-MultiLingual-MultiTone.ipynb --- ...esire-format.ipynb.ipynb => day5-MultiLingual-MultiTone.ipynb} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename week1/community-contributions/{day5-multi-lingual-desire-format.ipynb.ipynb => day5-MultiLingual-MultiTone.ipynb} (100%) diff --git a/week1/community-contributions/day5-multi-lingual-desire-format.ipynb.ipynb b/week1/community-contributions/day5-MultiLingual-MultiTone.ipynb similarity index 100% rename from week1/community-contributions/day5-multi-lingual-desire-format.ipynb.ipynb rename to week1/community-contributions/day5-MultiLingual-MultiTone.ipynb From 7446459510a6154c88c08f79739b87b050aa6245 Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 01:47:59 +1100 Subject: [PATCH 25/26] Update the code and did final changes --- week1/community-contributions/day5-MultiLingual-MultiTone.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/week1/community-contributions/day5-MultiLingual-MultiTone.ipynb b/week1/community-contributions/day5-MultiLingual-MultiTone.ipynb index 17e1094..6e07f60 100644 --- a/week1/community-contributions/day5-MultiLingual-MultiTone.ipynb +++ b/week1/community-contributions/day5-MultiLingual-MultiTone.ipynb @@ -398,7 +398,7 @@ "id": "a9e7375d", "metadata": {}, "source": [ - "## **Multi-lingual with Desire Format**\n" + "## **Multi-lingual with Multi-Tone in 
Desire Format**" ] }, { From a18900a59c8f78986d9714c3782ad2f56f6ec4d1 Mon Sep 17 00:00:00 2001 From: codenigma1 Date: Sun, 22 Dec 2024 12:04:51 +1100 Subject: [PATCH 26/26] First solution I design to get response from your favorite LLM and second is colloborative approach of two LLM added might be good specimen for the upcoming student to think about it even better and good approach to refine this approach I leave it from them --- ...eek1-collaborative-approach-two-llms.ipynb | 332 ++++++++++++++++++ 1 file changed, 332 insertions(+) create mode 100644 week1/community-contributions/week1-collaborative-approach-two-llms.ipynb diff --git a/week1/community-contributions/week1-collaborative-approach-two-llms.ipynb b/week1/community-contributions/week1-collaborative-approach-two-llms.ipynb new file mode 100644 index 0000000..87b820a --- /dev/null +++ b/week1/community-contributions/week1-collaborative-approach-two-llms.ipynb @@ -0,0 +1,332 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# **End of week 1 exercise**\n", + "\n", + "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!" + ] + }, + { + "cell_type": "markdown", + "id": "c70e5ab1", + "metadata": {}, + "source": [ + "## **1. 
Get a response from your favorite AI Tutor** " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from openai import OpenAI\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "65dace69", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key) > 10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'\n", + "\n", + "openai = OpenAI()\n", + "\n", + "ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "3673d863", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"\"\"You are the software engnieer, phd in mathematics, machine learning engnieer, and other topics\"\"\"\n", + "system_prompt += \"\"\"\n", + "When responding, always use Markdown for formatting. For any code, use well-structured code blocks with syntax highlighting,\n", + "For instance:\n", + "```python\n", + "\n", + "sample_list = [for i in range(10)]\n", + "```\n", + "Another example\n", + "```javascript\n", + " function displayMessage() {\n", + " alert(\"Hello, welcome to JavaScript!\");\n", + " }\n", + "\n", + "```\n", + "\n", + "Break down explanations into clear, numbered steps for better understanding. 
\n", + "Highlight important terms using inline code formatting (e.g., `function_name`, `variable`).\n", + "Provide examples for any concepts and ensure all examples are concise, clear, and relevant.\n", + "Your goal is to create visually appealing, easy-to-read, and informative responses.\n", + "\n", + "\"\"\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "1df78d41", + "metadata": {}, + "outputs": [], + "source": [ + "def tutor_user_prompt(question):\n", + " # Ensure the question is properly appended to the user prompt.\n", + " user_prompt = (\n", + " \"Please carefully explain the following question in a step-by-step manner for clarity:\\n\\n\"\n", + " )\n", + " user_prompt += question\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "6dccbccb", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "\n", + "def askTutor(question, MODEL):\n", + " # Generate the user prompt dynamically.\n", + " user_prompt = tutor_user_prompt(question)\n", + " \n", + " # OpenAI API call to generate response.\n", + " if MODEL == 'gpt-4o-mini':\n", + " print(f'You are getting response from {MODEL}')\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " stream=True\n", + " )\n", + " else:\n", + " MODEL == 'llama3.2'\n", + " print(f'You are getting response from {MODEL}')\n", + " stream = ollama_via_openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ],\n", + " stream=True\n", + " )\n", + "\n", + " # Initialize variables for response processing.\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " \n", + " # Process the response stream and update display 
dynamically.\n", + " for chunk in stream:\n", + " # Safely access the content attribute.\n", + " response_chunk = getattr(chunk.choices[0].delta, \"content\", \"\")\n", + " if response_chunk: # Check if response_chunk is not None or empty\n", + " response += response_chunk\n", + " # No replacement of Markdown formatting here!\n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [], + "source": [ + "# here is the question; type over this to ask something new\n", + "\n", + "question = \"\"\"\n", + "Please explain what this code does and why:\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", + "metadata": {}, + "outputs": [], + "source": [ + "askTutor(question=question, MODEL=MODEL_GPT)" + ] + }, + { + "cell_type": "markdown", + "id": "b79f9479", + "metadata": {}, + "source": [ + "## **2. Using both LLMs collaboratively approach**" + ] + }, + { + "cell_type": "markdown", + "id": "80e3c8f5", + "metadata": {}, + "source": [ + "- I thought about like similar the idea of a RAG (Retrieval-Augmented Generation) approach, is an excellent idea to improve responses by refining the user query and producing a polished, detailed final answer. Two LLM talking each other its cool!!! Here's how we can implement this:\n", + "\n", + "**Updated Concept:**\n", + "1. Refine Query with Ollama:\n", + " - Use Ollama to refine the raw user query into a well-structured prompt.\n", + " - This is especially helpful when users input vague or poorly structured queries.\n", + "2. Generate Final Response with GPT:\n", + " - Pass the refined prompt from Ollama to GPT to generate the final, detailed, and polished response.\n", + "3. 
Return the Combined Output:\n", + " - Combine the input, refined query, and the final response into a single display to ensure clarity." + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "id": "60f5ac2d", + "metadata": {}, + "outputs": [], + "source": [ + "def refine_with_ollama(raw_question):\n", + " \"\"\"\n", + " Use Ollama to refine the user's raw question into a well-structured prompt.\n", + " \"\"\"\n", + " print(\"Refining the query using Ollama...\")\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant. Refine and structure the following user input.\"},\n", + "\n", + " {\"role\": \"user\", \"content\": raw_question},\n", + " ]\n", + " response = ollama_via_openai.chat.completions.create(\n", + " model=MODEL_LLAMA,\n", + " messages=messages,\n", + " stream=False # Non-streamed refinement\n", + " )\n", + " refined_query = response.choices[0].message.content\n", + " return refined_query" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "id": "2aa4c9f6", + "metadata": {}, + "outputs": [], + "source": [ + "def ask_with_ollama_and_gpt(raw_question):\n", + " \"\"\"\n", + " Use Ollama to refine the user query and GPT to generate the final response.\n", + " \"\"\"\n", + " # Step 1: Refine the query using Ollama\n", + " refined_query = refine_with_ollama(raw_question)\n", + " \n", + " # Step 2: Generate final response with GPT\n", + " print(\"Generating the final response using GPT...\")\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": refined_query},\n", + " ]\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL_GPT,\n", + " messages=messages,\n", + " stream=True # Stream response for dynamic display\n", + " )\n", + "\n", + " # Step 3: Combine responses\n", + " response = \"\"\n", + " display_handle = display(Markdown(f\"### Refined Query:\\n\\n{refined_query}\\n\\n---\\n\\n### Final Response:\"), 
display_id=True)\n", + " for chunk in stream:\n", + " response_chunk = getattr(chunk.choices[0].delta, \"content\", \"\")\n", + " if response_chunk:\n", + " response += response_chunk\n", + " update_display(Markdown(f\"### Refined Query:\\n\\n{refined_query}\\n\\n---\\n\\n### Final Response:\\n\\n{response}\"), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "id": "4150e857", + "metadata": {}, + "outputs": [], + "source": [ + "# Example Usage\n", + "question = \"\"\"\n", + "Please explain what this code does:\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2b8935f", + "metadata": {}, + "outputs": [], + "source": [ + "ask_with_ollama_and_gpt(raw_question=question)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "086a5294", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llm_env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}