From 8d754ca9022ea0c4eb78665d0a8ce2f70e4d6304 Mon Sep 17 00:00:00 2001
From: HamzaSH6
Date: Mon, 17 Feb 2025 14:39:02 +0200
Subject: [PATCH] Week 1 Exercise Solution: Ollama with Streaming

---
 ...Week1-Exercise-OllamaStream-Solution.ipynb | 172 ++++++++++++++++++
 1 file changed, 172 insertions(+)
 create mode 100644 week1/community-contributions/Week1-Exercise-OllamaStream-Solution.ipynb

diff --git a/week1/community-contributions/Week1-Exercise-OllamaStream-Solution.ipynb b/week1/community-contributions/Week1-Exercise-OllamaStream-Solution.ipynb
new file mode 100644
index 0000000..f05bb80
--- /dev/null
+++ b/week1/community-contributions/Week1-Exercise-OllamaStream-Solution.ipynb
@@ -0,0 +1,172 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
+   "metadata": {},
+   "source": [
+    "# End of Week 1 Exercise Solution: Ollama with Streaming\n",
+    "\n",
+    "A tool that takes a technical question and responds with an explanation."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c1070317-3ed9-4659-abe3-828943230e03",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Imports\n",
+    "\n",
+    "import ollama\n",
+    "from IPython.display import Markdown, display"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Constants\n",
+    "\n",
+    "MODEL_LLAMA = \"llama3.2\"\n",
+    "MODEL_LLAMA_1B = \"llama3.2:1b\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Prompts\n",
+    "\n",
+    "system_prompt = \"\"\"\n",
+    "You are an assistant that takes a technical question and responds with a clear explanation.\n",
+    "\"\"\"\n",
+    "\n",
+    "question1 = \"\"\"\n",
+    "Please explain what this code does and why:\n",
+    "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
+    "\"\"\"\n",
+    "\n",
+    "question2 = \"\"\"\n",
+    "What is the purpose of using yield from in the following code, and how does it differ from a standard for loop with yield?\n",
+    "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
+    "\"\"\"\n",
+    "\n",
+    "user_prompt = \"Please answer these two questions in detail.\\nQuestion 1:\" + question1 + \"\\nQuestion 2:\" + question2\n",
+    "\n",
+    "def messages():\n",
+    "    return [\n",
+    "        {\"role\": \"system\", \"content\": system_prompt},\n",
+    "        {\"role\": \"user\", \"content\": user_prompt}\n",
+    "    ]"
+   ]
+  },
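+  {
+   "cell_type": "markdown",
+   "id": "9c2e54b1-5a0a-4f62-9d0e-3f3a1c2b4d5e",
+   "metadata": {},
+   "source": [
+    "Before asking the models, here is a small runnable sketch of the snippet the questions refer to, with made-up `books` sample data and an illustrative `unique_authors()` helper, so the expected behaviour can be checked directly."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7d4f1a2b-6c3e-4b8d-9e0f-1a2b3c4d5e6f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Quick demo of the construct the questions ask about, with sample data\n",
+    "\n",
+    "books = [\n",
+    "    {\"title\": \"Book A\", \"author\": \"Alice\"},\n",
+    "    {\"title\": \"Book B\", \"author\": \"Bob\"},\n",
+    "    {\"title\": \"Book C\"},                     # no author key\n",
+    "    {\"title\": \"Book D\", \"author\": \"Alice\"}   # duplicate author\n",
+    "]\n",
+    "\n",
+    "def unique_authors():\n",
+    "    # The set comprehension gathers each distinct, non-missing author;\n",
+    "    # yield from then yields each element of that set in turn\n",
+    "    yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
+    "\n",
+    "print(sorted(unique_authors()))  # ['Alice', 'Bob'] - deduplicated"
+   ]
+  },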
+ " )\n", + " return display(Markdown(response['message']['content']))\n", + "\n", + "llama()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}