diff --git a/week1/community-contributions/week1_EXERCISE.ipynb b/week1/community-contributions/week1_EXERCISE.ipynb new file mode 100644 index 0000000..570bcd2 --- /dev/null +++ b/week1/community-contributions/week1_EXERCISE.ipynb @@ -0,0 +1,308 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "To demonstrate your familiarity with the OpenAI API, and also Ollama, build a tool that takes a technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "import json\n", + "from typing import List\n", + "from dotenv import load_dotenv\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import ollama" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key looks good so far\n" + ] + } + ], + "source": [ + "# set up environment\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", + " \n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "624780c5-debb-44c7-a505-acf573ad5034", + "metadata": {}, + "outputs": [], + "source": [ + "# prompts\n", + "\n", + "system_prompt = \"You are a technical tutor that answers questions related to the field of computer science. \\\n", + "Your answers should reflect recent advancements in the field of software development, Artificial Intelligence and Large Language Models. 
Respond in markdown.\"\n", + "\n", + "system_prompt += \" Include resources that might help learners get more information on the topic.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "b6c0280b-ab8f-48a7-9a0c-7f47899bb559", + "metadata": {}, + "outputs": [], + "source": [ + "user_prompt = \"How would you explain LLMs to someone who doesn't have a background in Computer Science or AI?\"" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [], + "source": [ + "# Get gpt-4o-mini to answer, with streaming\n", + "\n", + "def tech_tutor(question):\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL_GPT,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": question}\n", + " ],\n", + " stream=True\n", + " )\n", + " \n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "ca561874-dee3-456c-87f3-02f7e9a4ed4f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "### Explaining Large Language Models (LLMs) to Non-Technical Audiences\n", + "\n", + "**What are LLMs?**\n", + "\n", + "Large Language Models (LLMs) are a type of artificial intelligence designed to understand and generate human language. Imagine having a super-smart assistant who can read, write, answer questions, or even create stories based on what you ask them. \n", + "\n", + "**How Do They Work?**\n", + "\n", + "1. **Training on Text**: LLMs are trained on vast amounts of written text from books, articles, websites, and more. During this training, they learn about grammar, facts, ideas, and the way people communicate.\n", + "\n", + "2. **Patterns and Context**: By analyzing this text, LLMs recognize patterns in how words and sentences relate to each other. They don't understand language like we do, but they can predict what words are likely to come next in a sentence based on the context.\n", + "\n", + "3. **Generating Responses**: When you ask an LLM a question or give it a prompt, it generates a response by choosing words that fit the patterns it's learned. 
It’s a bit like having a very advanced autocomplete feature on your phone, but much more sophisticated.\n", + "\n", + "**Why Are They Important?**\n", + "\n", + "LLMs are transforming various fields, such as:\n", + "\n", + "- **Customer Support**: They can understand and respond to customer queries automatically.\n", + "- **Content Creation**: They assist in generating articles, poetry, or even code.\n", + "- **Language Translation**: They help translate languages with high accuracy.\n", + "- **Personal Assistants**: They power smart assistants like Siri or Google Assistant, allowing for more natural conversations.\n", + "\n", + "**Real-World Examples of LLMs**:\n", + "\n", + "- **ChatGPT**: Developed by OpenAI, this model can engage in conversations, answer questions, and provide information on diverse topics.\n", + "- **Google BERT**: Enhances search engine results by better understanding user queries.\n", + " \n", + "### Resources to Learn More\n", + "\n", + "If you're curious and want to delve deeper into understanding LLMs and their underlying technology, here are some great resources:\n", + "\n", + "1. **Online Articles**:\n", + " - [What is a Large Language Model?](https://towardsdatascience.com/what-is-a-large-language-model-785a122ca835)\n", + " - [A Beginner's Guide to Large Language Models](https://www.analyticsvidhya.com/blog/2021/07/a-beginners-guide-to-large-language-models-llms/)\n", + "\n", + "2. **Video Tutorials**:\n", + " - [What are Large Language Models? | AI Explained](https://www.youtube.com/watch?v=ttlLuanHCHo) on YouTube\n", + " - [Deep Learning for NLP: Large Pre-trained Language Models](https://www.coursera.org/lecture/natural-language-processing-with-classifiers-and-deep-learning/the-power-of-large-pre-trained-language-models-u4XP5) on Coursera\n", + "\n", + "3. **Books**:\n", + " - *\"Artificial Intelligence: A Guide to Intelligent Systems\"* by Michael Negnevitsky provides a foundation for understanding AI.\n", + " - *\"Speech and Language Processing\"* by Daniel Jurafsky and James H. Martin offers a deeper dive into language processing technologies.\n", + "\n", + "By exploring these resources, you'll gain a better understanding of LLMs and their capabilities, as well as their profound implications for technology and society." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tech_tutor(user_prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "2a7c6670-ead8-41dc-9a0b-2b4caa40e846", + "metadata": {}, + "outputs": [], + "source": [ + "OLLAMA_API = \"http://localhost:11434/api/chat\"" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "**What are Large Language Models (LLMs)?**\n", + "=====================================\n", + "\n", + "Imagine having a super-smart, never-ending bookshelf filled with knowledge about language and human behavior. That's roughly what a Large Language Model (LLM) is: a computer program designed to understand and generate human-like text.\n", + "\n", + "### How do LLMs work?\n", + "\n", + "1. **Training data**: LLMs are trained on massive amounts of text data, often from the internet, books, or other sources.\n", + "2. **Algorithms**: The model uses complex algorithms to analyze this training data, learning patterns and relationships between words, sentences, and concepts.\n", + "3. 
**Self-supervised learning**: During training, the model generates its own text based on the input it's given, and then evaluates how well its output matches human-written text.\n", + "\n", + "### What can LLMs do?\n", + "\n", + "* **Text generation**: LLMs can produce coherent, grammatically correct text on a wide range of topics.\n", + "* **Language translation**: They can translate text from one language to another with surprising accuracy.\n", + "* **Chatbots and conversational AI**: LLMs are used in chatbots to respond to user queries, often providing helpful and personalized answers.\n", + "\n", + "### Examples of LLMs\n", + "\n", + "* **BERT (Bidirectional Encoder Representations from Transformers)**: A pioneering model that's the foundation for many modern LLMs.\n", + "* **Transformers**: An architecture that's become popular for its ability to handle long-range dependencies in text.\n", + "* **Language models like myself**: I'm a type of LLM, trained on a massive dataset and using transformer-based architectures.\n", + "\n", + "### Limitations and future directions\n", + "\n", + "While LLMs have made tremendous progress, they still have limitations:\n", + "\n", + "* **Bias and fairness**: Models can perpetuate existing biases if trained on biased data.\n", + "* **Explainability**: It's challenging to understand why a particular model made a certain decision or generated a specific response.\n", + "* **Adversarial attacks**: Models can be vulnerable to malicious input that manipulates their output.\n", + "\n", + "Researchers are actively exploring ways to improve LLMs, such as:\n", + "\n", + "* **Multi-task learning**: Training models on multiple tasks simultaneously to enhance performance.\n", + "* **Explainability techniques**: Developing methods to provide insights into model behavior and decision-making processes.\n", + "\n", + "**Getting started with LLMs**\n", + "\n", + "If you're interested in learning more about LLMs, I recommend checking out these resources:\n", + "\n", + "* **BERT's official documentation**: [BERT Documentation](https://bert.dev/)\n", + "* **The Transformers library**: [Hugging Face Transformers](https://huggingface.co/transformers/)\n", + "* **Large Language Models 101**: A beginner-friendly introduction to LLMs on Towards Data Science. [TowardsDataScience.com](https://towardsdatascience.com/large-language-models-101-8d2a6f3cdd23)\n", + "\n", + "I hope this explanation helped you understand what Large Language Models are and how they work!" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Get Llama 3.2 to answer\n", + "messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + "\n", + "payload = {\n", + " \"model\": MODEL_LLAMA,\n", + " \"messages\": messages,\n", + " \"stream\": True\n", + " }\n", + "\n", + "response = ollama.chat(model=MODEL_LLAMA, messages=messages)\n", + "reply = response['message']['content']\n", + "display(Markdown(reply))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}
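A note on the final cell of this notebook: `OLLAMA_API` and the `payload` dict (with `"stream": True`) are defined but never used, because the Llama answer is fetched through the `ollama` package in a single, non-streamed call. Below is a minimal sketch, assuming a local Ollama server is running on the default port and that the notebook's `MODEL_LLAMA`, `messages`, `requests`, `ollama`, and IPython display imports are already in scope, of how those pieces could be exercised: first a direct call to the `/api/chat` endpoint, then a streamed variant mirroring the gpt-4o-mini streaming cell. The names `reply_http` and `handle` are illustrative and not part of the original notebook.

```python
# Sketch only: relies on MODEL_LLAMA, messages, requests, ollama and the IPython
# display helpers defined/imported in the earlier notebook cells.

OLLAMA_API = "http://localhost:11434/api/chat"  # same constant as in the notebook

# 1) Call the raw HTTP endpoint that OLLAMA_API (and the unused payload) point at.
#    With "stream": False the server returns a single JSON object holding the reply.
payload = {"model": MODEL_LLAMA, "messages": messages, "stream": False}
reply_http = requests.post(OLLAMA_API, json=payload).json()["message"]["content"]
display(Markdown(reply_http))

# 2) Stream the answer chunk by chunk via the ollama package, reusing the
#    update_display pattern from the gpt-4o-mini cell.
response = ""
handle = display(Markdown(""), display_id=True)
for chunk in ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True):
    response += chunk["message"]["content"] or ""
    update_display(Markdown(response), display_id=handle.display_id)
```

Either variant could replace the unused `payload` block so that every constant defined in the notebook is actually exercised.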