From the uDemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
438 lines
19 KiB
438 lines
19 KiB
{ |
|
"cells": [ |
|
{ |
|
"cell_type": "markdown", |
|
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", |
|
"metadata": {}, |
|
"source": [ |
|
"# End of week 1 exercise\n", |
|
"\n", |
|
"To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", |
|
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": 60, |
|
"id": "c1070317-3ed9-4659-abe3-828943230e03", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"# imports\n", |
|
"import os\n", |
|
"import requests\n", |
|
"import json\n", |
|
"from typing import List, Optional\n", |
|
"from dotenv import load_dotenv\n", |
|
"from bs4 import BeautifulSoup\n", |
|
"from IPython.display import Markdown, display, update_display\n", |
|
"import openai" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": 61, |
|
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", |
|
"metadata": {}, |
|
"outputs": [ |
|
{ |
|
"name": "stdout", |
|
"output_type": "stream", |
|
"text": [ |
|
"API key looks good so far\n" |
|
] |
|
} |
|
], |
|
"source": [ |
|
"# Initialize and constants\n", |
|
"\n", |
|
"load_dotenv(override=True)\n", |
|
"api_key = os.getenv('OPENAI_API_KEY')\n", |
|
"\n", |
|
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", |
|
" print(\"API key looks good so far\")\n", |
|
"else:\n", |
|
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", |
|
"\n", |
|
"MODEL_GPT = 'gpt-4o-mini'\n", |
|
"MODEL_LLAMA = 'llama3.2'\n", |
|
"\n", |
|
"openai.api_key = api_key" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": 78, |
|
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"# headers = {\n", |
|
"# \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
|
"# }\n", |
|
"\n", |
|
"# class TechnicalQuestionSolver:\n", |
|
"# \"\"\"\n", |
|
"# A utility class that takes a technical question, and responds with an explanation\n", |
|
"# \"\"\"\n", |
|
"# def __init__(self, model=\"gpt-4o-mini\"): # Fixed the initialization method syntax\n", |
|
"# self.model = model\n", |
|
"# self.client = openai.OpenAI() # Create OpenAI client instance\n", |
|
" \n", |
|
"# def ask_question(self, question: str) -> str:\n", |
|
"# \"\"\"\n", |
|
"# Takes a technical question as input and returns an AI-generated explanation.\n", |
|
" \n", |
|
"# Parameters:\n", |
|
"# - question (str): The technical question to be explained.\n", |
|
" \n", |
|
"# Returns:\n", |
|
"# - str: AI-generated explanation.\n", |
|
"# \"\"\"\n", |
|
"# try:\n", |
|
"# # Prompt for the AI\n", |
|
"# prompt = f\"Explain the following technical question in detail:\\n\\n{question}\"\n", |
|
" \n", |
|
"# # Use new OpenAI API format\n", |
|
"# response = self.client.chat.completions.create(\n", |
|
"# model=self.model,\n", |
|
"# messages=[\n", |
|
"# {\"role\": \"system\", \"content\": \"You are a knowledgeable assistant that explains technical concepts.\"},\n", |
|
"# {\"role\": \"user\", \"content\": prompt}\n", |
|
"# ]\n", |
|
"# )\n", |
|
" \n", |
|
"# # Extract the explanation using new response format\n", |
|
"# explanation = response.choices[0].message.content\n", |
|
"# return explanation\n", |
|
" \n", |
|
"# except Exception as e:\n", |
|
"# return f\"An error occurred while processing your question: {str(e)}\"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"\n", |
|
"# Your headers definition\n", |
|
"headers = {\n", |
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
|
"}\n", |
|
"\n", |
|
"class TechnicalQuestionSolver:\n", |
|
" \"\"\"\n", |
|
" A utility class that takes a technical question, and responds with an explanation\n", |
|
" using either OpenAI GPT or Llama models\n", |
|
" \"\"\"\n", |
|
" def __init__(self, model_type: str = \"gpt\", model_name: Optional[str] = None):\n", |
|
" \"\"\"Initialize with either GPT or Llama model\"\"\"\n", |
|
" self.model_type = model_type\n", |
|
" self.model_name = model_name or (MODEL_GPT if model_type == \"gpt\" else MODEL_LLAMA)\n", |
|
" \n", |
|
" if model_type == \"gpt\":\n", |
|
" self.client = openai.OpenAI()\n", |
|
" elif model_type == \"llama\":\n", |
|
" try:\n", |
|
" from llama_cpp import Llama\n", |
|
" self.client = Llama(model_path=self.model_name)\n", |
|
" except ImportError:\n", |
|
" raise ImportError(\"Please install llama-cpp-python to use Llama models\")\n", |
|
" else:\n", |
|
" raise ValueError(f\"Unsupported model type: {model_type}\")\n", |
|
"\n", |
|
" def ask_question(self, question: str) -> str:\n", |
|
" \"\"\"\n", |
|
" Takes a technical question as input and returns an AI-generated explanation.\n", |
|
" \n", |
|
" Parameters:\n", |
|
" - question (str): The technical question to be explained.\n", |
|
" \n", |
|
" Returns:\n", |
|
" - str: AI-generated explanation.\n", |
|
" \"\"\"\n", |
|
" try:\n", |
|
" messages = [\n", |
|
" {\"role\": \"system\", \"content\": \"You are a knowledgeable assistant that explains technical concepts.\"},\n", |
|
" {\"role\": \"user\", \"content\": f\"Explain the following technical question in detail:\\n\\n{question}\"}\n", |
|
" ]\n", |
|
" \n", |
|
" if self.model_type == \"gpt\":\n", |
|
" response = self.client.chat.completions.create(\n", |
|
" model=self.model_name,\n", |
|
" messages=messages\n", |
|
" )\n", |
|
" explanation = response.choices[0].message.content\n", |
|
" else: # Llama\n", |
|
" response = self.client.create_chat_completion(\n", |
|
" messages=messages,\n", |
|
" max_tokens=2048,\n", |
|
" temperature=0.7\n", |
|
" )\n", |
|
" explanation = response[\"choices\"][0][\"message\"][\"content\"]\n", |
|
" \n", |
|
" return explanation\n", |
|
" \n", |
|
" except Exception as e:\n", |
|
" return f\"An error occurred while processing your question: {str(e)}\"\n", |
|
"\n", |
|
"\n", |
|
" def display_answer(self, question: str):\n", |
|
" \"\"\"\n", |
|
" Takes a technical question, generates an explanation, and displays it in Markdown format.\n", |
|
" \n", |
|
" Parameters:\n", |
|
" - question (str): The technical question to be explained.\n", |
|
" \"\"\"\n", |
|
" explanation = self.ask_question(question)\n", |
|
" display(Markdown(f\"### Question:\\n{question}\\n\\n### Explanation:\\n{explanation}\"))\n", |
|
"\n", |
|
"\n", |
|
"gpt_helper = TechnicalQuestionSolver(model_type=\"gpt\") # Uses GPT model\n", |
|
"# llama_helper = TechnicalQuestionSolver(model_type=\"llama\") # Uses Llama model\n", |
|
"\n", |
|
"\n" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": 79, |
|
"id": "23ad40fe-44e1-4822-86e1-53462dd52a86", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"question = \"\"\" what does the below code does? I want a detailed explanation and in easy way line by line.\n", |
|
"\n", |
|
"headers = {\n", |
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
|
"}\n", |
|
"\n", |
|
"class TechnicalQuestionSolver:\n", |
|
" \n", |
|
" def __init__(self, model=\"gpt-4\"): # Fixed the initialization method syntax\n", |
|
" self.model = model\n", |
|
" self.client = openai.OpenAI() # Create OpenAI client instance\n", |
|
" \n", |
|
" def ask_question(self, question: str) -> str:\n", |
|
" \n", |
|
" try:\n", |
|
" # Prompt for the AI\n", |
|
" prompt = f\"Explain the following technical question in detail:\\n\\n{question}\"\n", |
|
" \n", |
|
" # Use new OpenAI API format\n", |
|
" response = self.client.chat.completions.create(\n", |
|
" model=self.model,\n", |
|
" messages=[\n", |
|
" {\"role\": \"system\", \"content\": \"You are a knowledgeable assistant that explains technical concepts.\"},\n", |
|
" {\"role\": \"user\", \"content\": prompt}\n", |
|
" ]\n", |
|
" )\n", |
|
" \n", |
|
" # Extract the explanation using new response format\n", |
|
" explanation = response.choices[0].message.content\n", |
|
" return explanation\n", |
|
" \n", |
|
" except Exception as e:\n", |
|
" return f\"An error occurred while processing your question: {str(e)}\"\n", |
|
" \"\"\"\n", |
|
"# explanation = helper.ask_question(question)\n", |
|
"# print(explanation)" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": 80, |
|
"id": "69ce9cc9-1a06-44c0-9a73-e2a001312986", |
|
"metadata": {}, |
|
"outputs": [ |
|
{ |
|
"data": { |
|
"text/markdown": [ |
|
"### Question:\n", |
|
" what does the below code does? I want a detailed explanation and in easy way line by line.\n", |
|
"\n", |
|
"headers = {\n", |
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
|
"}\n", |
|
"\n", |
|
"class TechnicalQuestionSolver:\n", |
|
" \n", |
|
" def __init__(self, model=\"gpt-4\"): # Fixed the initialization method syntax\n", |
|
" self.model = model\n", |
|
" self.client = openai.OpenAI() # Create OpenAI client instance\n", |
|
" \n", |
|
" def ask_question(self, question: str) -> str:\n", |
|
" \n", |
|
" try:\n", |
|
" # Prompt for the AI\n", |
|
" prompt = f\"Explain the following technical question in detail:\n", |
|
"\n", |
|
"{question}\"\n", |
|
" \n", |
|
" # Use new OpenAI API format\n", |
|
" response = self.client.chat.completions.create(\n", |
|
" model=self.model,\n", |
|
" messages=[\n", |
|
" {\"role\": \"system\", \"content\": \"You are a knowledgeable assistant that explains technical concepts.\"},\n", |
|
" {\"role\": \"user\", \"content\": prompt}\n", |
|
" ]\n", |
|
" )\n", |
|
" \n", |
|
" # Extract the explanation using new response format\n", |
|
" explanation = response.choices[0].message.content\n", |
|
" return explanation\n", |
|
" \n", |
|
" except Exception as e:\n", |
|
" return f\"An error occurred while processing your question: {str(e)}\"\n", |
|
" \n", |
|
"\n", |
|
"### Explanation:\n", |
|
"Let's break down the provided code line by line and explain what each part does in a simple way. The code appears to define a class called `TechnicalQuestionSolver`, which is likely intended to assist with answering technical questions by utilizing the OpenAI API.\n", |
|
"\n", |
|
"### Code Breakdown\n", |
|
"\n", |
|
"1. **Headers Definition**:\n", |
|
" ```python\n", |
|
" headers = {\n", |
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
|
" }\n", |
|
" ```\n", |
|
" - This line defines a dictionary named `headers` that includes a `User-Agent` key. \n", |
|
" - The `User-Agent` is typically sent in HTTP requests to identify the client software making the request. It helps web servers understand what type of device and browser is sending the request. In this case, it's simulating a request from a modern web browser running on a Windows operating system.\n", |
|
"\n", |
|
"2. **Class Definition**:\n", |
|
" ```python\n", |
|
" class TechnicalQuestionSolver:\n", |
|
" ```\n", |
|
" - This line defines a new class called `TechnicalQuestionSolver`. Classes are blueprints for creating objects in object-oriented programming, allowing us to bundle data (attributes) and functions (methods) that operate on that data.\n", |
|
"\n", |
|
"3. **Constructor Method**:\n", |
|
" ```python\n", |
|
" def __init__(self, model=\"gpt-4\"): # Fixed the initialization method syntax\n", |
|
" self.model = model\n", |
|
" self.client = openai.OpenAI() # Create OpenAI client instance\n", |
|
" ```\n", |
|
" - `__init__` is the constructor method that is automatically called when an instance of the class is created. It initializes the class with the following:\n", |
|
" - `model`: This parameter allows the user to specify which model of OpenAI’s language model to use, defaulting to `\"gpt-4\"`.\n", |
|
" - `self.model`: This sets the instance variable `model` to the provided value, so it can be accessed later by other methods of the class.\n", |
|
" - `self.client`: This creates an instance of `openai.OpenAI()`. This line assumes that there is an OpenAI library imported earlier that provides the functionality to interact with their models.\n", |
|
"\n", |
|
"4. **Method for Asking Questions**:\n", |
|
" ```python\n", |
|
" def ask_question(self, question: str) -> str:\n", |
|
" ```\n", |
|
" - This defines a method named `ask_question` that takes a single parameter `question` of type string and returns a string. This method is responsible for sending the question to the AI and obtaining an explanation.\n", |
|
"\n", |
|
"5. **Try-Except Block**:\n", |
|
" ```python\n", |
|
" try:\n", |
|
" ```\n", |
|
" - The `try` block is used to catch any exceptions (errors) that might occur while running the code inside it. If an error occurs, it prevents the program from crashing and allows it to gracefully handle the problem.\n", |
|
"\n", |
|
"6. **Creating the Prompt**:\n", |
|
" ```python\n", |
|
" prompt = f\"Explain the following technical question in detail:\\n\\n{question}\"\n", |
|
" ```\n", |
|
" - This line creates a string called `prompt` using an f-string (formatted string). It adds a specific text asking the AI to explain the question. The `\\n\\n` adds line breaks for better formatting, placing the actual question right after the initial instruction.\n", |
|
"\n", |
|
"7. **Making the API Call**:\n", |
|
" ```python\n", |
|
" response = self.client.chat.completions.create(\n", |
|
" model=self.model,\n", |
|
" messages=[\n", |
|
" {\"role\": \"system\", \"content\": \"You are a knowledgeable assistant that explains technical concepts.\"},\n", |
|
" {\"role\": \"user\", \"content\": prompt}\n", |
|
" ]\n", |
|
" )\n", |
|
" ```\n", |
|
" - This line sends a request to the OpenAI API using the `client` created earlier:\n", |
|
" - `model=self.model`: Specifies which AI model to use (e.g., `gpt-4`).\n", |
|
" - `messages`: This is a list of message dictionaries that define how to interact with the AI:\n", |
|
" - The system message sets up the context for the AI, telling it to act as a knowledgeable assistant on technical concepts.\n", |
|
" - The user message contains the prompt that includes the actual question to be answered.\n", |
|
"\n", |
|
"8. **Extracting the Explanation**:\n", |
|
" ```python\n", |
|
" explanation = response.choices[0].message.content\n", |
|
" return explanation\n", |
|
" ```\n", |
|
" - Here, after receiving the API response, it extracts the actual content of the response. The structure `response.choices[0].message.content` indicates that:\n", |
|
" - `response.choices` is a list of possible replies returned by the model.\n", |
|
" - `[0]` accesses the first choice (if there are multiple).\n", |
|
" - `.message.content` gets the main text of that choice which is the explanation generated by the AI.\n", |
|
" - The method then returns the explanation to wherever `ask_question` was called from.\n", |
|
"\n", |
|
"9. **Handling Errors**:\n", |
|
" ```python\n", |
|
" except Exception as e:\n", |
|
" return f\"An error occurred while processing your question: {str(e)}\"\n", |
|
" ```\n", |
|
" - If any error occurs during the execution of the `try` block, this `except` block will catch it. It creates an error message that includes the original error converted to a string. It then returns this error message, making it clear that something went wrong.\n", |
|
"\n", |
|
"### Summary\n", |
|
"\n", |
|
"The `TechnicalQuestionSolver` class is designed to facilitate query processing through OpenAI's model. It initializes an OpenAI client object, builds a prompt based on a user question, sends that to the API, and retrieves a detailed explanation of the technical question, all while managing potential errors gracefully. This modular approach allows for easy use and extension of the functionality." |
|
], |
|
"text/plain": [ |
|
"<IPython.core.display.Markdown object>" |
|
] |
|
}, |
|
"metadata": {}, |
|
"output_type": "display_data" |
|
} |
|
], |
|
"source": [ |
|
"gpt_helper.display_answer(question) # Using GPT\n" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "a83299c6-2451-486b-9a19-015b345138a5", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"# Get gpt-4o-mini to answer, with streaming" |
|
] |
|
}, |
|
{ |
|
"cell_type": "code", |
|
"execution_count": null, |
|
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", |
|
"metadata": {}, |
|
"outputs": [], |
|
"source": [ |
|
"# Get Llama 3.2 to answer" |
|
] |
|
} |
|
], |
|
"metadata": { |
|
"kernelspec": { |
|
"display_name": "Python 3 (ipykernel)", |
|
"language": "python", |
|
"name": "python3" |
|
}, |
|
"language_info": { |
|
"codemirror_mode": { |
|
"name": "ipython", |
|
"version": 3 |
|
}, |
|
"file_extension": ".py", |
|
"mimetype": "text/x-python", |
|
"name": "python", |
|
"nbconvert_exporter": "python", |
|
"pygments_lexer": "ipython3", |
|
"version": "3.11.11" |
|
} |
|
}, |
|
"nbformat": 4, |
|
"nbformat_minor": 5 |
|
}
|
|
|