From the Udemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
   "metadata": {},
   "source": [
    "# End of week 1 exercise\n",
    "\n",
    "To demonstrate your familiarity with the OpenAI API, and also Ollama, build a tool that takes a technical question,\n",
    "and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "c1070317-3ed9-4659-abe3-828943230e03",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "* Running on local URL: http://127.0.0.1:7878\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div><iframe src=\"http://127.0.0.1:7878/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": []
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import os\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from typing import List\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "import google.generativeai\n",
    "import anthropic\n",
    "import ollama\n",
    "import gradio as gr\n",
    "\n",
    "load_dotenv()\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
    "\n",
    "openai = OpenAI()\n",
    "\n",
    "claude = anthropic.Anthropic()\n",
    "\n",
    "# Model tag for the local Ollama model; llama3.2 is an assumption -- use whatever model you have pulled\n",
    "MODEL_LLAMA = \"llama3.2\"\n",
    "\n",
    "# here is the question; type over this to ask something new\n",
    "\n",
    "question = \"\"\"\n",
    "You are provided with the following prompt that asks an AI model to extract pertinent links from a list of links. It currently relies on single-shot prompting, but I want you to give me examples of how to make it use multi-shot prompting. Give me several examples to test out.\n",
    "The prompt in question: You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n",
    "You should respond in JSON as in this example:\n",
    "{\n",
    "    \"links\": [\n",
    "        {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
    "        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
    "    ]\n",
    "}\n",
    "\"\"\"\n",
    "\n",
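    "# For reference, a multi-shot version of the link-selection prompt above would bake worked\n",
    "# examples into the message list ahead of the real input. A minimal sketch -- the example\n",
    "# links here are illustrative, not from the course:\n",
    "# multi_shot_messages = [\n",
    "#     {'role': 'system', 'content': 'Decide which links belong in a company brochure; respond in JSON.'},\n",
    "#     {'role': 'user', 'content': 'Links: /about, /privacy, /careers'},\n",
    "#     {'role': 'assistant', 'content': '{\"links\": [{\"type\": \"about page\", \"url\": \"https://example.com/about\"}]}'},\n",
    "#     {'role': 'user', 'content': 'Links: <the links scraped from the target page>'}\n",
    "# ]\n",
    "\n",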
"# question = \"Give me examples of no-shot prompting, one-shot prompting and multi-shot prompting. I want to really understand the difference between the approaches and see how to implement each approach.\"\n", |
|
"\n", |
|
"# user_choice = input(\"1) Ask question directly\\n2) Write question in code\\n=> \")\n", |
|
"# if user_choice == \"1\":\n", |
|
"# print(\"Ask question directly selected.\")\n", |
|
"# question = input(\"Ask your question here\\n=> \")\n", |
|
"# else:\n", |
|
"# print(\"Write question in the code selected.\")\n", |
|
"\n", |
|
"system_prompt = \"You are a helpful technical tutor who answers questions about python code, software engineering, data science and LLMs. Answer the question to the best of your abilities, in 500 words or less.\"\n", |
|
"\n", |
|
"def stream_model(prompt, model):\n", |
|
" if model==\"GPT\":\n", |
|
" result = stream_gpt(prompt)\n", |
|
" elif model==\"Claude\":\n", |
|
" result = stream_claude(prompt)\n", |
|
" elif model==\"Llama\":\n", |
|
" result = stream_llama(prompt)\n", |
|
" else:\n", |
|
" raise ValueError(\"Unknown model\")\n", |
|
" yield from result\n", |
|
"\n", |
|
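    "# Each streaming helper below yields the cumulative text so far rather than just the new delta;\n",
    "# Gradio re-renders the Markdown output with every yielded value, which produces the streaming effect.\n",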
"def stream_gpt(prompt):\n", |
|
" messages = [\n", |
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
|
" {\"role\": \"user\", \"content\": prompt}\n", |
|
" ]\n", |
|
" stream = openai.chat.completions.create(\n", |
|
" model='gpt-4o-mini',\n", |
|
" messages=messages,\n", |
|
" stream=True\n", |
|
" )\n", |
|
" result = \"\"\n", |
|
" for chunk in stream:\n", |
|
" result += chunk.choices[0].delta.content or \"\"\n", |
|
" yield result\n", |
|
"\n", |
|
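    "# Anthropic's SDK wraps streaming in a context manager; stream.text_stream iterates the text deltas.\n",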
"def stream_claude(prompt):\n", |
|
" result = claude.messages.stream(\n", |
|
" model=\"claude-3-haiku-20240307\",\n", |
|
" max_tokens=1000,\n", |
|
" temperature=0.7,\n", |
|
" system=system_prompt,\n", |
|
" messages=[\n", |
|
" {\"role\": \"user\", \"content\": prompt},\n", |
|
" ],\n", |
|
" )\n", |
|
" response = \"\"\n", |
|
" with result as stream:\n", |
|
" for text in stream.text_stream:\n", |
|
" response += text or \"\"\n", |
|
" yield response\n", |
|
"\n", |
|
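    "# This requires a local Ollama server to be running (`ollama serve`), with the model pulled\n",
    "# beforehand, e.g. `ollama pull llama3.2` (matching the MODEL_LLAMA tag assumed above).\n",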
"def stream_llama(prompt):\n", |
|
" messages = [\n", |
|
" {\"role\": \"user\", \"content\": prompt}\n", |
|
" ]\n", |
|
" response = \"\"\n", |
|
" for chunk in ollama.chat(\n", |
|
" model=MODEL_LLAMA, \n", |
|
" messages=messages, \n", |
|
" stream=True\n", |
|
" ):\n", |
|
" # Check if the chunk contains text\n", |
|
" if chunk.get('message', {}).get('content'):\n", |
|
" # Append the new text to the response\n", |
|
" response += chunk['message']['content']\n", |
|
" # Yield the incrementally built response\n", |
|
" yield response\n", |
|
"\n", |
|
" \n", |
|
"\n", |
|
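    "# gr.Interface accepts a generator function: each yielded string replaces the Markdown output,\n",
    "# so the answer streams into the UI. launch(share=True) would publish a temporary public link.\n",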
"view = gr.Interface(\n", |
|
" fn=stream_model,\n", |
|
" inputs=[gr.Textbox(label=\"Your message:\", lines=6), gr.Dropdown([\"GPT\", \"Claude\", \"Llama\"], label=\"Select model\", value=\"GPT\")],\n", |
|
" outputs=[gr.Markdown(label=\"Response:\")],\n", |
|
" flagging_mode=\"never\"\n", |
|
")\n", |
|
"view.launch()\n", |
|
"\n", |
|
"# output = \"\"\n", |
|
"# for chunk in response:\n", |
|
"# if hasattr(chunk.choices[0].delta, \"content\"): # Check if 'content' exists\n", |
|
"# content = chunk.choices[0].delta.content # Extract content\n", |
|
"# if content: \n", |
|
"# output += chunk.choices[0].delta.content\n", |
|
"# clear_output(wait=True)\n", |
|
"# display(Markdown(\"# GPT-O4-MINI ANSWER\"))\n", |
|
"# display(Markdown(output))" |
|
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "333b7231-93ba-4d4a-b38d-41e72f2f3863",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
|
|
|