24 changed files with 5950 additions and 0 deletions
@ -0,0 +1,256 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Import tkinter and ollama to create the app" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 20, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import ollama\n", |
||||||
|
"import tkinter as tk\n", |
||||||
|
"from tkinter import ttk" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Basic configuration parameters for the Ollama API:" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 21, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n", |
||||||
|
"HEADERS = {\"Content-Type\":\"application/json\"}\n", |
||||||
|
"MODEL = \"llama3.2\"\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Initialize conversation history." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 22, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"conversation_history = []" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Defining the key presses. If user presses shift + enter then simply go to the next line. \n", |
||||||
|
"\n", |
||||||
|
"If user presses only enter then submit the question." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 23, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def handle_keypress(event):\n", |
||||||
|
" if event.state & 0x1: # Check if Shift is pressed\n", |
||||||
|
" return\n", |
||||||
|
" else:\n", |
||||||
|
" display_answer()\n", |
||||||
|
" return 'break'" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Defining the function that will display answers using Ollama.\n", |
||||||
|
"\n", |
||||||
|
"\n", |
||||||
|
"To turn it into a chatbot we simply append user's question and Ollama's response to our conversation history and pass that into Ollama as our next question." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 24, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def display_answer(event=None):\n", |
||||||
|
" question_text['state'] = 'disabled'\n", |
||||||
|
" question_text['bg'] = '#F0F0F0'\n", |
||||||
|
" status_label.config(text=\"Looking for an answer...\")\n", |
||||||
|
" root.update()\n", |
||||||
|
"\n", |
||||||
|
" # Get question text and prepare message\n", |
||||||
|
" question = question_text.get(\"1.0\", tk.END).strip()\n", |
||||||
|
" if question:\n", |
||||||
|
" # Append the user's question to the conversation history\n", |
||||||
|
" conversation_history.append({\"role\": \"user\", \"content\": question})\n", |
||||||
|
"\n", |
||||||
|
" # Pass the entire conversation history to Ollama\n", |
||||||
|
" try:\n", |
||||||
|
" # Get the answer\n", |
||||||
|
" response = ollama.chat(model=MODEL, messages=conversation_history)\n", |
||||||
|
" answer = response[\"message\"][\"content\"]\n", |
||||||
|
"\n", |
||||||
|
" # Append the assistant's answer to the conversation history\n", |
||||||
|
" conversation_history.append({\"role\": \"assistant\", \"content\": answer})\n", |
||||||
|
"\n", |
||||||
|
" # Update the text widget with the answer\n", |
||||||
|
" answer_text.configure(state='normal')\n", |
||||||
|
" answer_text.delete(1.0, tk.END)\n", |
||||||
|
" answer_text.insert(tk.END, answer)\n", |
||||||
|
" answer_text.configure(state='disabled')\n", |
||||||
|
"\n", |
||||||
|
" status_label.config(text=\"Answered\")\n", |
||||||
|
" except Exception as e:\n", |
||||||
|
" answer_text.configure(state='normal')\n", |
||||||
|
" answer_text.delete(1.0, tk.END)\n", |
||||||
|
" answer_text.insert(tk.END, f\"Error: {str(e)}\")\n", |
||||||
|
" answer_text.configure(state='disabled')\n", |
||||||
|
" status_label.config(text=\"Error\")\n", |
||||||
|
" else:\n", |
||||||
|
" # If empty question string was received\n", |
||||||
|
" answer_text.configure(state='normal')\n", |
||||||
|
" answer_text.delete(1.0, tk.END)\n", |
||||||
|
" answer_text.insert(tk.END, \"Please enter a question.\")\n", |
||||||
|
" answer_text.configure(state='disabled')\n", |
||||||
|
" status_label.config(text=\"\")\n", |
||||||
|
"\n", |
||||||
|
" # Re-enable question input and restore normal background\n", |
||||||
|
" question_text['state'] = 'normal'\n", |
||||||
|
" question_text['bg'] = 'white'\n", |
||||||
|
" root.update()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"A button to remove the conversation history and start all over again." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def remove_all():\n", |
||||||
|
" \"\"\"Clears the conversation history and resets the interface.\"\"\"\n", |
||||||
|
" global conversation_history\n", |
||||||
|
" conversation_history = [] # Clear conversation history\n", |
||||||
|
"\n", |
||||||
|
" # Reset text widgets\n", |
||||||
|
" question_text.delete(1.0, tk.END)\n", |
||||||
|
" answer_text.configure(state='normal')\n", |
||||||
|
" answer_text.delete(1.0, tk.END)\n", |
||||||
|
" answer_text.insert(tk.END, \"Your answer will appear here.\")\n", |
||||||
|
" answer_text.configure(state='disabled')\n", |
||||||
|
"\n", |
||||||
|
" # Reset status label\n", |
||||||
|
" status_label.config(text=\"\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Creating the app window using tkinter." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 18, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Create the main window\n", |
||||||
|
"root = tk.Tk()\n", |
||||||
|
"root.title(\"Ollama with GUI\")\n", |
||||||
|
"root.geometry(\"500x800\")\n", |
||||||
|
"\n", |
||||||
|
"# Create and configure the Questions window\n", |
||||||
|
"question_frame = ttk.LabelFrame(root, text=\"Questions\", padding=(10, 10))\n", |
||||||
|
"question_frame.pack(fill=\"both\", expand=True, padx=10, pady=10)\n", |
||||||
|
"\n", |
||||||
|
"question_label = ttk.Label(question_frame, text=\"Enter your question:\")\n", |
||||||
|
"question_label.pack(anchor=\"w\", pady=5)\n", |
||||||
|
"\n", |
||||||
|
"# Replace Entry with Text widget for questions\n", |
||||||
|
"question_text = tk.Text(question_frame, wrap=tk.WORD, width=50, height=4)\n", |
||||||
|
"question_text.pack(anchor=\"w\", pady=5)\n", |
||||||
|
"question_text.bind(\"<Return>\", handle_keypress)\n", |
||||||
|
"\n", |
||||||
|
"# Add status label\n", |
||||||
|
"status_label = ttk.Label(question_frame, text=\"\")\n", |
||||||
|
"status_label.pack(anchor=\"w\", pady=5)\n", |
||||||
|
"\n", |
||||||
|
"# Add Remove All button\n", |
||||||
|
"remove_all_button = ttk.Button(question_frame, text=\"Remove All\", command=remove_all)\n", |
||||||
|
"remove_all_button.pack(anchor=\"e\", pady=5)\n", |
||||||
|
"\n", |
||||||
|
"# Create and configure the Answers window\n", |
||||||
|
"answer_frame = ttk.LabelFrame(root, text=\"Answer\", padding=(10, 10))\n", |
||||||
|
"answer_frame.pack(fill=\"both\", expand=True, padx=10, pady=10)\n", |
||||||
|
"\n", |
||||||
|
"# Create a frame to hold the text widget and scrollbar\n", |
||||||
|
"text_frame = ttk.Frame(answer_frame)\n", |
||||||
|
"text_frame.pack(fill=\"both\", expand=True)\n", |
||||||
|
"\n", |
||||||
|
"# Create the text widget and scrollbar\n", |
||||||
|
"answer_text = tk.Text(text_frame, wrap=tk.WORD, width=70, height=100)\n", |
||||||
|
"scrollbar = ttk.Scrollbar(text_frame, orient=\"vertical\", command=answer_text.yview)\n", |
||||||
|
"answer_text.configure(yscrollcommand=scrollbar.set)\n", |
||||||
|
"\n", |
||||||
|
"# Pack the text widget and scrollbar\n", |
||||||
|
"answer_text.pack(side=\"left\", fill=\"both\", expand=True)\n", |
||||||
|
"scrollbar.pack(side=\"right\", fill=\"y\")\n", |
||||||
|
"\n", |
||||||
|
"# Set initial text and disable editing\n", |
||||||
|
"answer_text.insert(tk.END, \"Your answer will appear here.\")\n", |
||||||
|
"answer_text.configure(state='disabled')\n", |
||||||
|
"\n", |
||||||
|
"# Run the main event loop\n", |
||||||
|
"root.mainloop()\n" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "llms", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 2 |
||||||
|
} |
@ -0,0 +1,126 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d25b0aef-3e5e-4026-90ee-2b373bf262b7", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Step 0: Import libraries and load environment variables\n", |
||||||
|
"import os\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from IPython.display import Markdown, display\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv(\"OPENAI_API_KEY\")\n", |
||||||
|
"\n", |
||||||
|
"if not api_key:\n", |
||||||
|
" print(\"No API key was found!\")\n", |
||||||
|
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||||
|
" print(\"An API key was found, but it does not start with 'sk-proj-'! Please ensure you are using the right key.\")\n", |
||||||
|
"elif api_key.strip() != api_key:\n", |
||||||
|
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end! Please remove them.\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"API key found and looks good so far!\")\n", |
||||||
|
"\n", |
||||||
|
"# Step 1: Create prompts\n", |
||||||
|
"print(\"[INFO] Creating system prompt ...\")\n", |
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of \\\n", |
||||||
|
" email texts and suggests short subject lines for the email based \\\n", |
||||||
|
" on the requested tone and language. Respond in markdown.\"\n", |
||||||
|
"\n", |
||||||
|
"print(\"[INFO] Creating user prompt ...\")\n", |
||||||
|
"user_prompt = \"\"\"\n", |
||||||
|
" The text below is an e-mail text for which you are required to \\\n", |
||||||
|
" provide subject lines. Please provide two snarky, two funny, and \\\n", |
||||||
|
" two formal short subject lines for the email text. Each of the six \\\n", |
||||||
|
" subject lines should be presented in both English and French \\\n", |
||||||
|
" languages, making a total of 12 subject lines. Please provide your \\\n", |
||||||
|
" answer in markdown.\\\n", |
||||||
|
" \n", |
||||||
|
" \\n\\n\n", |
||||||
|
" \n", |
||||||
|
" Welcome to arXiv!\n", |
||||||
|
"\n", |
||||||
|
" Thank you for creating an account and joining the arXiv community. We look\n", |
||||||
|
" forward to receiving your contribution.\n", |
||||||
|
"\n", |
||||||
|
" Help Pages\n", |
||||||
|
" An overview on how to navigate and use arXiv can be found here:\n", |
||||||
|
" https://arxiv.org/help\n", |
||||||
|
" https://arxiv.org/about\n", |
||||||
|
"\n", |
||||||
|
" If you would like to know more about the submission process, please go here:\n", |
||||||
|
" https://arxiv.org/help/submit\n", |
||||||
|
"\n", |
||||||
|
" Before Submitting to arXiv\n", |
||||||
|
" The arXiv.org e-print archive is fully automated and processes nearly\n", |
||||||
|
" 1,000 new submissions per day. To help us keep the process running smoothly\n", |
||||||
|
" and efficiently please check your submission carefully for mistakes, typos\n", |
||||||
|
" and layout issues. Once you have submitted your work please check your account\n", |
||||||
|
" frequently for verification messages and other communication from arXiv.\n", |
||||||
|
"\n", |
||||||
|
" Contacting arXiv\n", |
||||||
|
" We have provided extensive help pages to guide you through the process and\n", |
||||||
|
" to answer the most common questions. If you have problems with the submission\n", |
||||||
|
" process please contact us here:\n", |
||||||
|
" https://arxiv.org/help/contact\n", |
||||||
|
" We aim to assist submitters within one business day, but during times of high\n", |
||||||
|
" volume or maintenance work we may be slightly delayed in our response.\n", |
||||||
|
"\n", |
||||||
|
" Thank you for your cooperation.\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"# Step 2: Make messages list\n", |
||||||
|
"print(\"[INFO] Making messages list ...\")\n", |
||||||
|
"messages = [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||||
|
"]\n", |
||||||
|
"\n", |
||||||
|
"# Step 3: Call OpenAI\n", |
||||||
|
"print(\"[INFO] Calling OpenAI ...\")\n", |
||||||
|
"openai = OpenAI()\n", |
||||||
|
"response = openai.chat.completions.create(\n", |
||||||
|
" model=\"gpt-4o-mini\",\n", |
||||||
|
" messages=messages\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
"# Step 4: Print result\n", |
||||||
|
"print(\"[INFO] Print result ...\")\n", |
||||||
|
"display(Markdown(response.choices[0].message.content))\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "b0a6676e-fb43-4725-9389-2acd74c13c4e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.12.8" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,530 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## DAY1 LLM Project with GROQ!\n", |
||||||
|
"\n", |
||||||
|
"\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"import requests\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"from IPython.display import Markdown, display\n", |
||||||
|
"from groq import Groq\n", |
||||||
|
"\n", |
||||||
|
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "5d899ad6-1428-481b-b308-750308d80442", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"If you are getting error ModuleNotFoundError: No module named 'groq' follow below steps.\n", |
||||||
|
"\n", |
||||||
|
"1. Activate the llms environment from Anaconda, so that (llms) is showing in your prompt, as this is the environment where the package will get installed. Install pip here. \n", |
||||||
|
"\n", |
||||||
|
"(base) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> conda activate llms\n", |
||||||
|
"(llms) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> pip install groq\n", |
||||||
|
"\n", |
||||||
|
"\n", |
||||||
|
"2. After you install a new package, you'd need to restart the Kernel in jupyter lab for each notebook (Kernel >> Restart Kernel and Clear Values Of All Outputs).\n", |
||||||
|
"\n", |
||||||
|
"You can also run this command in jupyter lab to see whether it's installed:\n", |
||||||
|
"\n", |
||||||
|
"!pip show groq\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "99c0c3c9-fa5e-405e-8453-2a557dc60c09", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!pip show groq" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Connecting to GROQ\n", |
||||||
|
"\n", |
||||||
|
"The next cell is where we load in the environment variables in your `.env` file and connect to GROQ.\n", |
||||||
|
"\n", |
||||||
|
".env file should have below entry\n", |
||||||
|
"\n", |
||||||
|
"GROQ_API_KEY=gsk_xxxxxx\n", |
||||||
|
"\n", |
||||||
|
"GROQ keys can be configured by logging in at the link below:\n", |
||||||
|
"https://console.groq.com/keys\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Load environment variables in a file called .env\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv('GROQ_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"# Check the key\n", |
||||||
|
"\n", |
||||||
|
"if not api_key:\n", |
||||||
|
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||||
|
"elif not api_key.startswith(\"gsk_\"):\n", |
||||||
|
"    print(\"An API key was found, but it doesn't start with gsk_; please check you're using the right key - see troubleshooting notebook\")\n", |
||||||
|
"elif api_key.strip() != api_key:\n", |
||||||
|
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"API key found and looks good so far!\")\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"groq = Groq()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Let's make a quick call to a Frontier model to get started, as a preview!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# To give you a preview -- calling Groq with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", |
||||||
|
"\n", |
||||||
|
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", |
||||||
|
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=[{\"role\":\"user\", \"content\":message}])\n", |
||||||
|
"print(response.choices[0].message.content)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "2aa190e5-cb31-456a-96cc-db109919cd78", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## OK onwards with our first project" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c5e793b2-6775-426a-a139-4848291d0463", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A class to represent a Webpage\n", |
||||||
|
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||||
|
"\n", |
||||||
|
"# Some websites need you to use proper headers when fetching them:\n", |
||||||
|
"headers = {\n", |
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||||
|
"}\n", |
||||||
|
"\n", |
||||||
|
"class Website:\n", |
||||||
|
"\n", |
||||||
|
" def __init__(self, url):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" self.url = url\n", |
||||||
|
" response = requests.get(url, headers=headers)\n", |
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||||
|
" irrelevant.decompose()\n", |
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Let's try one out. Change the website and add print statements to follow along.\n", |
||||||
|
"\n", |
||||||
|
"ed = Website(\"https://edwarddonner.com\")\n", |
||||||
|
"print(ed.title)\n", |
||||||
|
"print(ed.text)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Types of prompts\n", |
||||||
|
"\n", |
||||||
|
"You may know this already - but if not, you will get very familiar with it!\n", |
||||||
|
"\n", |
||||||
|
"Models like GPT4o have been trained to receive instructions in a particular way.\n", |
||||||
|
"\n", |
||||||
|
"They expect to receive:\n", |
||||||
|
"\n", |
||||||
|
"**A system prompt** that tells them what task they are performing and what tone they should use\n", |
||||||
|
"\n", |
||||||
|
"**A user prompt** -- the conversation starter that they should reply to" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||||
|
"and provides a short summary, ignoring text that might be navigation related. \\\n", |
||||||
|
"Respond in markdown.\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_for(website):\n", |
||||||
|
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||||
|
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||||
|
"please provide a short summary of this website in markdown. \\\n", |
||||||
|
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||||
|
" user_prompt += website.text\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"print(user_prompt_for(ed))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Messages\n", |
||||||
|
"\n", |
||||||
|
"Similar to the OpenAI API, the GROQ API shares this structure:\n", |
||||||
|
"\n", |
||||||
|
"```\n", |
||||||
|
"[\n", |
||||||
|
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n", |
||||||
|
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n", |
||||||
|
"]\n", |
||||||
|
"```\n", |
||||||
|
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"messages = [\n", |
||||||
|
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", |
||||||
|
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", |
||||||
|
"]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# To give you a preview -- calling Groq with system and user messages:\n", |
||||||
|
"\n", |
||||||
|
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=messages)\n", |
||||||
|
"print(response.choices[0].message.content)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## And now let's build useful messages for LLAMA3.3, using a function" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# See how this function creates exactly the format above\n", |
||||||
|
"\n", |
||||||
|
"def messages_for(website):\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||||
|
" ]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Try this out, and then try for a few more websites\n", |
||||||
|
"\n", |
||||||
|
"messages_for(ed)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Time to bring it together - the API for GROQ is very simple!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# And now: call the GROQ API\n", |
||||||
|
"\n", |
||||||
|
"def summarize(url):\n", |
||||||
|
" website = Website(url)\n", |
||||||
|
" response = groq.chat.completions.create(\n", |
||||||
|
" model = \"llama-3.3-70b-versatile\",\n", |
||||||
|
" messages = messages_for(website)\n", |
||||||
|
" )\n", |
||||||
|
" return response.choices[0].message.content" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"summarize(\"https://edwarddonner.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function to display this nicely in the Jupyter output, using markdown\n", |
||||||
|
"\n", |
||||||
|
"def display_summary(url):\n", |
||||||
|
" summary = summarize(url)\n", |
||||||
|
" display(Markdown(summary))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3018853a-445f-41ff-9560-d925d1774b2f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"display_summary(\"https://edwarddonner.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Let's try more websites\n", |
||||||
|
"\n", |
||||||
|
"Note that this will only work on websites that can be scraped using this simplistic approach.\n", |
||||||
|
"\n", |
||||||
|
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", |
||||||
|
"\n", |
||||||
|
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", |
||||||
|
"\n", |
||||||
|
"But many websites will work just fine!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "45d83403-a24c-44b5-84ac-961449b4008f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"display_summary(\"https://cnn.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "75e9fd40-b354-4341-991e-863ef2e59db7", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"display_summary(\"https://anthropic.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"<table style=\"margin: 0; text-align: left;\">\n", |
||||||
|
" <tr>\n", |
||||||
|
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||||
|
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||||
|
" </td>\n", |
||||||
|
" <td>\n", |
||||||
|
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||||
|
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", |
||||||
|
"\n", |
||||||
|
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n", |
||||||
|
" </td>\n", |
||||||
|
" </tr>\n", |
||||||
|
"</table>\n", |
||||||
|
"\n", |
||||||
|
"<table style=\"margin: 0; text-align: left;\">\n", |
||||||
|
" <tr>\n", |
||||||
|
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||||
|
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||||
|
" </td>\n", |
||||||
|
" <td>\n", |
||||||
|
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n", |
||||||
|
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n", |
||||||
|
" </td>\n", |
||||||
|
" </tr>\n", |
||||||
|
"</table>" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Step 1: Create your prompts\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"something here\"\n", |
||||||
|
"user_prompt = \"\"\"\n", |
||||||
|
" Lots of text\n", |
||||||
|
" Can be pasted here\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"# Step 2: Make the messages list\n", |
||||||
|
"\n", |
||||||
|
"messages = [] # fill this in\n", |
||||||
|
"\n", |
||||||
|
"# Step 3: Call OpenAI\n", |
||||||
|
"\n", |
||||||
|
"response =\n", |
||||||
|
"\n", |
||||||
|
"# Step 4: print the result\n", |
||||||
|
"\n", |
||||||
|
"print(" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## An extra exercise for those who enjoy web scraping\n", |
||||||
|
"\n", |
||||||
|
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Sharing your code\n", |
||||||
|
"\n", |
||||||
|
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", |
||||||
|
"\n", |
||||||
|
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", |
||||||
|
"\n", |
||||||
|
"Here are good instructions courtesy of an AI friend: \n", |
||||||
|
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,530 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## DAY1 LLM Project with GROQ!\n", |
||||||
|
"\n", |
||||||
|
"\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"import requests\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"from IPython.display import Markdown, display\n", |
||||||
|
"from groq import Groq\n", |
||||||
|
"\n", |
||||||
|
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "5d899ad6-1428-481b-b308-750308d80442", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"If you are getting error ModuleNotFoundError: No module named 'groq' follow below steps.\n", |
||||||
|
"\n", |
||||||
|
"1. Activate the llms environment from Anaconda, so that (llms) is showing in your prompt, as this is the environment where the package will get installed. Install pip here. \n", |
||||||
|
"\n", |
||||||
|
"(base) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> conda activate llms\n", |
||||||
|
"(llms) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> pip install groq\n", |
||||||
|
"\n", |
||||||
|
"\n", |
||||||
|
"2. After you install a new package, you'd need to restart the Kernel in jupyter lab for each notebook (Kernel >> Restart Kernel and Clear Values Of All Outputs).\n", |
||||||
|
"\n", |
||||||
|
"You can also run this command in jupyter lab to see whether it's installed:\n", |
||||||
|
"\n", |
||||||
|
"!pip show groq\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "99c0c3c9-fa5e-405e-8453-2a557dc60c09", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!pip show groq" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Connecting to GROQ\n", |
||||||
|
"\n", |
||||||
|
"The next cell is where we load in the environment variables in your `.env` file and connect to GROQ.\n", |
||||||
|
"\n", |
||||||
|
"Your .env file should have the entry below:\n", |
||||||
|
"\n", |
||||||
|
"GROQ_API_KEY=gsk_xxxxxx\n", |
||||||
|
"\n", |
||||||
|
"GROQ keys can be configured by logging in at the link below:\n", |
||||||
|
"https://console.groq.com/keys\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Load environment variables in a file called .env\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv('GROQ_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"# Check the key\n", |
||||||
|
"\n", |
||||||
|
"if not api_key:\n", |
||||||
|
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||||
|
"elif not api_key.startswith(\"gsk_\"):\n", |
||||||
|
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||||
|
"elif api_key.strip() != api_key:\n", |
||||||
|
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"API key found and looks good so far!\")\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"groq = Groq()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Let's make a quick call to a Frontier model to get started, as a preview!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# To give you a preview -- calling Groq with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", |
||||||
|
"\n", |
||||||
|
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", |
||||||
|
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=[{\"role\":\"user\", \"content\":message}])\n", |
||||||
|
"print(response.choices[0].message.content)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "2aa190e5-cb31-456a-96cc-db109919cd78", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## OK onwards with our first project" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c5e793b2-6775-426a-a139-4848291d0463", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A class to represent a Webpage\n", |
||||||
|
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||||
|
"\n", |
||||||
|
"# Some websites need you to use proper headers when fetching them:\n", |
||||||
|
"headers = {\n", |
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||||
|
"}\n", |
||||||
|
"\n", |
||||||
|
"class Website:\n", |
||||||
|
"\n", |
||||||
|
" def __init__(self, url):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" self.url = url\n", |
||||||
|
" response = requests.get(url, headers=headers)\n", |
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||||
|
" irrelevant.decompose()\n", |
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Let's try one out. Change the website and add print statements to follow along.\n", |
||||||
|
"\n", |
||||||
|
"ed = Website(\"https://edwarddonner.com\")\n", |
||||||
|
"print(ed.title)\n", |
||||||
|
"print(ed.text)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Types of prompts\n", |
||||||
|
"\n", |
||||||
|
"You may know this already - but if not, you will get very familiar with it!\n", |
||||||
|
"\n", |
||||||
|
"Models like GPT4o have been trained to receive instructions in a particular way.\n", |
||||||
|
"\n", |
||||||
|
"They expect to receive:\n", |
||||||
|
"\n", |
||||||
|
"**A system prompt** that tells them what task they are performing and what tone they should use\n", |
||||||
|
"\n", |
||||||
|
"**A user prompt** -- the conversation starter that they should reply to" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||||
|
"and provides a short summary, ignoring text that might be navigation related. \\\n", |
||||||
|
"Respond in markdown.\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_for(website):\n", |
||||||
|
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||||
|
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||||
|
"please provide a short summary of this website in markdown. \\\n", |
||||||
|
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||||
|
" user_prompt += website.text\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"print(user_prompt_for(ed))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Messages\n", |
||||||
|
"\n", |
||||||
|
"Similar to OpenAI's API, GROQ's API shares this structure:\n", |
||||||
|
"\n", |
||||||
|
"```\n", |
||||||
|
"[\n", |
||||||
|
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n", |
||||||
|
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n", |
||||||
|
"]\n", |
||||||
|
"```\n", |
||||||
|
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"messages = [\n", |
||||||
|
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n", |
||||||
|
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n", |
||||||
|
"]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# To give you a preview -- calling Groq with system and user messages:\n", |
||||||
|
"\n", |
||||||
|
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=messages)\n", |
||||||
|
"print(response.choices[0].message.content)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## And now let's build useful messages for LLAMA3.3, using a function" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# See how this function creates exactly the format above\n", |
||||||
|
"\n", |
||||||
|
"def messages_for(website):\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||||
|
" ]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Try this out, and then try for a few more websites\n", |
||||||
|
"\n", |
||||||
|
"messages_for(ed)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Time to bring it together - the API for GROQ is very simple!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# And now: call the GROQ API\n", |
||||||
|
"\n", |
||||||
|
"def summarize(url):\n", |
||||||
|
" website = Website(url)\n", |
||||||
|
" response = groq.chat.completions.create(\n", |
||||||
|
" model = \"llama-3.3-70b-versatile\",\n", |
||||||
|
" messages = messages_for(website)\n", |
||||||
|
" )\n", |
||||||
|
" return response.choices[0].message.content" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"summarize(\"https://edwarddonner.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function to display this nicely in the Jupyter output, using markdown\n", |
||||||
|
"\n", |
||||||
|
"def display_summary(url):\n", |
||||||
|
" summary = summarize(url)\n", |
||||||
|
" display(Markdown(summary))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3018853a-445f-41ff-9560-d925d1774b2f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"display_summary(\"https://edwarddonner.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Let's try more websites\n", |
||||||
|
"\n", |
||||||
|
"Note that this will only work on websites that can be scraped using this simplistic approach.\n", |
||||||
|
"\n", |
||||||
|
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n", |
||||||
|
"\n", |
||||||
|
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n", |
||||||
|
"\n", |
||||||
|
"But many websites will work just fine!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "45d83403-a24c-44b5-84ac-961449b4008f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"display_summary(\"https://cnn.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "75e9fd40-b354-4341-991e-863ef2e59db7", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"display_summary(\"https://anthropic.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"<table style=\"margin: 0; text-align: left;\">\n", |
||||||
|
" <tr>\n", |
||||||
|
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||||
|
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||||
|
" </td>\n", |
||||||
|
" <td>\n", |
||||||
|
" <h2 style=\"color:#181;\">Business applications</h2>\n", |
||||||
|
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n", |
||||||
|
"\n", |
||||||
|
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n", |
||||||
|
" </td>\n", |
||||||
|
" </tr>\n", |
||||||
|
"</table>\n", |
||||||
|
"\n", |
||||||
|
"<table style=\"margin: 0; text-align: left;\">\n", |
||||||
|
" <tr>\n", |
||||||
|
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||||
|
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||||
|
" </td>\n", |
||||||
|
" <td>\n", |
||||||
|
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n", |
||||||
|
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n", |
||||||
|
" </td>\n", |
||||||
|
" </tr>\n", |
||||||
|
"</table>" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Step 1: Create your prompts\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"something here\"\n", |
||||||
|
"user_prompt = \"\"\"\n", |
||||||
|
" Lots of text\n", |
||||||
|
" Can be pasted here\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"# Step 2: Make the messages list\n", |
||||||
|
"\n", |
||||||
|
"messages = [] # fill this in\n", |
||||||
|
"\n", |
||||||
|
"# Step 3: Call OpenAI\n", |
||||||
|
"\n", |
||||||
|
"response =\n", |
||||||
|
"\n", |
||||||
|
"# Step 4: print the result\n", |
||||||
|
"\n", |
||||||
|
"print(" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## An extra exercise for those who enjoy web scraping\n", |
||||||
|
"\n", |
||||||
|
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Sharing your code\n", |
||||||
|
"\n", |
||||||
|
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n", |
||||||
|
"\n", |
||||||
|
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n", |
||||||
|
"\n", |
||||||
|
"Here are good instructions courtesy of an AI friend: \n", |
||||||
|
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,233 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "1b8f7ac7-7089-427a-8f63-57211da7e691", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Summarizing Research Papers" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "641d5c00-ff09-4697-9c87-5de5df1469f8", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"import requests\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"from IPython.display import Markdown, display\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"\n", |
||||||
|
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "1a6a2864-fd9d-43e2-b0ca-1476c0153077", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Load environment variables in a file called .env\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"# Check the key\n", |
||||||
|
"\n", |
||||||
|
"if not api_key:\n", |
||||||
|
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||||
|
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||||
|
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||||
|
"elif api_key.strip() != api_key:\n", |
||||||
|
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"API key found and looks good so far!\")\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "340e3166-5aa7-4bcf-9cf0-e2fc776dc322", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"openai = OpenAI()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "73198fb7-581f-42ac-99a6-76c56c86248d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A class to represent a Webpage\n", |
||||||
|
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||||
|
"\n", |
||||||
|
"# Some websites need you to use proper headers when fetching them:\n", |
||||||
|
"headers = {\n", |
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||||
|
"}\n", |
||||||
|
"\n", |
||||||
|
"class Paper:\n", |
||||||
|
"\n", |
||||||
|
" def __init__(self, url):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" self.url = url\n", |
||||||
|
" response = requests.get(url, headers=headers)\n", |
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||||
|
" irrelevant.decompose()\n", |
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3b39c3ad-d238-418e-9e6a-55a4fd717ebc", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Insert Paper URL\n", |
||||||
|
"res = Paper(\" \")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "83bc1eec-4187-4c6c-b188-3f72564351f1", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"system_prompt = \"\"\"You are a research paper summarizer. You take the url of the research paper and extract the following:\n", |
||||||
|
"1) Title and Author of the research paper.\n", |
||||||
|
"2) Year it was published it\n", |
||||||
|
"3) Objective or aim of the research to specify why the research was conducted\n", |
||||||
|
"4) Background or Introduction to explain the need to conduct this research or any topics the readers must have knowledge about\n", |
||||||
|
"5) Type of research/study/experiment to explain what kind of research it is.\n", |
||||||
|
"6) Methods or methodology to explain what the researchers did to conduct the research\n", |
||||||
|
"7) Results and key findings to explain what the researchers found\n", |
||||||
|
"8) Conclusion tells about the conclusions that can be drawn from this research including limitations and future direction\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4aba1b51-9a72-4325-8c86-3968b9d3172e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_for(paper):\n", |
||||||
|
" user_prompt = f\"You are looking at a website titled {paper.title}\"\n", |
||||||
|
" user_prompt += \"\\nThe contents of this paper is as follows; \\\n", |
||||||
|
"please provide a short summary of this paper in markdown. \\\n", |
||||||
|
"If it includes additional headings, then summarize these too.\\n\\n\"\n", |
||||||
|
" user_prompt += paper.text\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "659cb3c4-8a02-493d-abe7-20da9219e358", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# See how this function creates exactly the format above\n", |
||||||
|
"def messages_for(paper):\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(paper)}\n", |
||||||
|
" ]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "08ea1193-1bbb-40de-ba64-d02ffe109372", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"messages_for(res)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e07d00e7-1b87-4ca8-a69d-4a206e34a2b2", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# And now: call the OpenAI API. You will get very familiar with this!\n", |
||||||
|
"\n", |
||||||
|
"def summarize(url):\n", |
||||||
|
" paper = Paper(url)\n", |
||||||
|
" response = openai.chat.completions.create(\n", |
||||||
|
" model = \"gpt-4o-mini\",\n", |
||||||
|
" messages = messages_for(paper)\n", |
||||||
|
" )\n", |
||||||
|
" return response.choices[0].message.content" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "5c12df95-1700-47ee-891b-96b0a7227bdd", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function to display this nicely in the Jupyter output, using markdown\n", |
||||||
|
"\n", |
||||||
|
"def display_summary(url):\n", |
||||||
|
" summary = summarize(url)\n", |
||||||
|
" display(Markdown(summary))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "05cff05f-2b74-44a4-9dbd-57c08f8f56cb", |
||||||
|
"metadata": { |
||||||
|
"scrolled": true |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Insert Paper URL in the quotes below\n", |
||||||
|
"display_summary(\" \")" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,224 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"import requests\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"from IPython.display import Markdown, display\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"\n", |
||||||
|
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Connecting to OpenAI\n", |
||||||
|
"\n", |
||||||
|
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n", |
||||||
|
"\n", |
||||||
|
"## Troubleshooting if you have problems:\n", |
||||||
|
"\n", |
||||||
|
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n", |
||||||
|
"\n", |
||||||
|
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n", |
||||||
|
"\n", |
||||||
|
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n", |
||||||
|
"\n", |
||||||
|
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Load environment variables in a file called .env\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv()\n", |
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"# Check the key\n", |
||||||
|
"\n", |
||||||
|
"if not api_key:\n", |
||||||
|
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||||
|
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||||
|
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||||
|
"elif api_key.strip() != api_key:\n", |
||||||
|
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"API key found and looks good so far!\")\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"openai = OpenAI()\n", |
||||||
|
"\n", |
||||||
|
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n", |
||||||
|
"# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:\n", |
||||||
|
"# openai = OpenAI(api_key=\"your-key-here-starting-sk-proj-\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||||
|
"and provides a short summary, ignoring text that might be navigation related. \\\n", |
||||||
|
"Respond in markdown.\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_for(website):\n", |
||||||
|
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||||
|
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||||
|
"please provide a short summary of this website in markdown. \\\n", |
||||||
|
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||||
|
" user_prompt += website.text\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## An extra exercise for those who enjoy web scraping\n", |
||||||
|
"\n", |
||||||
|
"You may notice that if you try the course example with \"https://openai.com\" - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. Below an example created with Playwright." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "dca2768e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"! pip install playwright\n", |
||||||
|
"! playwright install" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "682eff74-55c4-4d4b-b267-703edbc293c7", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import asyncio\n", |
||||||
|
"from playwright.async_api import async_playwright\n", |
||||||
|
"import nest_asyncio\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"import time\n", |
||||||
|
"\n", |
||||||
|
"nest_asyncio.apply()\n", |
||||||
|
"\n", |
||||||
|
"class Website:\n", |
||||||
|
" title: str\n", |
||||||
|
" text: str\n", |
||||||
|
" url: str\n", |
||||||
|
"\n", |
||||||
|
" def __init__(self, url):\n", |
||||||
|
" self.url = url\n", |
||||||
|
" \n", |
||||||
|
" async def run(self, playwright):\n", |
||||||
|
" browser = await playwright.chromium.launch(headless=False)\n", |
||||||
|
" page = await browser.new_page()\n", |
||||||
|
" await page.goto(self.url)\n", |
||||||
|
" await page.wait_for_load_state('load')\n", |
||||||
|
" \n", |
||||||
|
" # Extract data from the page\n", |
||||||
|
" self.title = await page.title()\n", |
||||||
|
" text = await page.content()\n", |
||||||
|
" await browser.close()\n", |
||||||
|
" \n", |
||||||
|
" soup = BeautifulSoup(text, 'html.parser')\n", |
||||||
|
" for irrelevant in soup([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||||
|
" irrelevant.decompose()\n", |
||||||
|
" self.text = soup.get_text(separator=\"\\n\", strip=True)\n", |
||||||
|
" \n", |
||||||
|
" async def main(self):\n", |
||||||
|
" async with async_playwright() as playwright:\n", |
||||||
|
" await self.run(playwright) \n", |
||||||
|
" \n", |
||||||
|
"def messages_for(website):\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||||
|
" ]\n", |
||||||
|
"\n", |
||||||
|
"if __name__ == \"__main__\":\n", |
||||||
|
" site = Website('https://openai.com')\n", |
||||||
|
" asyncio.run(site.main())\n", |
||||||
|
" response = openai.chat.completions.create(\n", |
||||||
|
" model = \"gpt-4o-mini\",\n", |
||||||
|
" messages = messages_for(site)\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
" web_summary = response.choices[0].message.content\n", |
||||||
|
" display(Markdown(web_summary))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "69218dec-749c-412d-84a0-40a10fd80c73", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,170 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 8, |
||||||
|
"id": "6ba7c60a-c338-49a1-b1ba-46b7c20e33cb", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import openai\n", |
||||||
|
"import os\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"from IPython.display import Markdown, display" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 2, |
||||||
|
"id": "4acb4062-17b2-43b1-8b74-aefaa9599463", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [ |
||||||
|
{ |
||||||
|
"name": "stdout", |
||||||
|
"output_type": "stream", |
||||||
|
"text": [ |
||||||
|
"API key found and looks good so far!\n" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"source": [ |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"# Check the key\n", |
||||||
|
"\n", |
||||||
|
"if not api_key:\n", |
||||||
|
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", |
||||||
|
"elif not api_key.startswith(\"sk-proj-\"):\n", |
||||||
|
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", |
||||||
|
"elif api_key.strip() != api_key:\n", |
||||||
|
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"API key found and looks good so far!\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 5, |
||||||
|
"id": "56f011b2-b759-4ad6-9d01-870fbcb8ade1", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def generate_quiz(topic):\n", |
||||||
|
" prompt = f\"Generate a multiple-choice quiz with 5 questions on the topic: {topic}. Include the correct answer for each question.\"\n", |
||||||
|
" \n", |
||||||
|
" messages = [\n", |
||||||
|
" {\"role\": \"system\", \"content\": \"You are a quiz generator. Create a multiple-choice quiz with 5 questions and provide the correct answers.Respond in markdown.\"},\n", |
||||||
|
" {\"role\": \"user\", \"content\": prompt}\n", |
||||||
|
" ]\n", |
||||||
|
" \n", |
||||||
|
" response = openai.chat.completions.create(\n", |
||||||
|
" model=\"gpt-4\",\n", |
||||||
|
" messages=messages,\n", |
||||||
|
" max_tokens=300\n", |
||||||
|
" )\n", |
||||||
|
" \n", |
||||||
|
" return response.choices[0].message.content" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 10, |
||||||
|
"id": "1cf977e7-b04b-49e7-8b0a-d0ab2800c234", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [ |
||||||
|
{ |
||||||
|
"data": { |
||||||
|
"text/markdown": [ |
||||||
|
"**Question 1:** What is Python?\n", |
||||||
|
"\n", |
||||||
|
"**Choice A:** A type of snake\n", |
||||||
|
"**Choice B:** A medical term\n", |
||||||
|
"**Choice C:** A drilling tool\n", |
||||||
|
"**Choice D:** A high-level programming language\n", |
||||||
|
"\n", |
||||||
|
"Correct Answer: **Choice D:** A high-level programming language\n", |
||||||
|
"\n", |
||||||
|
"**Question 2:** In Python, what keyword is used to create a function?\n", |
||||||
|
"\n", |
||||||
|
"**Choice A:** func\n", |
||||||
|
"**Choice B:** def\n", |
||||||
|
"**Choice C:** function\n", |
||||||
|
"**Choice D:** create\n", |
||||||
|
"\n", |
||||||
|
"Correct Answer: **Choice B:** def\n", |
||||||
|
"\n", |
||||||
|
"**Question 3:** What is the correct syntax to output \"Hello World\" in Python?\n", |
||||||
|
"\n", |
||||||
|
"**Choice A:** printf(\"Hello World\")\n", |
||||||
|
"**Choice B:** println(\"Hello World\")\n", |
||||||
|
"**Choice C:** echo(\"Hello World\")\n", |
||||||
|
"**Choice D:** print(\"Hello World\")\n", |
||||||
|
"\n", |
||||||
|
"Correct Answer: **Choice D:** print(\"Hello World\")\n", |
||||||
|
"\n", |
||||||
|
"**Question 4:** How would you create a variable \"x\" that equals 5 in Python?\n", |
||||||
|
"\n", |
||||||
|
"**Choice A:** var x = 5\n", |
||||||
|
"**Choice B:** x := 5\n", |
||||||
|
"**Choice C:** x = 5\n", |
||||||
|
"**Choice D:** x : 5\n", |
||||||
|
"\n", |
||||||
|
"Correct Answer: **Choice C:** x = 5\n", |
||||||
|
"\n", |
||||||
|
"**Question 5:** How do you create a comment in Python?\n", |
||||||
|
"\n", |
||||||
|
"**Choice A:** // This is a comment\n", |
||||||
|
"**Choice B:** # This is a comment\n", |
||||||
|
"**Choice C:** <!-- This is a comment -->\n", |
||||||
|
"**Choice D:** /* This is a comment */\n", |
||||||
|
"\n", |
||||||
|
"Correct Answer" |
||||||
|
], |
||||||
|
"text/plain": [ |
||||||
|
"<IPython.core.display.Markdown object>" |
||||||
|
] |
||||||
|
}, |
||||||
|
"metadata": {}, |
||||||
|
"output_type": "display_data" |
||||||
|
} |
||||||
|
], |
||||||
|
"source": [ |
||||||
|
"# Example usage\n", |
||||||
|
"topic = \"Python programming\"\n", |
||||||
|
"quiz = generate_quiz(topic)\n", |
||||||
|
"display(Markdown(quiz))" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "70990d7c-6061-43c6-b3c9-9146a3c51c3e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,192 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "e3ce0a59-fbfb-4377-85db-f62f95039200", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Day2 EXERCISE - Summarization using Ollama" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"import requests\n", |
||||||
|
"from bs4 import BeautifulSoup\n", |
||||||
|
"from IPython.display import Markdown, display" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "29ddd15d-a3c5-4f4e-a678-873f56162724", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Constants\n", |
||||||
|
"\n", |
||||||
|
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n", |
||||||
|
"HEADERS = {\"Content-Type\": \"application/json\"}\n", |
||||||
|
"MODEL = \"llama3.2\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "cb5c0f84-4e4d-4f87-b492-e09d0333a638", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A class to represent a Webpage\n", |
||||||
|
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", |
||||||
|
"\n", |
||||||
|
"# Some websites need you to use proper headers when fetching them:\n", |
||||||
|
"headers = {\n", |
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", |
||||||
|
"}\n", |
||||||
|
"\n", |
||||||
|
"class Website:\n", |
||||||
|
"\n", |
||||||
|
" def __init__(self, url):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Create this Website object from the given url using the BeautifulSoup library\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" self.url = url\n", |
||||||
|
" response = requests.get(url, headers=headers)\n", |
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n", |
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n", |
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", |
||||||
|
" irrelevant.decompose()\n", |
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "23457b52-c85b-4dc1-b946-6f1461dc0675", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"\n", |
||||||
|
"ed = Website(\"https://edwarddonner.com\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "bed206ed-43c1-4f68-ad01-a738b3b4648d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", |
||||||
|
"and provides a short summary, ignoring text that might be navigation related. \\\n", |
||||||
|
"Respond in markdown.\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e558f381-614a-461f-83bc-e5bdc99460df", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# A function that writes a User Prompt that asks for summaries of websites:\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_for(website):\n", |
||||||
|
" user_prompt = f\"You are looking at a website titled {website.title}\"\n", |
||||||
|
" user_prompt += \"\\nThe contents of this website is as follows; \\\n", |
||||||
|
"please provide a short summary of this website in markdown. \\\n", |
||||||
|
"If it includes news or announcements, then summarize these too.\\n\\n\"\n", |
||||||
|
" user_prompt += website.text\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e5ba638d-aeb9-441e-a62a-8e8027ad8439", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# See how this function creates exactly the format above\n", |
||||||
|
"\n", |
||||||
|
"def messages_for(website):\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", |
||||||
|
" ]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e85ca2ec-3e46-4b8f-9c2f-66e7d20138fa", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#website search\n", |
||||||
|
"\n", |
||||||
|
"ed = Website(\"https://edwarddonner.com\")\n", |
||||||
|
"messages=messages_for(ed)\n", |
||||||
|
"\n", |
||||||
|
"payload = {\n", |
||||||
|
" \"model\": MODEL,\n", |
||||||
|
" \"messages\": messages,\n", |
||||||
|
" \"stream\": False\n", |
||||||
|
" }" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "7745b9c4-57dc-4867-9180-61fa5db55eb8", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import ollama\n", |
||||||
|
"\n", |
||||||
|
"response = ollama.chat(model=MODEL, messages=messages)\n", |
||||||
|
"print(response['message']['content'])" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "402d5686-4e76-4110-b65a-b3906c35c0a4", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
Binary file not shown.
@ -0,0 +1,308 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "it1JLoxrSqO1", |
||||||
|
"metadata": { |
||||||
|
"id": "it1JLoxrSqO1" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!pip install openai python-docx python-dotenv" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "950a084a-7f92-4669-af62-f07cb121da56", |
||||||
|
"metadata": { |
||||||
|
"id": "950a084a-7f92-4669-af62-f07cb121da56" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import os\n", |
||||||
|
"import json\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from IPython.display import Markdown, display, update_display\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"from docx import Document" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d", |
||||||
|
"metadata": { |
||||||
|
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"class ReqDoc:\n", |
||||||
|
" def __init__(self, file_path):\n", |
||||||
|
" self.file_path = file_path\n", |
||||||
|
"\n", |
||||||
|
" def extract(self):\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" Reads the content of a .docx file and returns the paragraphs as a list of strings.\n", |
||||||
|
" \"\"\"\n", |
||||||
|
" try:\n", |
||||||
|
" # Check if the file exists\n", |
||||||
|
" if not os.path.exists(self.file_path):\n", |
||||||
|
" raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n", |
||||||
|
"\n", |
||||||
|
" # Attempt to open and read the document\n", |
||||||
|
" doc = Document(self.file_path)\n", |
||||||
|
" text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n", |
||||||
|
" return text\n", |
||||||
|
"\n", |
||||||
|
" except FileNotFoundError as fnf_error:\n", |
||||||
|
" print(fnf_error)\n", |
||||||
|
" return None\n", |
||||||
|
" except Exception as e:\n", |
||||||
|
" print(f\"An error occurred: {e}\")\n", |
||||||
|
" return None\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9", |
||||||
|
"metadata": { |
||||||
|
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Initialize and constants\n", |
||||||
|
"load_dotenv(override=True)\n", |
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"\n", |
||||||
|
"if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n", |
||||||
|
" print(\"API key looks good!\")\n", |
||||||
|
"else:\n", |
||||||
|
" print(\"There might be a problem with your API key. Please check!\")\n", |
||||||
|
" \n", |
||||||
|
"MODEL = 'gpt-4o-mini'\n", |
||||||
|
"openai = OpenAI()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb", |
||||||
|
"metadata": { |
||||||
|
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Set up system prompt for extracting just the requirements from the document\n", |
||||||
|
"\n", |
||||||
|
"req_doc_system_prompt = \"You are provided with a complete requirements specifications document. \\\n", |
||||||
|
"You are able to decide which content from that document are related to actual requirements, identify each requirement as \\\n", |
||||||
|
"functional or non-functional and list them all.\\n\"\n", |
||||||
|
"req_doc_system_prompt += \"If the document is empty or do not contain requirements or if you cannot extract them, please respond as such.\\\n", |
||||||
|
"Do not make up your own requirements. \\n\"\n", |
||||||
|
"req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n", |
||||||
|
"req_doc_system_prompt += \"\"\"\n", |
||||||
|
"{\n", |
||||||
|
" \"requirements\": [\n", |
||||||
|
" {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n", |
||||||
|
" {\"RequirementNo\": \"FR-02\": \"Requirement Description\": \"description of this functional requirement goes here\"},\n", |
||||||
|
" {\"RequirementNo\": \"NFR-01\": \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n", |
||||||
|
" {\"RequirementNo\": \"NFR-02\": \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n", |
||||||
|
" ]\n", |
||||||
|
"}\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "20460e45-c1b7-4dc4-ab07-932235c19895", |
||||||
|
"metadata": { |
||||||
|
"id": "20460e45-c1b7-4dc4-ab07-932235c19895" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Set up user prompt, sending in the requirements doc as input and calling the ReqDoc.extract function. Key to note here is the explicit instructions to\n", |
||||||
|
"#respond in JSON format.\n", |
||||||
|
"\n", |
||||||
|
"def req_doc_user_prompt(doc):\n", |
||||||
|
" user_prompt = \"Here is the contents from a requirement document.\\n\"\n", |
||||||
|
" user_prompt += f\"{doc.extract()} \\n\"\n", |
||||||
|
" user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n", |
||||||
|
"paragraphs such as Approvers, table of contents and similar sections which are not really requirements.\\\n", |
||||||
|
"You must respond in a JSON format\"\n", |
||||||
|
" user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n", |
||||||
|
" user_prompt = user_prompt[:25_000] # Truncate if more than 25,000 characters\n", |
||||||
|
" return user_prompt\n", |
||||||
|
"\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891", |
||||||
|
"metadata": { |
||||||
|
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Function to call chatgpt-4o-mini model with the user and system prompts set above and returning the json formatted result obtained from chatgpt\n", |
||||||
|
"\n", |
||||||
|
"def get_requirements(doc):\n", |
||||||
|
" reqdoc = ReqDoc(doc)\n", |
||||||
|
" response = openai.chat.completions.create(\n", |
||||||
|
" model=MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": req_doc_system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n", |
||||||
|
" ],\n", |
||||||
|
" response_format={\"type\": \"json_object\"}\n", |
||||||
|
" )\n", |
||||||
|
" result = response.choices[0].message.content\n", |
||||||
|
" return json.loads(result)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e", |
||||||
|
"metadata": { |
||||||
|
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Uncomment and run this if you want to see the extracted requriements in json format.\n", |
||||||
|
"#get_requirements(\"reqdoc.docx\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "1fe8618c-1dfe-4030-bad8-405731294c93", |
||||||
|
"metadata": { |
||||||
|
"id": "1fe8618c-1dfe-4030-bad8-405731294c93" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"### Next, we will make another call to gpt-4o-mini" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b", |
||||||
|
"metadata": { |
||||||
|
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#Set up system prompt to ask for test cases in table format\n", |
||||||
|
"\n", |
||||||
|
"system_prompt = \"You are an assitant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \\\n", |
||||||
|
"You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n", |
||||||
|
"The table will contain the following columns. \\\n", |
||||||
|
"1.S No\\\n", |
||||||
|
"2.Requirement No\\\n", |
||||||
|
"3.Requirement Description\\\n", |
||||||
|
"4.Test Case ID\\\n", |
||||||
|
"5.Test case summary\\\n", |
||||||
|
"6.Test case description\\\n", |
||||||
|
"7.Success criteria \\n\"\n", |
||||||
|
"system_prompt += \"If you are provided with an empty list, ask for a proper requirement doc\\n\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5", |
||||||
|
"metadata": { |
||||||
|
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Set up user prompt passing in the req doc file. This in turn will call the get_requirements function, which will make a call to chatgpt.\n", |
||||||
|
"\n", |
||||||
|
"def get_testcase_user_prompt(reqdoc):\n", |
||||||
|
" user_prompt = \"You are looking at the following list of requirements. \\n\"\n", |
||||||
|
" user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n", |
||||||
|
" user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n", |
||||||
|
" user_prompt += user_prompt[:25000]\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "59d859e2-e5bb-4bd6-ab59-5ad967d5d2e0", |
||||||
|
"metadata": { |
||||||
|
"id": "59d859e2-e5bb-4bd6-ab59-5ad967d5d2e0" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#This is the 2nd call to chatgpt to get test cases. display(Markdown) will take care of producing a neatly formatted table output.\n", |
||||||
|
"def create_testcase_doc(reqdoc):\n", |
||||||
|
" stream = openai.chat.completions.create(\n", |
||||||
|
" model=MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": get_testcase_user_prompt(reqdoc)}\n", |
||||||
|
" ],\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
" response = \"\"\n", |
||||||
|
" display_handle = display(Markdown(\"\"), display_id=True)\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" response += chunk.choices[0].delta.content or ''\n", |
||||||
|
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", |
||||||
|
" update_display(Markdown(response), display_id=display_handle.display_id)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0612d662-7047-4620-aa1c-2eb1c3d715cb", |
||||||
|
"metadata": { |
||||||
|
"id": "0612d662-7047-4620-aa1c-2eb1c3d715cb" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"#The final piece of code. Provide the uploaded requirements filename below.\n", |
||||||
|
"file_path = r\"reqdoc.docx\"\n", |
||||||
|
"#print(file_path)\n", |
||||||
|
"create_testcase_doc(file_path)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "82ae4371-22dd-4f2a-97c9-a70e0232a0aa", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"colab": { |
||||||
|
"provenance": [] |
||||||
|
}, |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.13.1" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,45 @@ |
|||||||
|
import ollama, os |
||||||
|
from openai import OpenAI |
||||||
|
from dotenv import load_dotenv |
||||||
|
from IPython.display import Markdown, display |
||||||
|
|
||||||
|
# Load environment variables (expects OPENAI_API_KEY in a .env file).
load_dotenv()

open_key = os.getenv("OPENAI_API_KEY")

# Model identifiers for the two backends (OpenAI cloud vs. local Ollama).
OPEN_MODEL = "gpt-4-turbo"
ollama_model = "llama3.2"
openai = OpenAI()

# Shared system prompt for both explainers.
# Fixes vs. original: "markdown down with" -> "markdown with", and removed the
# stray trailing space inside the quoted phrase 'no possible improvement '.
system_prompt = "You are an assistant that focuses on the reason for each code, analysing and interpreting what the code does and how it could be improved, \
Give your answer in markdown with two different topics namely: Explanation and Code Improvement. However if you think there is no possible improvement \
to said code, simply state 'no possible improvement'"
||||||
|
|
||||||
|
def user_prompt():
    """Read a prompt message from the user on stdin and return it."""
    return input("Write your prompt message: ")
||||||
|
|
||||||
|
def explain():
    """Ask OpenAI to explain/improve the user's code and render the reply as Markdown.

    Side effects: prompts on stdin (via user_prompt), calls the OpenAI chat API,
    and displays the result in the notebook output area.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt()},
    ]
    completion = openai.chat.completions.create(model=OPEN_MODEL, messages=messages)
    display(Markdown(completion.choices[0].message.content))
||||||
|
|
||||||
|
# explain() — run this to get the OpenAI output with personalized input
||||||
|
|
||||||
|
# With ollama
# BUG FIX: the local Ollama server speaks plain HTTP on port 11434; the original
# "https://" scheme would fail the TLS handshake against a default local install.
ollama_api = "http://localhost:11434/api/chat"
||||||
|
|
||||||
|
def explainer_with_ollama():
    """Ask the local Ollama model to explain/improve the user's code and render it.

    Side effects: prompts on stdin (via user_prompt), calls the local Ollama
    chat API, and displays the Markdown result in the notebook output area.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt()},
    ]
    reply = ollama.chat(model=ollama_model, messages=messages)
    display(Markdown(reply["message"]["content"]))
||||||
|
|
||||||
|
#explainer_with_ollama() run for ollama output with same personalized input |
@ -0,0 +1,182 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Import Required Libraries" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 49, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import os\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"import gradio as gr" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Load Environment Variables" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 50, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"load_dotenv()\n", |
||||||
|
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", |
||||||
|
"if not openai_api_key:\n", |
||||||
|
" print(\"OpenAI API Key not set\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Initialize OpenAI Client and Define Model" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 51, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"openai = OpenAI()\n", |
||||||
|
"MODEL = 'gpt-4o-mini'" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Define the System Message" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 52, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"system_message = (\n", |
||||||
|
" \"You are a helpful assistant, trying your best to answer every question as accurately as possible. \"\n", |
||||||
|
" \"You are also free to say you do not know if you do not have the information to answer a question. \"\n", |
||||||
|
" \"You always respond in markdown.\"\n", |
||||||
|
")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Define the Chat Function" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 53, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def chat(message, history):\n", |
||||||
|
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", |
||||||
|
"\n", |
||||||
|
" stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n", |
||||||
|
"\n", |
||||||
|
" response = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" response += chunk.choices[0].delta.content or ''\n", |
||||||
|
" yield response" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Create the Chat Interface" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 54, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"demo = gr.ChatInterface(\n", |
||||||
|
" fn=chat,\n", |
||||||
|
" title=\"AI chatbot\",\n", |
||||||
|
" description=\"Please login to use the chat interface\",\n", |
||||||
|
" type='messages',\n", |
||||||
|
")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"auth_data is a list of tuples, where each tuple contains a username and password." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": 55, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"auth_data = [(\"user_1\", \"password_1\"), (\"user_2\", \"password_2\"), (\"user_3\", \"password_3\")]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"Add Authentication and Launch\n", |
||||||
|
"\n", |
||||||
|
"auth_message is the message displayed to users before accessing the interface." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"demo.launch(share=True,\n", |
||||||
|
" auth=auth_data,\n", |
||||||
|
" auth_message=\"Please enter your credentials to access the chat interface\",\n", |
||||||
|
")" |
||||||
|
] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "llms", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 2 |
||||||
|
} |
@ -0,0 +1,908 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# Code Generator\n", |
||||||
|
"\n", |
||||||
|
"The requirement: use an Open Source model to generate high performance C++ code from Python code\n", |
||||||
|
"\n", |
||||||
|
"To replicate this, you'll need to set up a HuggingFace endpoint as I do in the video. It's simple to do, and it's quite satisfying to see the results!\n", |
||||||
|
"\n", |
||||||
|
"It's also an important part of your learning; this is the first example of deploying an open source model to be behind an API. We'll return to this in Week 8, but this should plant a seed in your mind for what's involved in moving open source models into production." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "22e1567b-33fd-49e7-866e-4b635d15715a", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"<table style=\"margin: 0; text-align: left;\">\n", |
||||||
|
" <tr>\n", |
||||||
|
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||||
|
" <img src=\"../../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||||
|
" </td>\n", |
||||||
|
" <td>\n", |
||||||
|
" <h1 style=\"color:#900;\">Important - Pause Endpoints when not in use</h1>\n", |
||||||
|
" <span style=\"color:#900;\">\n", |
||||||
|
" If you do decide to use HuggingFace endpoints for this project, you should stop or pause the endpoints when you are done to avoid accruing unnecessary running cost. The costs are very low as long as you only run the endpoint when you're using it. Navigate to the HuggingFace endpoint UI <a href=\"https://ui.endpoints.huggingface.co/\">here,</a> open your endpoint, and click Pause to put it on pause so you no longer pay for it. \n", |
||||||
|
"Many thanks to student John L. for raising this.\n", |
||||||
|
"<br/><br/>\n", |
||||||
|
"In week 8 we will use Modal instead of HuggingFace endpoints; with Modal you only pay for the time that you use it and you should get free credits.\n", |
||||||
|
" </span>\n", |
||||||
|
" </td>\n", |
||||||
|
" </tr>\n", |
||||||
|
"</table>" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"\n", |
||||||
|
"import os\n", |
||||||
|
"import io\n", |
||||||
|
"import sys\n", |
||||||
|
"import json\n", |
||||||
|
"import requests\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"import google.generativeai as genai\n", |
||||||
|
"import anthropic\n", |
||||||
|
"from IPython.display import Markdown, display, update_display\n", |
||||||
|
"import gradio as gr\n", |
||||||
|
"import subprocess" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4f672e1c-87e9-4865-b760-370fa605e614", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# environment\n", |
||||||
|
"\n", |
||||||
|
"load_dotenv()\n", |
||||||
|
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", |
||||||
|
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n", |
||||||
|
"os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')\n", |
||||||
|
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# initialize\n", |
||||||
|
"\n", |
||||||
|
"openai = OpenAI()\n", |
||||||
|
"claude = anthropic.Anthropic()\n", |
||||||
|
"OPENAI_MODEL = \"gpt-4o\"\n", |
||||||
|
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n", |
||||||
|
"GEMINI_MODEL = 'gemini-1.5-pro'" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "6896636f-923e-4a2c-9d6c-fac07828a201", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n", |
||||||
|
"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n", |
||||||
|
"system_message += \"The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def user_prompt_for(python):\n", |
||||||
|
" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n", |
||||||
|
" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n", |
||||||
|
" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n", |
||||||
|
" user_prompt += python\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c6190659-f54c-4951-bef4-4960f8e51cc4", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def messages_for(python):\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_message},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n", |
||||||
|
" ]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# write to a file called optimized.cpp\n", |
||||||
|
"\n", |
||||||
|
"def write_output(cpp):\n", |
||||||
|
" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n", |
||||||
|
" with open(\"optimized.cpp\", \"w\") as f:\n", |
||||||
|
" f.write(code)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def optimize_gpt(python): \n", |
||||||
|
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" fragment = chunk.choices[0].delta.content or \"\"\n", |
||||||
|
" reply += fragment\n", |
||||||
|
" print(fragment, end='', flush=True)\n", |
||||||
|
" write_output(reply)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def optimize_claude(python):\n", |
||||||
|
" result = claude.messages.stream(\n", |
||||||
|
" model=CLAUDE_MODEL,\n", |
||||||
|
" max_tokens=2000,\n", |
||||||
|
" system=system_message,\n", |
||||||
|
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" with result as stream:\n", |
||||||
|
" for text in stream.text_stream:\n", |
||||||
|
" reply += text\n", |
||||||
|
" print(text, end=\"\", flush=True)\n", |
||||||
|
" write_output(reply)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "3625fcd6-209f-481c-a745-dcbcf5e44bc1", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def optimize_gemini(python):\n", |
||||||
|
" gemini = genai.GenerativeModel(\n", |
||||||
|
" model_name = GEMINI_MODEL,\n", |
||||||
|
" system_instruction=system_message\n", |
||||||
|
" )\n", |
||||||
|
" response = gemini.generate_content(\n", |
||||||
|
" user_prompt_for(python),\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in response:\n", |
||||||
|
" reply += chunk.text\n", |
||||||
|
" print(chunk.text, end=\"\", flush=True)\n", |
||||||
|
" write_output(reply)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a1cbb778-fa57-43de-b04b-ed523f396c38", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"pi = \"\"\"\n", |
||||||
|
"import time\n", |
||||||
|
"\n", |
||||||
|
"def calculate(iterations, param1, param2):\n", |
||||||
|
" result = 1.0\n", |
||||||
|
" for i in range(1, iterations+1):\n", |
||||||
|
" j = i * param1 - param2\n", |
||||||
|
" result -= (1/j)\n", |
||||||
|
" j = i * param1 + param2\n", |
||||||
|
" result += (1/j)\n", |
||||||
|
" return result\n", |
||||||
|
"\n", |
||||||
|
"start_time = time.time()\n", |
||||||
|
"result = calculate(100_000_000, 4, 1) * 4\n", |
||||||
|
"end_time = time.time()\n", |
||||||
|
"\n", |
||||||
|
"print(f\"Result: {result:.12f}\")\n", |
||||||
|
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "fe891e3a-d1c4-4ee5-a361-34d0982fcff4", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"optimize_gemini(pi)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "7fe1cd4b-d2c5-4303-afed-2115a3fef200", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"exec(pi)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "105db6f9-343c-491d-8e44-3a5328b81719", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"optimize_gpt(pi)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"exec(pi)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4194e40c-04ab-4940-9d64-b4ad37c5bb40", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", |
||||||
|
"!./optimized" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "983a11fe-e24d-4c65-8269-9802c5ef3ae6", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"optimize_claude(pi)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", |
||||||
|
"!./optimized" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"python_hard = \"\"\"# Be careful to support large number sizes\n", |
||||||
|
"\n", |
||||||
|
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n", |
||||||
|
" value = seed\n", |
||||||
|
" while True:\n", |
||||||
|
" value = (a * value + c) % m\n", |
||||||
|
" yield value\n", |
||||||
|
" \n", |
||||||
|
"def max_subarray_sum(n, seed, min_val, max_val):\n", |
||||||
|
" lcg_gen = lcg(seed)\n", |
||||||
|
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n", |
||||||
|
" max_sum = float('-inf')\n", |
||||||
|
" for i in range(n):\n", |
||||||
|
" current_sum = 0\n", |
||||||
|
" for j in range(i, n):\n", |
||||||
|
" current_sum += random_numbers[j]\n", |
||||||
|
" if current_sum > max_sum:\n", |
||||||
|
" max_sum = current_sum\n", |
||||||
|
" return max_sum\n", |
||||||
|
"\n", |
||||||
|
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n", |
||||||
|
" total_sum = 0\n", |
||||||
|
" lcg_gen = lcg(initial_seed)\n", |
||||||
|
" for _ in range(20):\n", |
||||||
|
" seed = next(lcg_gen)\n", |
||||||
|
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n", |
||||||
|
" return total_sum\n", |
||||||
|
"\n", |
||||||
|
"# Parameters\n", |
||||||
|
"n = 10000 # Number of random numbers\n", |
||||||
|
"initial_seed = 42 # Initial seed for the LCG\n", |
||||||
|
"min_val = -10 # Minimum value of random numbers\n", |
||||||
|
"max_val = 10 # Maximum value of random numbers\n", |
||||||
|
"\n", |
||||||
|
"# Timing the function\n", |
||||||
|
"import time\n", |
||||||
|
"start_time = time.time()\n", |
||||||
|
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n", |
||||||
|
"end_time = time.time()\n", |
||||||
|
"\n", |
||||||
|
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n", |
||||||
|
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"exec(python_hard)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"optimize_gpt(python_hard)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", |
||||||
|
"!./optimized" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"optimize_claude(python_hard)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0c181036-8193-4fdd-aef3-fc513b218d43", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n", |
||||||
|
"!./optimized" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def stream_gpt(python): \n", |
||||||
|
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" fragment = chunk.choices[0].delta.content or \"\"\n", |
||||||
|
" reply += fragment\n", |
||||||
|
" yield reply.replace('```cpp\\n','').replace('```','')" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "8669f56b-8314-4582-a167-78842caea131", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def stream_claude(python):\n", |
||||||
|
" result = claude.messages.stream(\n", |
||||||
|
" model=CLAUDE_MODEL,\n", |
||||||
|
" max_tokens=2000,\n", |
||||||
|
" system=system_message,\n", |
||||||
|
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" with result as stream:\n", |
||||||
|
" for text in stream.text_stream:\n", |
||||||
|
" reply += text\n", |
||||||
|
" yield reply.replace('```cpp\\n','').replace('```','')" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a9b6938f-89ef-4998-a334-2f6c042a2da4", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def stream_gemini(python):\n", |
||||||
|
" gemini = genai.GenerativeModel(\n", |
||||||
|
" model_name = GEMINI_MODEL,\n", |
||||||
|
" system_instruction=system_message\n", |
||||||
|
" )\n", |
||||||
|
" response = gemini.generate_content(\n", |
||||||
|
" user_prompt_for(python),\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in response:\n", |
||||||
|
" reply += chunk.text\n", |
||||||
|
" yield reply.replace('```cpp\\n','').replace('```','')" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def optimize(python, model):\n", |
||||||
|
" if model==\"GPT\":\n", |
||||||
|
" result = stream_gpt(python)\n", |
||||||
|
" elif model==\"Claude\":\n", |
||||||
|
" result = stream_claude(python)\n", |
||||||
|
" elif model==\"Gemini\":\n", |
||||||
|
" result= stream_gemini(python)\n", |
||||||
|
" else:\n", |
||||||
|
" raise ValueError(\"Unknown model\")\n", |
||||||
|
" for stream_so_far in result:\n", |
||||||
|
" yield stream_so_far " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"with gr.Blocks() as ui:\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n", |
||||||
|
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" model = gr.Dropdown([\"GPT\", \"Claude\",\"Gemini\"], label=\"Select model\", value=\"GPT\")\n", |
||||||
|
" convert = gr.Button(\"Convert code\")\n", |
||||||
|
"\n", |
||||||
|
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n", |
||||||
|
"\n", |
||||||
|
"ui.launch(inbrowser=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "19bf2bff-a822-4009-a539-f003b1651383", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def execute_python(code):\n", |
||||||
|
" try:\n", |
||||||
|
" output = io.StringIO()\n", |
||||||
|
" sys.stdout = output\n", |
||||||
|
" exec(code)\n", |
||||||
|
" finally:\n", |
||||||
|
" sys.stdout = sys.__stdout__\n", |
||||||
|
" return output.getvalue()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def execute_cpp(code):\n", |
||||||
|
" write_output(code)\n", |
||||||
|
" try:\n", |
||||||
|
" compile_result = subprocess.run(compiler_cmd[2], check=True, text=True, capture_output=True)\n", |
||||||
|
" run_cmd = [\"./optimized\"]\n", |
||||||
|
" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n", |
||||||
|
" return run_result.stdout\n", |
||||||
|
" except subprocess.CalledProcessError as e:\n", |
||||||
|
" return f\"An error occurred:\\n{e.stderr}\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"css = \"\"\"\n", |
||||||
|
".python {background-color: #306998;}\n", |
||||||
|
".cpp {background-color: #050;}\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f1303932-160c-424b-97a8-d28c816721b2", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"with gr.Blocks(css=css) as ui:\n", |
||||||
|
" gr.Markdown(\"## Convert code from Python to C++\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n", |
||||||
|
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" model = gr.Dropdown([\"GPT\", \"Claude\",\"Gemini\"], label=\"Select model\", value=\"GPT\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" convert = gr.Button(\"Convert code\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python_run = gr.Button(\"Run Python\")\n", |
||||||
|
" cpp_run = gr.Button(\"Run C++\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n", |
||||||
|
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n", |
||||||
|
"\n", |
||||||
|
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n", |
||||||
|
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n", |
||||||
|
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n", |
||||||
|
"\n", |
||||||
|
"ui.launch(inbrowser=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "bb8c5b4e-ec51-4f21-b3f8-6aa94fede86d", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"from huggingface_hub import login, InferenceClient\n", |
||||||
|
"from transformers import AutoTokenizer" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "13347633-4606-4e38-9927-80c39e65c1f1", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"hf_token = os.environ['HF_TOKEN']\n", |
||||||
|
"login(hf_token, add_to_git_credential=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "ef60a4df-6267-4ebd-8eed-dcb917af0a5e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n", |
||||||
|
"code_gemma = \"google/codegemma-7b-it\"\n", |
||||||
|
"CODE_QWEN_URL = \"https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud\"\n", |
||||||
|
"CODE_GEMMA_URL = \"https://c5hggiyqachmgnqg.us-east-1.aws.endpoints.huggingface.cloud\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "695ce389-a903-4533-a2f1-cd9e2a6af8f2", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", |
||||||
|
"messages = messages_for(pi)\n", |
||||||
|
"text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d4548e96-0b32-4793-bdd6-1b072c2f26ab", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"print(text)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "bb2a126b-09e7-4966-bc97-0ef5c2cc7896", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n", |
||||||
|
"stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n", |
||||||
|
"for r in stream:\n", |
||||||
|
" print(r.token.text, end = \"\")" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "127a52e5-ad85-42b7-a0f5-9afda5efe090", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def stream_code_qwen(python):\n", |
||||||
|
" tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n", |
||||||
|
" messages = messages_for(python)\n", |
||||||
|
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n", |
||||||
|
" client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n", |
||||||
|
" stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n", |
||||||
|
" result = \"\"\n", |
||||||
|
" for r in stream:\n", |
||||||
|
" result += r.token.text\n", |
||||||
|
" yield result " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a82387d1-7651-4923-995b-fe18356fcaa6", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def optimize(python, model):\n", |
||||||
|
" if model==\"GPT\":\n", |
||||||
|
" result = stream_gpt(python)\n", |
||||||
|
" elif model==\"Claude\":\n", |
||||||
|
" result = stream_claude(python)\n", |
||||||
|
" elif model==\"Gemini\":\n", |
||||||
|
" result= stream_gemini(python)\n", |
||||||
|
" elif model==\"CodeQwen\":\n", |
||||||
|
" result = stream_code_qwen(python)\n", |
||||||
|
" else:\n", |
||||||
|
" raise ValueError(\"Unknown model\")\n", |
||||||
|
" for stream_so_far in result:\n", |
||||||
|
" yield stream_so_far " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "4b0a6a97-5b8a-4a9b-8ee0-7561e0ced673", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"<table style=\"margin: 0; text-align: left;\">\n", |
||||||
|
" <tr>\n", |
||||||
|
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n", |
||||||
|
" <img src=\"../../thankyou.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n", |
||||||
|
" </td>\n", |
||||||
|
" <td>\n", |
||||||
|
" <h2 style=\"color:#090;\">Thank you to @CloudLlama for an amazing contribution</h2>\n", |
||||||
|
" <span style=\"color:#090;\">\n", |
||||||
|
" A student has contributed a chunk of code to improve this, in the next 2 cells. You can now select which Python program to run,\n", |
||||||
|
" and a compiler is automatically selected that will work on Linux, Windows and Mac. Massive thank you @CloudLlama!\n", |
||||||
|
" </span>\n", |
||||||
|
" </td>\n", |
||||||
|
" </tr>\n", |
||||||
|
"</table>" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "4ba311ec-c16a-4fe0-946b-4b940704cf65", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def select_sample_program(sample_program):\n", |
||||||
|
" if sample_program==\"pi\":\n", |
||||||
|
" return pi\n", |
||||||
|
" elif sample_program==\"python_hard\":\n", |
||||||
|
" return python_hard\n", |
||||||
|
" else:\n", |
||||||
|
" return \"Type your Python program here\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "e42286bc-085c-45dc-b101-234308e58269", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import platform\n", |
||||||
|
"\n", |
||||||
|
"VISUAL_STUDIO_2022_TOOLS = \"C:\\\\Program Files\\\\Microsoft Visual Studio\\\\2022\\\\Community\\\\Common7\\\\Tools\\\\VsDevCmd.bat\"\n", |
||||||
|
"VISUAL_STUDIO_2019_TOOLS = \"C:\\\\Program Files (x86)\\\\Microsoft Visual Studio\\\\2019\\\\BuildTools\\\\Common7\\\\Tools\\\\VsDevCmd.bat\"\n", |
||||||
|
"\n", |
||||||
|
"simple_cpp = \"\"\"\n", |
||||||
|
"#include <iostream>\n", |
||||||
|
"\n", |
||||||
|
"int main() {\n", |
||||||
|
" std::cout << \"Hello\";\n", |
||||||
|
" return 0;\n", |
||||||
|
"}\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"def run_cmd(command_to_run):\n", |
||||||
|
" try:\n", |
||||||
|
" run_result = subprocess.run(command_to_run, check=True, text=True, capture_output=True)\n", |
||||||
|
" return run_result.stdout if run_result.stdout else \"SUCCESS\"\n", |
||||||
|
" except:\n", |
||||||
|
" return \"\"\n", |
||||||
|
"\n", |
||||||
|
"def c_compiler_cmd(filename_base):\n", |
||||||
|
" my_platform = platform.system()\n", |
||||||
|
" my_compiler = []\n", |
||||||
|
"\n", |
||||||
|
" try:\n", |
||||||
|
" with open(\"simple.cpp\", \"w\") as f:\n", |
||||||
|
" f.write(simple_cpp)\n", |
||||||
|
" \n", |
||||||
|
" if my_platform == \"Windows\":\n", |
||||||
|
" if os.path.isfile(VISUAL_STUDIO_2022_TOOLS):\n", |
||||||
|
" if os.path.isfile(\"./simple.exe\"):\n", |
||||||
|
" os.remove(\"./simple.exe\")\n", |
||||||
|
" compile_cmd = [\"cmd\", \"/c\", VISUAL_STUDIO_2022_TOOLS, \"&\", \"cl\", \"simple.cpp\"]\n", |
||||||
|
" if run_cmd(compile_cmd):\n", |
||||||
|
" if run_cmd([\"./simple.exe\"]) == \"Hello\":\n", |
||||||
|
" my_compiler = [\"Windows\", \"Visual Studio 2022\", [\"cmd\", \"/c\", VISUAL_STUDIO_2022_TOOLS, \"&\", \"cl\", f\"{filename_base}.cpp\"]]\n", |
||||||
|
" \n", |
||||||
|
" if not my_compiler:\n", |
||||||
|
" if os.path.isfile(VISUAL_STUDIO_2019_TOOLS):\n", |
||||||
|
" if os.path.isfile(\"./simple.exe\"):\n", |
||||||
|
" os.remove(\"./simple.exe\")\n", |
||||||
|
" compile_cmd = [\"cmd\", \"/c\", VISUAL_STUDIO_2019_TOOLS, \"&\", \"cl\", \"simple.cpp\"]\n", |
||||||
|
" if run_cmd(compile_cmd):\n", |
||||||
|
" if run_cmd([\"./simple.exe\"]) == \"Hello\":\n", |
||||||
|
" my_compiler = [\"Windows\", \"Visual Studio 2019\", [\"cmd\", \"/c\", VISUAL_STUDIO_2019_TOOLS, \"&\", \"cl\", f\"{filename_base}.cpp\"]]\n", |
||||||
|
" \n", |
||||||
|
" if not my_compiler:\n", |
||||||
|
" my_compiler=[my_platform, \"Unavailable\", []]\n", |
||||||
|
" \n", |
||||||
|
" elif my_platform == \"Linux\":\n", |
||||||
|
" if os.path.isfile(\"./simple\"):\n", |
||||||
|
" os.remove(\"./simple\")\n", |
||||||
|
" compile_cmd = [\"g++\", \"simple.cpp\", \"-o\", \"simple\"]\n", |
||||||
|
" if run_cmd(compile_cmd):\n", |
||||||
|
" if run_cmd([\"./simple\"]) == \"Hello\":\n", |
||||||
|
" my_compiler = [\"Linux\", \"GCC (g++)\", [\"g++\", f\"{filename_base}.cpp\", \"-o\", f\"{filename_base}\" ]]\n", |
||||||
|
" \n", |
||||||
|
" if not my_compiler:\n", |
||||||
|
" if os.path.isfile(\"./simple\"):\n", |
||||||
|
" os.remove(\"./simple\")\n", |
||||||
|
" compile_cmd = [\"clang++\", \"simple.cpp\", \"-o\", \"simple\"]\n", |
||||||
|
" if run_cmd(compile_cmd):\n", |
||||||
|
" if run_cmd([\"./simple\"]) == \"Hello\":\n", |
||||||
|
" my_compiler = [\"Linux\", \"Clang++\", [\"clang++\", f\"{filename_base}.cpp\", \"-o\", f\"{filename_base}\"]]\n", |
||||||
|
" \n", |
||||||
|
" if not my_compiler:\n", |
||||||
|
" my_compiler=[my_platform, \"Unavailable\", []]\n", |
||||||
|
" \n", |
||||||
|
" elif my_platform == \"Darwin\":\n", |
||||||
|
" if os.path.isfile(\"./simple\"):\n", |
||||||
|
" os.remove(\"./simple\")\n", |
||||||
|
" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"simple\", \"simple.cpp\"]\n", |
||||||
|
" if run_cmd(compile_cmd):\n", |
||||||
|
" if run_cmd([\"./simple\"]) == \"Hello\":\n", |
||||||
|
" my_compiler = [\"Macintosh\", \"Clang++\", [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", f\"{filename_base}\", f\"{filename_base}.cpp\"]]\n", |
||||||
|
" \n", |
||||||
|
" if not my_compiler:\n", |
||||||
|
" my_compiler=[my_platform, \"Unavailable\", []]\n", |
||||||
|
" except:\n", |
||||||
|
" my_compiler=[my_platform, \"Unavailable\", []]\n", |
||||||
|
" \n", |
||||||
|
" if my_compiler:\n", |
||||||
|
" return my_compiler\n", |
||||||
|
" else:\n", |
||||||
|
" return [\"Unknown\", \"Unavailable\", []]\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "f9ca2e6f-60c1-4e5f-b570-63c75b2d189b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"compiler_cmd = c_compiler_cmd(\"optimized\")\n", |
||||||
|
"\n", |
||||||
|
"with gr.Blocks(css=css) as ui:\n", |
||||||
|
" gr.Markdown(\"## Convert code from Python to C++\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n", |
||||||
|
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" with gr.Column():\n", |
||||||
|
" sample_program = gr.Radio([\"pi\", \"python_hard\"], label=\"Sample program\", value=\"python_hard\")\n", |
||||||
|
" model = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n", |
||||||
|
" with gr.Column():\n", |
||||||
|
" architecture = gr.Radio([compiler_cmd[0]], label=\"Architecture\", interactive=False, value=compiler_cmd[0])\n", |
||||||
|
" compiler = gr.Radio([compiler_cmd[1]], label=\"Compiler\", interactive=False, value=compiler_cmd[1])\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" convert = gr.Button(\"Convert code\")\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python_run = gr.Button(\"Run Python\")\n", |
||||||
|
" if not compiler_cmd[1] == \"Unavailable\":\n", |
||||||
|
" cpp_run = gr.Button(\"Run C++\")\n", |
||||||
|
" else:\n", |
||||||
|
" cpp_run = gr.Button(\"No compiler to run C++\", interactive=False)\n", |
||||||
|
" with gr.Row():\n", |
||||||
|
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n", |
||||||
|
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n", |
||||||
|
"\n", |
||||||
|
" sample_program.change(select_sample_program, inputs=[sample_program], outputs=[python])\n", |
||||||
|
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n", |
||||||
|
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n", |
||||||
|
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n", |
||||||
|
"\n", |
||||||
|
"ui.launch(inbrowser=True)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "9d0ad093-425b-488e-8c3f-67f729dd9c06", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
@ -0,0 +1,869 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "ykDDGx1cjYlh" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"# **DocuPy** \n", |
||||||
|
"### _\"Automate Documentation, Comments, and Unit Tests for Python Code\"_ \n", |
||||||
|
"\n", |
||||||
|
"## Overview \n", |
||||||
|
"DocuPy is a Gradio-powered tool designed to automate essential but time-consuming Python development tasks. It streamlines documentation, unit testing, and Python-to-C++ code conversion with AI-driven assistance. \n", |
||||||
|
"\n", |
||||||
|
"### Key Features \n", |
||||||
|
"✅ **Auto-Generate Docstrings & Comments** – Instantly improve code clarity and maintainability. \n", |
||||||
|
"✅ **Unit Test Generation** – Ensure reliability with AI-generated test cases. \n", |
||||||
|
"✅ **Python to C++ Conversion** – Seamlessly translate Python code to C++ with execution support. \n", |
||||||
|
"\n", |
||||||
|
"With an intuitive tab-based UI, DocuPy enhances productivity for developers of all levels. Whether you're documenting functions, validating code with tests, or exploring C++ conversions, this tool lets you focus on coding while it handles the rest. \n", |
||||||
|
"\n", |
||||||
|
"🔗 **Check out the repo**: [GitHub Repo](https://github.com/emads22/DocuPy) \n", |
||||||
|
"\n", |
||||||
|
"💡 **Have insights, feedback, or ideas?** Feel free to reach out. \n", |
||||||
|
"\n", |
||||||
|
"[<img src=\"https://img.shields.io/badge/GitHub-Emad-blue?logo=github\" width=\"150\">](https://github.com/emads22)\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"If you're running this notebook on **`Google Colab`**, ensure you install the required libraries by running the following command:\n", |
||||||
|
"\n", |
||||||
|
"```bash\n", |
||||||
|
"!pip install -q openai anthropic python-dotenv gradio huggingface_hub transformers\n", |
||||||
|
"```\n", |
||||||
|
"Otherwise, make sure to activate the Conda environment `docupy` that already includes these modules:\n", |
||||||
|
"\n", |
||||||
|
"```bash\n", |
||||||
|
"conda activate docupy\n", |
||||||
|
"```" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "6wIpBtNPjXc8" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Uncomment the following command when running on Google Colab\n", |
||||||
|
"# !pip install -q openai anthropic python-dotenv gradio huggingface_hub transformers " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "T-cTBf9amBxf" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"## Setup and Install Dependencies\n", |
||||||
|
"\n", |
||||||
|
"- Start by installing all necessary libraries." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "aIHWC7xpk87X" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# imports\n", |
||||||
|
"import os\n", |
||||||
|
"import io\n", |
||||||
|
"import sys\n", |
||||||
|
"import subprocess\n", |
||||||
|
"import openai\n", |
||||||
|
"import anthropic\n", |
||||||
|
"import google.generativeai as google_genai\n", |
||||||
|
"import gradio as gr\n", |
||||||
|
"from openai import OpenAI\n", |
||||||
|
"# from google.colab import userdata\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"from pathlib import Path\n", |
||||||
|
"from huggingface_hub import login, InferenceClient\n", |
||||||
|
"from transformers import AutoTokenizer" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "LZQbXR3dmZy4" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"## Add Secrets to the Colab Notebook\n", |
||||||
|
"\n", |
||||||
|
"- Add the API keys for OpenAI, Claude, and Gemini to authenticate and access their respective models and services.\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "AadABekBm4fV" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# # Log in to Hugging Face using the token and add it to git credentials\n", |
||||||
|
"# hf_token = userdata.get('HF_TOKEN')\n", |
||||||
|
"# login(token=hf_token, add_to_git_credential=True)\n", |
||||||
|
"\n", |
||||||
|
"# # Endpoint URL for accessing the Code Qwen model through Hugging Face\n", |
||||||
|
"# CODE_QWEN_URL = userdata.get('CODE_QWEN_URL')\n", |
||||||
|
"\n", |
||||||
|
"# # Initialize inference clients with every model using API keys\n", |
||||||
|
"# gpt = openai.OpenAI(api_key=userdata.get('OPENAI_API_KEY'))\n", |
||||||
|
"# claude = anthropic.Anthropic(api_key=userdata.get('ANTHROPIC_API_KEY'))\n", |
||||||
|
"# google_genai.configure(api_key=userdata.get('GOOGLE_API_KEY'))\n", |
||||||
|
"# code_qwen = InferenceClient(CODE_QWEN_URL, token=hf_token)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "Ej3JNfh_wc0m" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"## Alternatively, if not running on Google Colab, Load Environment Variables for API Keys\n", |
||||||
|
"\n", |
||||||
|
"- Use the `load_dotenv()` function to securely load API keys from a `.env` file.\n", |
||||||
|
"- Ensure that the `.env` file is located in the same directory as your script or Jupyter Notebook.\n", |
||||||
|
"- The `.env` file should include the required API keys for OpenAI, Claude, and Gemini." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "av9X9XpQw0Vd" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"load_dotenv()\n", |
||||||
|
"\n", |
||||||
|
"# Log in to Hugging Face using the token and add it to git credentials\n", |
||||||
|
"hf_token = os.getenv('HF_TOKEN')\n", |
||||||
|
"login(token=hf_token, add_to_git_credential=True)\n", |
||||||
|
"\n", |
||||||
|
"# Endpoint URL for accessing the Code Qwen model through Hugging Face\n", |
||||||
|
"CODE_QWEN_URL = os.getenv('CODE_QWEN_URL')\n", |
||||||
|
"\n", |
||||||
|
"# Initialize inference clients with every model using API keys\n", |
||||||
|
"gpt = openai.OpenAI(api_key=os.getenv('OPENAI_API_KEY'))\n", |
||||||
|
"claude = anthropic.Anthropic(api_key=os.getenv('ANTHROPIC_API_KEY'))\n", |
||||||
|
"google_genai.configure(api_key=os.getenv('GOOGLE_API_KEY'))\n", |
||||||
|
"code_qwen = InferenceClient(CODE_QWEN_URL, token=hf_token)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "lvEhCuQjrTYu" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"## Define Required Constants\n", |
||||||
|
"\n", |
||||||
|
"- Initialize the essential constants required for the application's functionality.\n", |
||||||
|
"- Configure the system and user prompts specific to each task or feature.\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "AKEBKKmAowt2" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Models\n", |
||||||
|
"OPENAI_MODEL = \"gpt-4o\"\n", |
||||||
|
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n", |
||||||
|
"GEMINI_MODEL = \"gemini-1.5-pro\"\n", |
||||||
|
"CODE_QWEN_MODEL = \"Qwen/CodeQwen1.5-7B-Chat\"\n", |
||||||
|
"\n", |
||||||
|
"MODELS_IN_USE = [\"GPT\", \"Claude\", \"Gemini\", \"CodeQwen\"]\n", |
||||||
|
"\n", |
||||||
|
"MAX_TOKENS = 2000\n", |
||||||
|
"\n", |
||||||
|
"ACTION_A = \"commenting\"\n", |
||||||
|
"ACTION_B = \"testing\"\n", |
||||||
|
"ACTION_C = \"converting\"\n", |
||||||
|
"\n", |
||||||
|
"# Define and create the path for the \"temp_files\" directory within the current script's directory\n", |
||||||
|
"TEMP_DIR = Path.cwd() / \"temp_files\"\n", |
||||||
|
"TEMP_DIR.mkdir(parents=True, exist_ok=True)\n", |
||||||
|
"\n", |
||||||
|
"PYTHON_SCRIPT_EASY = \"\"\"\n", |
||||||
|
"import time\n", |
||||||
|
"\n", |
||||||
|
"def reverse_string(s):\n", |
||||||
|
" return s[::-1]\n", |
||||||
|
"\n", |
||||||
|
"if __name__ == \"__main__\":\n", |
||||||
|
" start_time = time.time()\n", |
||||||
|
" text = \"Hello, World!\"\n", |
||||||
|
" print(f\"- Original string: {text}\")\n", |
||||||
|
" print(\"- Reversed string:\", reverse_string(text))\n", |
||||||
|
" execution_time = time.time() - start_time \n", |
||||||
|
" print(f\"\\\\n=> Execution Time: {execution_time:.6f} seconds\")\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"PYTHON_SCRIPT_INTERMEDIATE = \"\"\"\n", |
||||||
|
"import time\n", |
||||||
|
"\n", |
||||||
|
"def is_palindrome(s):\n", |
||||||
|
" s = s.lower().replace(\" \", \"\") \n", |
||||||
|
" return s == s[::-1]\n", |
||||||
|
"\n", |
||||||
|
"if __name__ == \"__main__\":\n", |
||||||
|
" start_time = time.time()\n", |
||||||
|
" text = \"Racecar\"\n", |
||||||
|
" if is_palindrome(text):\n", |
||||||
|
" print(f\"- '{text}' is a palindrome!\")\n", |
||||||
|
" else:\n", |
||||||
|
" print(f\"- '{text}' is Not a palindrome.\")\n", |
||||||
|
" execution_time = time.time() - start_time \n", |
||||||
|
" print(f\"\\\\n=> Execution Time: {execution_time:.6f} seconds\")\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"PYTHON_SCRIPT_HARD = \"\"\"\n", |
||||||
|
"import time\n", |
||||||
|
"\n", |
||||||
|
"def generate_primes(limit):\n", |
||||||
|
" primes = []\n", |
||||||
|
" for num in range(2, limit + 1):\n", |
||||||
|
" if all(num % p != 0 for p in primes):\n", |
||||||
|
" primes.append(num)\n", |
||||||
|
" return primes\n", |
||||||
|
"\n", |
||||||
|
"if __name__ == \"__main__\":\n", |
||||||
|
" start_time = time.time()\n", |
||||||
|
" n = 20\n", |
||||||
|
" print(f\"- Generating primes up to: {n}\")\n", |
||||||
|
" print(\"- Prime numbers:\", generate_primes(n))\n", |
||||||
|
" execution_time = time.time() - start_time \n", |
||||||
|
" print(f\"\\\\n=> Execution Time: {execution_time:.6f} seconds\")\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"PYTHON_SCRIPTS = {\n", |
||||||
|
" \"reverse_string\" : PYTHON_SCRIPT_EASY,\n", |
||||||
|
" \"is_palindrome\" : PYTHON_SCRIPT_INTERMEDIATE,\n", |
||||||
|
" \"generate_primes\" : PYTHON_SCRIPT_HARD,\n", |
||||||
|
" \"custom\" : \"\"\"\n", |
||||||
|
"# Write your custom Python script here\n", |
||||||
|
"if __name__ == \"__main__\":\n", |
||||||
|
" print(\"Hello, World!\")\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"}\n", |
||||||
|
"\n", |
||||||
|
"# Relative system prompts\n", |
||||||
|
"SYSTEM_PROMPT_COMMENTS = \"\"\"\n", |
||||||
|
"You are an AI model specializing in enhancing Python code documentation.\n", |
||||||
|
"Generate detailed and precise docstrings and inline comments for the provided Python code.\n", |
||||||
|
"Ensure the docstrings clearly describe the purpose, parameters, and return values of each function.\n", |
||||||
|
"Inline comments should explain complex or non-obvious code segments.\n", |
||||||
|
"Do not include any introductions, explanations, conclusions, or additional context.\n", |
||||||
|
"Return only the updated Python code enclosed within ```python ... ``` for proper formatting and syntax highlighting.\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"SYSTEM_PROMPT_TESTS = \"\"\"\n", |
||||||
|
"You are an AI model specializing in generating comprehensive unit tests for Python code.\n", |
||||||
|
"Create Python unit tests that thoroughly validate the functionality of the given code.\n", |
||||||
|
"Use the `unittest` framework and ensure edge cases and error conditions are tested.\n", |
||||||
|
"Do not include any comments, introductions, explanations, conclusions, or additional context.\n", |
||||||
|
"Return only the unit test code enclosed within ```python ... ``` for proper formatting and syntax highlighting.\n", |
||||||
|
"\"\"\"\n", |
||||||
|
"\n", |
||||||
|
"SYSTEM_PROMPT_CONVERT = \"\"\"\n", |
||||||
|
"You are an AI model specializing in high-performance code translation.\n", |
||||||
|
"Translate the given Python code into equivalent, optimized C++ code.\n", |
||||||
|
"Focus on:\n", |
||||||
|
"- Using efficient data structures and algorithms.\n", |
||||||
|
"- Avoiding unnecessary memory allocations and computational overhead.\n", |
||||||
|
"- Ensuring minimal risk of integer overflow by using appropriate data types.\n", |
||||||
|
"- Leveraging the C++ Standard Library (e.g., `<vector>`, `<algorithm>`) for performance and readability.\n", |
||||||
|
"Produce concise and efficient C++ code that matches the functionality of the original Python code.\n", |
||||||
|
"Do not include any comments, introductions, explanations, conclusions, or additional context.\n", |
||||||
|
"Return only the C++ code enclosed within ```cpp ... ``` for proper formatting and syntax highlighting.\n", |
||||||
|
"\"\"\"" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "JJ1zttf7ANqD" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Relative user prompts\n", |
||||||
|
"def user_prompt_comments(python_code):\n", |
||||||
|
" user_prompt = f\"\"\"\n", |
||||||
|
"Add detailed docstrings and inline comments to the following Python code:\n", |
||||||
|
"\n", |
||||||
|
"```python\n", |
||||||
|
"{python_code}\n", |
||||||
|
"```\n", |
||||||
|
"\"\"\"\n", |
||||||
|
" return user_prompt\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_tests(python_code):\n", |
||||||
|
" user_prompt = f\"\"\"\n", |
||||||
|
"Generate unit tests for the following Python code using the `unittest` framework:\n", |
||||||
|
"\n", |
||||||
|
"```python\n", |
||||||
|
"{python_code}\n", |
||||||
|
"```\n", |
||||||
|
"\"\"\"\n", |
||||||
|
" return user_prompt\n", |
||||||
|
"\n", |
||||||
|
"def user_prompt_convert(python_code):\n", |
||||||
|
" user_prompt = f\"\"\"\n", |
||||||
|
"Convert the following Python code into C++:\n", |
||||||
|
"\n", |
||||||
|
"```python\n", |
||||||
|
"{python_code}\n", |
||||||
|
"``` \n", |
||||||
|
"\"\"\"\n", |
||||||
|
" return user_prompt" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "tqrOO_qsCRkd" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"### Define the Tab Functions\n", |
||||||
|
"\n", |
||||||
|
"- Develop dedicated functions for each service: documenting Python code, generating unit tests, and converting Python to C++.\n", |
||||||
|
"- Structure each function to handle user input, process it using the selected AI model, and display the generated output seamlessly.\n", |
||||||
|
"- Ensure the functionality of each tab aligns with its specific purpose, providing an intuitive and efficient user experience.\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "HBsBrq3G94ul" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def stream_gpt(system_prompt, user_prompt):\n", |
||||||
|
" stream = gpt.chat.completions.create(\n", |
||||||
|
" model=OPENAI_MODEL,\n", |
||||||
|
" messages=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||||
|
" ],\n", |
||||||
|
" stream=True)\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" reply += chunk.choices[0].delta.content or \"\"\n", |
||||||
|
" yield reply.replace(\"```python\\n\", \"\").replace(\"```cpp\\n\", \"\").replace(\"```\", \"\")\n", |
||||||
|
"\n", |
||||||
|
"def stream_claude(system_prompt, user_prompt):\n", |
||||||
|
" response = claude.messages.stream(\n", |
||||||
|
" model=CLAUDE_MODEL,\n", |
||||||
|
" max_tokens=MAX_TOKENS,\n", |
||||||
|
" system=system_prompt,\n", |
||||||
|
" messages=[{\"role\": \"user\", \"content\": user_prompt}],\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" with response as stream:\n", |
||||||
|
" for text in stream.text_stream:\n", |
||||||
|
" reply += text\n", |
||||||
|
" yield reply.replace(\"```python\\n\", \"\").replace(\"```cpp\\n\", \"\").replace(\"```\", \"\")\n", |
||||||
|
"\n", |
||||||
|
"def stream_gemini(system_prompt, user_prompt):\n", |
||||||
|
" gemini = google_genai.GenerativeModel(\n", |
||||||
|
" model_name=GEMINI_MODEL,\n", |
||||||
|
" system_instruction=system_prompt\n", |
||||||
|
" )\n", |
||||||
|
" stream = gemini.generate_content(\n", |
||||||
|
" contents=user_prompt,\n", |
||||||
|
" stream=True\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" reply += chunk.text or \"\"\n", |
||||||
|
" yield reply.replace(\"```python\\n\", \"\").replace(\"```cpp\\n\", \"\").replace(\"```\", \"\")\n", |
||||||
|
"\n", |
||||||
|
"def stream_code_qwen(system_prompt, user_prompt):\n", |
||||||
|
" tokenizer = AutoTokenizer.from_pretrained(CODE_QWEN_MODEL)\n", |
||||||
|
" model_input = tokenizer.apply_chat_template(\n", |
||||||
|
" conversation=[\n", |
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n", |
||||||
|
" {\"role\": \"user\", \"content\": user_prompt}\n", |
||||||
|
" ],\n", |
||||||
|
" tokenize=False,\n", |
||||||
|
" add_generation_prompt=True\n", |
||||||
|
" )\n", |
||||||
|
" stream = code_qwen.text_generation(\n", |
||||||
|
" prompt=model_input,\n", |
||||||
|
" stream=True,\n", |
||||||
|
" details=True,\n", |
||||||
|
" max_new_tokens=MAX_TOKENS\n", |
||||||
|
" )\n", |
||||||
|
" reply = \"\"\n", |
||||||
|
" for chunk in stream:\n", |
||||||
|
" reply += chunk.token.text or \"\"\n", |
||||||
|
" yield reply.replace(\"```python\\n\", \"\").replace(\"```cpp\\n\", \"\").replace(\"```\", \"\")\n", |
||||||
|
"\n", |
||||||
|
"def set_prompts(user_input, action):\n", |
||||||
|
" action = action.lower()\n", |
||||||
|
"\n", |
||||||
|
" if action == ACTION_A.lower():\n", |
||||||
|
" system_prompt = SYSTEM_PROMPT_COMMENTS\n", |
||||||
|
" user_prompt = user_prompt_comments(user_input)\n", |
||||||
|
" elif action == ACTION_B.lower():\n", |
||||||
|
" system_prompt = SYSTEM_PROMPT_TESTS\n", |
||||||
|
" user_prompt = user_prompt_tests(user_input)\n", |
||||||
|
" elif action == ACTION_C.lower():\n", |
||||||
|
" system_prompt = SYSTEM_PROMPT_CONVERT\n", |
||||||
|
" user_prompt = user_prompt_convert(user_input)\n", |
||||||
|
" else:\n", |
||||||
|
" return None, None\n", |
||||||
|
" \n", |
||||||
|
" return system_prompt, user_prompt\n", |
||||||
|
"\n", |
||||||
|
"def stream_response(user_input, model, action):\n", |
||||||
|
" system_prompt, user_prompt = set_prompts(user_input, action)\n", |
||||||
|
" if not all((system_prompt, user_prompt)):\n", |
||||||
|
" raise ValueError(\"Unknown Action\")\n", |
||||||
|
"\n", |
||||||
|
" match model:\n", |
||||||
|
" case \"GPT\":\n", |
||||||
|
" yield from stream_gpt(system_prompt, user_prompt)\n", |
||||||
|
"\n", |
||||||
|
" case \"Claude\":\n", |
||||||
|
" yield from stream_claude(system_prompt, user_prompt)\n", |
||||||
|
"\n", |
||||||
|
" case \"Gemini\":\n", |
||||||
|
" yield from stream_gemini(system_prompt, user_prompt)\n", |
||||||
|
"\n", |
||||||
|
" case \"CodeQwen\":\n", |
||||||
|
" yield from stream_code_qwen(system_prompt, user_prompt)\n", |
||||||
|
" \n", |
||||||
|
"def generate_comments(python_code, selected_model):\n", |
||||||
|
" for model in MODELS_IN_USE:\n", |
||||||
|
" if model == selected_model:\n", |
||||||
|
" yield from stream_response(python_code, model, action=ACTION_A)\n", |
||||||
|
" return # Exit the function immediately after exhausting the generator\n", |
||||||
|
" raise ValueError(\"Unknown Model\")\n", |
||||||
|
"\n", |
||||||
|
"def generate_tests(python_code, selected_model):\n", |
||||||
|
" for model in MODELS_IN_USE:\n", |
||||||
|
" if model == selected_model:\n", |
||||||
|
" yield from stream_response(python_code, model, action=ACTION_B)\n", |
||||||
|
" return # Exit the function immediately after exhausting the generator\n", |
||||||
|
" raise ValueError(\"Unknown Model\")\n", |
||||||
|
"\n", |
||||||
|
"def convert_code(python_code, selected_model):\n",
"    \"\"\"Stream a C++ conversion of python_code from the selected model.\n",
"\n",
"    :raises ValueError: On first iteration if selected_model is not in MODELS_IN_USE.\n",
"    \"\"\"\n",
"    # Membership test replaces the previous linear scan over MODELS_IN_USE;\n",
"    # as this is a generator, the error still surfaces on the first next().\n",
"    if selected_model not in MODELS_IN_USE:\n",
"        raise ValueError(\"Unknown Model\")\n",
"    yield from stream_response(python_code, selected_model, action=ACTION_C)"
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Running Code Functions\n", |
||||||
|
"\n", |
||||||
|
"- Functions that dynamically execute Python or C++ code provided as a string and capture their output.\n",
||||||
|
"- This is useful for evaluating Python or C++ code snippets and returning their results programmatically.\n", |
||||||
|
"\n", |
||||||
|
"### IMPORTANT WARNING:\n", |
||||||
|
"These functions dynamically execute Python or C++ code provided as input.\n",
||||||
|
"While powerful, this is extremely dangerous if the input code is not trusted.\n", |
||||||
|
"Any malicious code can be executed, including:\n", |
||||||
|
" - Deleting files or directories\n", |
||||||
|
" - Stealing sensitive data (e.g., accessing environment variables or credentials)\n", |
||||||
|
" - Running arbitrary commands that compromise the system\n", |
||||||
|
"\n", |
||||||
|
"Sharing this notebook with this code snippet can allow attackers to exploit this functionality \n", |
||||||
|
"by passing harmful code as input. \n", |
||||||
|
"\n", |
||||||
|
"If you share this notebook or use this function:\n", |
||||||
|
" 1. Only accept input from trusted sources.\n", |
||||||
|
" 2. Consider running the code in a sandboxed environment (e.g., virtual machine or container).\n", |
||||||
|
" 3. Avoid using this function in publicly accessible applications or notebooks without strict validation." |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"def run_python_exec(code):\n", |
||||||
|
" try:\n", |
||||||
|
" # Capture stdout using StringIO\n", |
||||||
|
" output = io.StringIO()\n", |
||||||
|
"\n", |
||||||
|
" # Redirect stdout to StringIO\n", |
||||||
|
" sys.stdout = output\n", |
||||||
|
"\n", |
||||||
|
" # Execute the provided Python code\n", |
||||||
|
" exec(code)\n", |
||||||
|
" finally:\n", |
||||||
|
" # Restore original stdout\n", |
||||||
|
" sys.stdout = sys.__stdout__\n", |
||||||
|
"\n", |
||||||
|
" # Return the captured output\n", |
||||||
|
" return output.getvalue()\n", |
||||||
|
"\n", |
||||||
|
"# Improved running python function\n",
"def run_python(code):\n",
"    \"\"\"Run code as a separate Python process and return its stdout.\n",
"\n",
"    On a non-zero exit status an error message containing stderr is\n",
"    returned instead. The temporary script file is always removed.\n",
"    \"\"\"\n",
"    script_path = TEMP_DIR / \"python_code.py\"\n",
"    # Persist the snippet so it can run as an ordinary script.\n",
"    with open(script_path, \"w\") as script_file:\n",
"        script_file.write(code)\n",
"\n",
"    try:\n",
"        completed = subprocess.run(\n",
"            [\"python\", str(script_path)],\n",
"            check=True, text=True, capture_output=True\n",
"        )\n",
"    except subprocess.CalledProcessError as e:\n",
"        # Surface the interpreter's stderr instead of raising.\n",
"        return f\"An error occurred during execution:\\n{e.stderr}\"\n",
"    else:\n",
"        return completed.stdout\n",
"    finally:\n",
"        # Clean up the temporary script file.\n",
"        if script_path.exists():\n",
"            script_path.unlink()\n",
||||||
|
"\n", |
||||||
|
"def run_cpp(code):\n",
"    \"\"\"Compile code with g++, run the binary and return its stdout.\n",
"\n",
"    Compilation or runtime failures are returned as an error message\n",
"    containing stderr. All temporary files are always removed.\n",
"    \"\"\"\n",
"    source_path = TEMP_DIR / \"cpp_code.cpp\"\n",
"    binary_path = TEMP_DIR / \"cpp_code\"\n",
"    with open(source_path, \"w\") as source_file:\n",
"        source_file.write(code)\n",
"\n",
"    try:\n",
"        # Compile the C++ code\n",
"        subprocess.run(\n",
"            [\"g++\", \"-o\", str(binary_path), str(source_path)],\n",
"            check=True, text=True, capture_output=True\n",
"        )\n",
"        # Execute the compiled program\n",
"        completed = subprocess.run(\n",
"            [str(binary_path)],\n",
"            check=True, text=True, capture_output=True\n",
"        )\n",
"        return completed.stdout\n",
"    except subprocess.CalledProcessError as e:\n",
"        # A failing compile step mentions the source file name in stderr.\n",
"        error_context = \"during compilation\" if \"cpp_code.cpp\" in e.stderr else \"during execution\"\n",
"        return f\"An error occurred {error_context}:\\n{e.stderr}\"\n",
"    finally:\n",
"        # Remove source, binary and (on Windows) the .exe variant.\n",
"        for leftover in [\"cpp_code.cpp\", \"cpp_code\", \"cpp_code.exe\"]:\n",
"            leftover_path = TEMP_DIR / leftover\n",
"            if leftover_path.exists():\n",
"                leftover_path.unlink()"
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"metadata": { |
||||||
|
"id": "Vude1jzPrgT2" |
||||||
|
}, |
||||||
|
"source": [ |
||||||
|
"## Develop a User-Friendly Interface with Gradio\n", |
||||||
|
"\n", |
||||||
|
"- Design a clean, intuitive, and user-centric interface using Gradio.\n", |
||||||
|
"- Ensure responsiveness and accessibility to provide a seamless and efficient user experience.\n", |
||||||
|
"- Focus on simplicity while maintaining functionality to cater to diverse user needs.\n", |
||||||
|
"\n" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "Eh-sWFZVBb_y" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# CSS styles for customizing the appearance of the Gradio UI elements.\n",
"# .python / .cpp colour the code boxes; .model styles the dropdowns;\n",
"# .button / .run-button style the action buttons and share one hover rule.\n",
"css = \"\"\"\n",
".python { \n",
"    background-color: #377ef0; \n",
"    color: #ffffff; \n",
"    padding: 0.5em; \n",
"    border-radius: 5px; /* Slightly rounded corners */\n",
"}\n",
".cpp { \n",
"    background-color: #00549e; \n",
"    color: #ffffff; \n",
"    padding: 0.5em; \n",
"    border-radius: 5px; \n",
"}\n",
".model { \n",
"    background-color: #17a2b8; /* Vibrant cyan color */\n",
"    color: white; \n",
"    font-size: 1.2em; \n",
"    padding: 0.5em; \n",
"    border: none; \n",
"    border-radius: 5px; \n",
"    cursor: pointer; \n",
"}\n",
".button { \n",
"    height: 4em; \n",
"    font-size: 1.5em; \n",
"    padding: 0.5em 1em; \n",
"    background-color: #e67e22; /* Vibrant orange */\n",
"    color: white; \n",
"    border: none; \n",
"    border-radius: 5px; \n",
"    cursor: pointer; \n",
"}\n",
".run-button { \n",
"    height: 3em; \n",
"    font-size: 1.5em; \n",
"    padding: 0.5em 1em; \n",
"    background-color: #16a085; /* Rich teal color */\n",
"    color: white; \n",
"    border: none; \n",
"    border-radius: 5px; \n",
"    cursor: pointer; \n",
"}\n",
".button:hover, .run-button:hover {\n",
"    background-color: #2c3e50; /* Dark navy for hover effect */\n",
"    color: #fff; \n",
"}\n",
"\"\"\""
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "M_v-j-B_sQHe" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Tab to Document Code with Docstrings and Comments\n",
"def docs_comments_ui():\n",
"    \"\"\"Build the 'Docstrings & Comments' tab and return its output widget.\"\"\"\n",
"    with gr.Tab(\"Docstrings & Comments\"):\n",
"        gr.Markdown(\"\"\"\n",
"        ## Document Code with Docstrings and Comments\n",
"        This tab allows you to automatically generate docstrings and inline comments for your Python code.\n",
"        - Paste your Python code into the **`Python Code`** textbox.\n",
"        - Select your preferred model (GPT, Claude, Gemini, or CodeQwen) to process the code.\n",
"        - Click the **`Add Docstrings & Comments`** button to generate well-documented Python code.\n",
"        The generated code will appear in the **`Python Code with Docstrings and Comments`** textarea.\n",
"        \"\"\")\n",
"        with gr.Row():\n",
"            code_input = gr.Textbox(label=\"Python Code:\", lines=20, value=PYTHON_SCRIPTS[\"custom\"], elem_classes=[\"python\"])\n",
"            documented_output = gr.TextArea(label=\"Python Code with Docstrings and Comments:\", interactive=True, lines=20, elem_classes=[\"python\"])\n",
"        with gr.Row():\n",
"            script_selector = gr.Dropdown(choices=list(PYTHON_SCRIPTS.keys()), label=\"Select a Python script\", value=\"custom\", elem_classes=[\"model\"])\n",
"            generate_btn = gr.Button(\"Add Docstrings & Comments\", elem_classes=[\"button\"])\n",
"            model_selector = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"CodeQwen\"], label=\"Select Model\", value=\"GPT\", elem_classes=[\"model\"])\n",
"\n",
"        # Load the selected example script into the input textbox.\n",
"        script_selector.change(fn=lambda name: PYTHON_SCRIPTS[name], inputs=[script_selector], outputs=[code_input])\n",
"\n",
"        # Clear the output box, then stream the documented code into it.\n",
"        generate_btn.click(fn=lambda: \"\", inputs=None, outputs=[documented_output]).then(\n",
"            fn=generate_comments, inputs=[code_input, model_selector], outputs=[documented_output]\n",
"        )\n",
"\n",
"    return documented_output"
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "WDjJp1eXtQzY" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Tab to Generate Comprehensive Unit Tests\n",
"def unit_tests_ui():\n",
"    \"\"\"Build the 'Unit Tests' tab and return its output widget.\"\"\"\n",
"    with gr.Tab(\"Unit Tests\"):\n",
"        gr.Markdown(\"\"\"\n",
"        ## Generate Comprehensive Unit Tests\n",
"        This tab helps you create unit tests for your Python code automatically.\n",
"        - Paste your Python code into the **`Python Code`** textbox.\n",
"        - Choose a model (GPT, Claude, Gemini, or CodeQwen) to generate the unit tests.\n",
"        - Click the **`Generate Unit Tests`** button, and the generated unit tests will appear in the **`Python Code with Unit Tests`** textarea.\n",
"        Use these unit tests to ensure your code behaves as expected.\n",
"        \"\"\")\n",
"        with gr.Row():\n",
"            code_input = gr.Textbox(label=\"Python Code:\", lines=20, value=PYTHON_SCRIPTS[\"custom\"], elem_classes=[\"python\"])\n",
"            tests_output = gr.TextArea(label=\"Python Code with Unit Tests:\", interactive=True, lines=20, elem_classes=[\"python\"])\n",
"        with gr.Row():\n",
"            script_selector = gr.Dropdown(choices=list(PYTHON_SCRIPTS.keys()), label=\"Select a Python script\", value=\"custom\", elem_classes=[\"model\"])\n",
"            generate_btn = gr.Button(\"Generate Unit Tests\", elem_classes=[\"button\"])\n",
"            model_selector = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"CodeQwen\"], label=\"Select Model\", value=\"GPT\", elem_classes=[\"model\"])\n",
"\n",
"        # Load the selected example script into the input textbox.\n",
"        script_selector.change(fn=lambda name: PYTHON_SCRIPTS[name], inputs=[script_selector], outputs=[code_input])\n",
"\n",
"        # Clear the output box, then stream the generated tests into it.\n",
"        generate_btn.click(fn=lambda: \"\", inputs=None, outputs=[tests_output]).then(\n",
"            fn=generate_tests, inputs=[code_input, model_selector], outputs=[tests_output]\n",
"        )\n",
"\n",
"    return tests_output"
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"id": "x57SZeLi9NyV" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Tab to Convert Python Code to C++\n",
"def python_to_cpp_ui():\n",
"    \"\"\"Build the 'Python to C++' tab; return the C++ box and both result areas.\"\"\"\n",
"    with gr.Tab(\"Python to C++\"):\n",
"        gr.Markdown(\"\"\"\n",
"        ## Convert Python Code to C++\n",
"        This tab facilitates the conversion of Python code into C++.\n",
"        - Paste your Python code into the **`Python Code`** textbox.\n",
"        - Select your preferred model (GPT, Claude, Gemini, or CodeQwen) to perform the conversion.\n",
"        - Click **`Convert to C++`** to see the equivalent C++ code in the **`C++ Code`** textbox.\n",
"        Additional Features:\n",
"        - You can execute the Python or C++ code directly using the respective **`Run Python`** or **`Run C++`** buttons.\n",
"        - The output will appear in the respective result text areas below.\n",
"        \"\"\")\n",
"        with gr.Row():\n",
"            python_input = gr.Textbox(label=\"Python Code:\", lines=20, value=PYTHON_SCRIPTS[\"custom\"], elem_classes=[\"python\"])\n",
"            cpp_code = gr.Textbox(label=\"C++ Code:\", interactive=True, lines=20, elem_classes=[\"cpp\"])\n",
"        with gr.Row():\n",
"            script_selector = gr.Dropdown(choices=list(PYTHON_SCRIPTS.keys()), label=\"Select a Python script\", value=\"custom\", elem_classes=[\"model\"])\n",
"            convert_button = gr.Button(\"Convert to C++\", elem_classes=[\"button\"])\n",
"            model_selector = gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\", \"CodeQwen\"], label=\"Select Model\", value=\"GPT\", elem_classes=[\"model\"])\n",
"        with gr.Row():\n",
"            python_run_btn = gr.Button(\"Run Python\", elem_classes=[\"run-button\"])\n",
"            cpp_run_btn = gr.Button(\"Run C++\", elem_classes=[\"run-button\"])\n",
"        with gr.Row():\n",
"            python_result = gr.TextArea(label=\"Python Result:\", lines=10, elem_classes=[\"python\"])\n",
"            cpp_result = gr.TextArea(label=\"C++ Result:\", lines=10, elem_classes=[\"cpp\"])\n",
"\n",
"        # Load the selected example script into the input textbox.\n",
"        script_selector.change(fn=lambda name: PYTHON_SCRIPTS[name], inputs=[script_selector], outputs=[python_input])\n",
"\n",
"        # Clear the C++ box, then stream the converted code into it.\n",
"        convert_button.click(fn=lambda: \"\", inputs=None, outputs=[cpp_code]).then(\n",
"            fn=convert_code, inputs=[python_input, model_selector], outputs=[cpp_code]\n",
"        )\n",
"\n",
"        # Execute either snippet and show its captured output.\n",
"        python_run_btn.click(run_python, inputs=[python_input], outputs=[python_result])\n",
"        cpp_run_btn.click(run_cpp, inputs=[cpp_code], outputs=[cpp_result])\n",
"\n",
"    return cpp_code, python_result, cpp_result"
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": { |
||||||
|
"colab": { |
||||||
|
"base_uri": "https://localhost:8080/", |
||||||
|
"height": 645 |
||||||
|
}, |
||||||
|
"id": "n8ZdDrOrrbl-", |
||||||
|
"outputId": "08350d69-569e-4947-8da1-d755e9a2678f" |
||||||
|
}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Combine the tabs into the main UI and handle tab switching\n",
"with gr.Blocks(css=css) as main_ui:\n",
"    with gr.Tabs() as tabs:\n",
"        comments_output = docs_comments_ui()\n",
"        tests_output = unit_tests_ui()\n",
"        cpp_output, python_out, cpp_out = python_to_cpp_ui()\n",
"\n",
"    # Reset outputs on tab switch\n",
"    # (one empty string per output component, in the same order as `outputs`)\n",
"    tabs.select(\n",
"        fn=lambda: [\"\", \"\", \"\", \"\", \"\"],\n",
"        inputs=None,\n",
"        outputs=[comments_output, \n",
"                 tests_output, \n",
"                 cpp_output, python_out, cpp_out]\n",
"    )\n",
"\n",
"# Launch the app\n",
"main_ui.launch(inbrowser=True)"
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"colab": { |
||||||
|
"provenance": [] |
||||||
|
}, |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 4 |
||||||
|
} |
@ -0,0 +1,37 @@ |
|||||||
|
# Overview |
||||||
|
|
||||||
|
This uses de-identified medical dictation data supplied by [mtsamples](https://mtsamples.com). The data from the mtsamples |
||||||
|
website was downloaded from [kaggle](https://www.kaggle.com/datasets/tboyle10/medicaltranscriptions). There are four
||||||
|
sample notes in different directories (see knowledge_base/mtsamples_dictations) that will be added to a ChromaDB
||||||
|
vector database and will be available during chat using RAG (Retrieval Augmented Generation). |
||||||
|
|
||||||
|
# How to run |
||||||
|
|
||||||
|
- Run example |
||||||
|
|
||||||
|
```shell |
||||||
|
conda activate <your_environment> |
||||||
|
cd <your_directory_where_script_lives> |
||||||
|
python run_rag_chat.py |
||||||
|
``` |
||||||
|
|
||||||
|
# Chat example |
||||||
|
|
||||||
|
 |
||||||
|
|
||||||
|
# Questions to ask? |
||||||
|
|
||||||
|
1) How old is Ms. Connor? |
||||||
|
2) What are Ms. Connor's vital signs? |
||||||
|
3) How old is Ms. Mouse? |
||||||
|
4) What is Ms. Mouse concerned about? |
||||||
|
5) What are Ms. Mouse's vital signs? |
||||||
|
6) How old is Mr. Duck? |
||||||
|
7) Why did Mr. Duck go to the doctor? |
||||||
|
8) How old is Ms. Barbara? |
||||||
|
9) Why did Ms. Barbara go to the doctor? |
||||||
|
10) Is Ms. Barbara allergic to anything? |
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
After Width: | Height: | Size: 89 KiB |
@ -0,0 +1,44 @@ |
|||||||
|
HISTORY OF PRESENT ILLNESS: |
||||||
|
|
||||||
|
Ms. Connor is a 50-year-old female who returns to clinic for a wound check. |
||||||
|
The patient underwent an APR secondary to refractory ulcerative colitis. |
||||||
|
Subsequently, she developed a wound infection, which has since healed. |
||||||
|
On our most recent visit to our clinic, she has her perineal stitches removed and presents today for followup of |
||||||
|
her perineal wound. She describes no drainage or erythema from her bottom. She is having good ostomy output. |
||||||
|
She does not describe any fevers, chills, nausea, or vomiting. The patient does describe some intermittent |
||||||
|
pain beneath the upper portion of the incision as well as in the right lower quadrant below her ostomy. |
||||||
|
She has been taking Percocet for this pain and it does work. She has since run out has been trying |
||||||
|
extra strength Tylenol, which will occasionally help this intermittent pain. She is requesting additional |
||||||
|
pain medications for this occasional abdominal pain, which she still experiences. |
||||||
|
|
||||||
|
PHYSICAL EXAMINATION: |
||||||
|
|
||||||
|
Temperature 95.8, pulse 68, blood pressure 132/73, and weight 159 pounds. |
||||||
|
|
||||||
|
This is a pleasant female in no acute distress. |
||||||
|
The patient's abdomen is soft, nontender, nondistended with a well-healed midline scar. |
||||||
|
There is an ileostomy in the right hemiabdomen, which is pink, patent, productive, and protuberant. |
||||||
|
There are no signs of masses or hernias over the patient's abdomen. |
||||||
|
|
||||||
|
ASSESSMENT AND PLAN: |
||||||
|
|
||||||
|
This is a pleasant 50-year-old female who has undergone an APR secondary to refractory ulcerative colitis. |
||||||
|
Overall, her quality of life has significantly improved since she had her APR. She is functioning well with her ileostomy. |
||||||
|
She did have concerns or questions about her diet and we discussed the BRAT diet, which consisted of foods that would |
||||||
|
slow down the digestive tract such as bananas, rice, toast, cheese, and peanut butter. |
||||||
|
I discussed the need to monitor her ileostomy output and preferential amount of daily output is 2 liters or less. |
||||||
|
I have counseled her on refraining from soft drinks and fruit drinks. I have also discussed with her that this diet |
||||||
|
is moreover a trial and error and that she may try certain foods that did not agree with her ileostomy, |
||||||
|
however others may and that this is something she will just have to perform trials with over the next several |
||||||
|
months until she finds what foods that she can and cannot eat with her ileostomy. She also had questions about |
||||||
|
her occasional abdominal pain. I told her that this was probably continue to improve as months went by and I |
||||||
|
gave her a refill of her Percocet for the continued occasional pain. I told her that this would the last time |
||||||
|
I would refill the Percocet and if she has continued pain after she finishes this bottle then she would need to |
||||||
|
start ibuprofen or Tylenol if she had continued pain. The patient then brought up some right hand and arm numbness, |
||||||
|
which has been there postsurgically and was thought to be from positioning during surgery. |
||||||
|
This is all primarily gone away except for a little bit of numbness at the tip of the third digit as well as |
||||||
|
some occasional forearm muscle cramping. I told her that I felt that this would continue to improve as it |
||||||
|
has done over the past two months since her surgery. I told her to continue doing hand exercises as she has |
||||||
|
been doing and this seems to be working for her. Overall, I think she has healed from her surgery and is doing |
||||||
|
very well. Again, her quality of life is significantly improved. She is happy with her performance. We will see |
||||||
|
her back in six months just for a general routine checkup and see how she is doing at that time. |
@ -0,0 +1,50 @@ |
|||||||
|
HISTORY OF PRESENT ILLNESS:, |
||||||
|
|
||||||
|
Ms. Mouse is a 67-year-old white female with a history of uterine papillary serous carcinoma who is |
||||||
|
status post 6 cycles of carboplatin and Taxol, is here today for followup. Her last cycle of chemotherapy |
||||||
|
was finished on 01/18/08, and she complains about some numbness in her right upper extremity. |
||||||
|
This has not gotten worse recently and there is no numbness in her toes. She denies any tingling or burning., |
||||||
|
|
||||||
|
REVIEW OF SYSTEMS: |
||||||
|
|
||||||
|
Negative for any fever, chills, nausea, vomiting, headache, chest pain, shortness of breath, abdominal pain, |
||||||
|
constipation, diarrhea, melena, hematochezia or dysuria. |
||||||
|
|
||||||
|
The patient is concerned about her blood pressure being up a little bit and also a mole that she had noticed for the |
||||||
|
past few months in her head. |
||||||
|
|
||||||
|
PHYSICAL EXAMINATION: |
||||||
|
|
||||||
|
VITAL SIGNS: Temperature 35.6, blood pressure 143/83, pulse 65, respirations 18, and weight 66.5 kg. |
||||||
|
GENERAL: She is a middle-aged white female, not in any distress. |
||||||
|
HEENT: No lymphadenopathy or mucositis. |
||||||
|
CARDIOVASCULAR: Regular rate and rhythm. |
||||||
|
LUNGS: Clear to auscultation bilaterally. |
||||||
|
EXTREMITIES: No cyanosis, clubbing or edema. |
||||||
|
NEUROLOGICAL: No focal deficits noted. |
||||||
|
PELVIC: Normal-appearing external genitalia. Vaginal vault with no masses or bleeding., |
||||||
|
|
||||||
|
LABORATORY DATA: |
||||||
|
|
||||||
|
None today. |
||||||
|
|
||||||
|
RADIOLOGIC DATA: |
||||||
|
|
||||||
|
CT of the chest, abdomen, and pelvis from 01/28/08 revealed status post total abdominal hysterectomy/bilateral |
||||||
|
salpingo-oophorectomy with an unremarkable vaginal cuff. No local or distant metastasis. |
||||||
|
Right probably chronic gonadal vein thrombosis. |
||||||
|
|
||||||
|
ASSESSMENT: |
||||||
|
|
||||||
|
This is a 67-year-old white female with history of uterine papillary serous carcinoma, status post total |
||||||
|
abdominal hysterectomy and bilateral salpingo-oophorectomy and 6 cycles of carboplatin and Taxol chemotherapy. |
||||||
|
She is doing well with no evidence of disease clinically or radiologically. |
||||||
|
|
||||||
|
PLAN: |
||||||
|
|
||||||
|
1. Plan to follow her every 3 months and CT scans every 6 months for the first 2 years. |
||||||
|
2. The patient was advised to contact the primary physician for repeat blood pressure check and get started on |
||||||
|
antihypertensives if it is persistently elevated. |
||||||
|
3. The patient was told that the mole that she is mentioning in her head is no longer palpable and just to observe it for now. |
||||||
|
4. The patient was advised about doing Kegel exercises for urinary incontinence, and we will address this issue again |
||||||
|
during next clinic visit if it is persistent. |
@ -0,0 +1,25 @@ |
|||||||
|
SUBJECTIVE: |
||||||
|
|
||||||
|
Mr. Duck is a 29-year-old white male who is a patient of Dr. XYZ and he comes in today |
||||||
|
complaining that he was stung by a Yellow Jacket Wasp yesterday and now has a lot of |
||||||
|
swelling in his right hand and right arm. He says that he has been stung by wasps before and had similar |
||||||
|
reactions. He just said that he wanted to catch it early before he has too bad of a severe reaction like he has had in the past. |
||||||
|
He has had a lot of swelling, but no anaphylaxis-type reactions in the past; no shortness of breath or difficultly with his |
||||||
|
throat feeling like it is going to close up or anything like that in the past; no racing heart beat or anxiety feeling, |
||||||
|
just a lot of localized swelling where the sting occurs. |
||||||
|
|
||||||
|
OBJECTIVE: |
||||||
|
|
||||||
|
Vitals: His temperature is 98.4. Respiratory rate is 18. Weight is 250 pounds. |
||||||
|
Extremities: Examination of his right hand and forearm reveals that he has an apparent sting just around his |
||||||
|
wrist region on his right hand on the medial side as well as significant swelling in his hand and his right forearm; |
||||||
|
extending up to the elbow. He says that it is really not painful or anything like that. It is really not all that |
||||||
|
red and no signs of infection at this time. |
||||||
|
|
||||||
|
ASSESSMENT:, Wasp sting to the right wrist area. |
||||||
|
|
||||||
|
PLAN: |
||||||
|
|
||||||
|
1. Solu-Medrol 125 mg IM X 1. |
||||||
|
2. Over-the-counter Benadryl, ice and elevation of that extremity. |
||||||
|
3. Follow up with Dr. XYZ if any further evaluation is needed. |
@ -0,0 +1,54 @@ |
|||||||
|
CHIEF COMPLAINT: |
||||||
|
|
||||||
|
Ms. Barbara is a thirty one year old female patient comes for three-week postpartum checkup, complaining of allergies. |
||||||
|
|
||||||
|
HISTORY OF PRESENT ILLNESS: |
||||||
|
|
||||||
|
She is doing well postpartum. She has had no headache. She is breastfeeding and feels like her milk is adequate. |
||||||
|
She has not had much bleeding. She is using about a mini pad twice a day, not any cramping or clotting and the |
||||||
|
discharge is turned from red to brown to now slightly yellowish. She has not yet had sexual intercourse. |
||||||
|
She does complain that she has had a little pain with the bowel movement, and every now and then she |
||||||
|
notices a little bright red bleeding. She has not been particularly constipated but her husband says |
||||||
|
she is not eating her vegetables like she should. Her seasonal allergies have back developed and she is |
||||||
|
complaining of extremely itchy watery eyes, runny nose, sneezing, and kind of a pressure sensation in her ears. |
||||||
|
|
||||||
|
MEDICATIONS: |
||||||
|
|
||||||
|
Prenatal vitamins. |
||||||
|
|
||||||
|
ALLERGIES: |
||||||
|
|
||||||
|
She thinks to Benadryl. |
||||||
|
|
||||||
|
FAMILY HISTORY: |
||||||
|
|
||||||
|
Mother is 50 and healthy. Dad is 40 and healthy. Half-sister, age 34, is healthy. |
||||||
|
She has a sister who is age 10 who has some yeast infections. |
||||||
|
|
||||||
|
PHYSICAL EXAMINATION: |
||||||
|
|
||||||
|
VITALS: Weight: 124 pounds. Blood pressure 96/54. Pulse: 72. Respirations: 16. LMP: 10/18/03. Age: 39. |
||||||
|
HEENT: Head is normocephalic. |
||||||
|
Eyes: EOMs intact. |
||||||
|
PERRLA. Conjunctiva clear. |
||||||
|
Fundi: Discs flat, cups normal. |
||||||
|
No AV nicking, hemorrhage or exudate. |
||||||
|
Ears: TMs intact. |
||||||
|
Mouth: No lesion. |
||||||
|
Throat: No inflammation. |
||||||
|
She has allergic rhinitis with clear nasal drainage, clear watery discharge from the eyes. |
||||||
|
Abdomen: Soft. No masses. |
||||||
|
Pelvic: Uterus is involuting. |
||||||
|
Rectal: She has one external hemorrhoid which has inflamed. Stool is guaiac negative and using anoscope, |
||||||
|
no other lesions are identified. |
||||||
|
|
||||||
|
ASSESSMENT/PLAN: |
||||||
|
|
||||||
|
Satisfactory three-week postpartum course, seasonal allergies. We will try Patanol eyedrops and Allegra 60 |
||||||
|
mg twice a day. She was cautioned about the possibility that this may alter her milk supply. She is to |
||||||
|
drink extra fluids and call if she has problems with that. We will try ProctoFoam HC. For the hemorrhoids, |
||||||
|
also increase the fiber in her diet. That prescription was written, as well as one for Allegra and Patanol. |
||||||
|
She additionally will be begin on Micronor because she would like to protect herself from pregnancy until |
||||||
|
her husband get scheduled in and has a vasectomy, which is their ultimate plan for birth control, and she |
||||||
|
anticipates that happening fairly soon. She will call and return if she continues to have problems with allergies. |
||||||
|
Meantime, rechecking in three weeks for her final six-week postpartum checkup. |
@ -0,0 +1,59 @@ |
|||||||
|
import gradio as gr |
||||||
|
from langchain_chroma import Chroma |
||||||
|
from pathlib import Path |
||||||
|
from utils import create_vector_db, Rag, get_chunks, get_conversation_chain, get_local_vector_db |
||||||
|
|
||||||
|
|
||||||
|
def chat(question, history) -> str:
    """
    Answer a single chat question for the Gradio app.

    :param question:
        The question being asked in the chat app.
    :type question: str
    :param history:
        A list of the conversation questions and answers (unused here; the
        chain keeps its own memory).
    :type history: list
    :return:
        The answer to the current question, with the first source document
        appended when one is available.
    """
    result = conversation_chain.invoke({"question": question})
    answer = result['answer']

    # include source documents if they exist;
    # grab the first one as that should be related to the answer
    source_documents = result.get('source_documents')
    if source_documents:
        first_source = source_documents[0]
        return f"{answer}\n\n**Source:**\n{first_source.metadata.get('source', 'Source')}"
    return answer
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Launch the gradio chat interface in a new browser tab."""
    interface = gr.ChatInterface(chat, type="messages")
    interface.launch(inbrowser=True)
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Build the vector store from scratch only when no persisted copy exists
    # on disk next to this script.
    create_new_db = False if Path('vector_db').exists() else True

    if create_new_db:
        # Chunk every folder under the local knowledge base and embed it
        # into a fresh Chroma database.
        folders = Path('knowledge_base').glob('*')
        chunks = get_chunks(folders=folders)
        vector_store = create_vector_db(chunks=chunks, db_name=Rag.DB_NAME.value, embeddings=Rag.EMBED_MODEL.value)
    else:
        # Reuse the previously persisted vector database.
        # NOTE(review): this relative path assumes a specific working
        # directory / sibling project layout — confirm before shipping.
        client = get_local_vector_db(path='../rag_chat_example/vector_db')
        vector_store = Chroma(client=client, embedding_function=Rag.EMBED_MODEL.value)

    # Defined at module scope (not inside main) so the chat() callback can
    # reach it as a global.
    conversation_chain = get_conversation_chain(vectorstore=vector_store)

    main()
||||||
|
|
||||||
|
|
||||||
|
|
@ -0,0 +1,267 @@ |
|||||||
|
from chromadb import PersistentClient |
||||||
|
from dotenv import load_dotenv |
||||||
|
from enum import Enum |
||||||
|
|
||||||
|
import plotly.graph_objects as go |
||||||
|
from langchain.document_loaders import DirectoryLoader, TextLoader |
||||||
|
from langchain.text_splitter import CharacterTextSplitter |
||||||
|
from langchain.schema import Document |
||||||
|
from langchain_openai import OpenAIEmbeddings, ChatOpenAI |
||||||
|
from langchain_chroma import Chroma |
||||||
|
from langchain.memory import ConversationBufferMemory |
||||||
|
from langchain.chains import ConversationalRetrievalChain |
||||||
|
import numpy as np |
||||||
|
import os |
||||||
|
from pathlib import Path |
||||||
|
from sklearn.manifold import TSNE |
||||||
|
from typing import Any, List, Tuple, Generator |
||||||
|
|
||||||
|
# Resolve the repository-root .env (four levels above this file) and load it
# so API credentials are in the environment before any client is created.
cur_path = Path(__file__)
env_path = cur_path.parent.parent.parent.parent / '.env'
assert env_path.exists(), f"Please add an .env to the root project path"

load_dotenv(dotenv_path=env_path)
||||||
|
|
||||||
|
|
||||||
|
class Rag(Enum):
    # Central configuration constants for the RAG pipeline.
    # NOTE(review): EMBED_MODEL instantiates OpenAIEmbeddings at import time,
    # so the OpenAI key must already be loaded (see load_dotenv above) —
    # confirm this is intended rather than lazy construction.

    GPT_MODEL = "gpt-4o-mini"                                    # chat model used by the conversation chain
    HUG_MODEL = "sentence-transformers/all-MiniLM-L6-v2"         # alternative HF embedding model name
    EMBED_MODEL = OpenAIEmbeddings()                             # embedding function shared across the module
    DB_NAME = "vector_db"                                        # on-disk Chroma persist directory
||||||
|
|
||||||
|
|
||||||
|
def add_metadata(doc: Document, doc_type: str) -> Document:
    """
    Tag a Document with its document type.

    :param doc: The Document object to add metadata to.
    :type doc: Document
    :param doc_type: The type of document to be added as metadata.
    :type doc_type: str
    :return: The same Document object, with the metadata added.
    :rtype: Document
    """
    doc.metadata.update({"doc_type": doc_type})
    return doc
||||||
|
|
||||||
|
|
||||||
|
def get_chunks(folders: Generator[Path, None, None], file_ext='.txt') -> List[Document]:
    """
    Load documents from the given folders, tag each with its folder name as
    a doc_type, and split everything into overlapping chunks.

    :param folders: Iterable of folder paths containing documents.
    :type folders: Generator[Path, None, None]
    :param file_ext:
        The file extension to get from a local knowledge base (e.g. '.txt').
    :type file_ext: str
    :return: List of document chunks.
    :rtype: List[Document]
    """
    loader_kwargs = {'encoding': 'utf-8'}

    all_docs = []
    for folder in folders:
        # The folder name doubles as the document type label.
        doc_type = os.path.basename(folder)
        folder_loader = DirectoryLoader(
            folder, glob=f"**/*{file_ext}", loader_cls=TextLoader, loader_kwargs=loader_kwargs
        )
        for doc in folder_loader.load():
            all_docs.append(add_metadata(doc, doc_type))

    # 1000-char chunks with 200-char overlap keeps context across boundaries.
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_documents(all_docs)
||||||
|
|
||||||
|
|
||||||
|
def create_vector_db(db_name: str, chunks: List[Document], embeddings: Any) -> Any:
    """
    Create (or recreate) a persisted Chroma vector database from chunks.

    :param db_name: Name of the database to create.
    :type db_name: str
    :param chunks: List of document chunks.
    :type chunks: List[Document]
    :param embeddings: Embedding function to use.
    :type embeddings: Any
    :return: Created vector store.
    :rtype: Any
    """
    # Wipe any existing collection first so the rebuild starts clean.
    if os.path.exists(db_name):
        stale = Chroma(persist_directory=db_name, embedding_function=embeddings)
        stale.delete_collection()

    return Chroma.from_documents(documents=chunks, embedding=embeddings, persist_directory=db_name)
||||||
|
|
||||||
|
|
||||||
|
def get_local_vector_db(path: str) -> Any:
    """
    Open an existing local vector database.

    :param path: Path to the local vector database.
    :type path: str
    :return: Persistent client for the vector database.
    :rtype: Any
    """
    client = PersistentClient(path=path)
    return client
||||||
|
|
||||||
|
|
||||||
|
def get_vector_db_info(vector_store: Any) -> None:
    """
    Print the vector count and embedding dimensionality of the vector store.

    :param vector_store: Vector store to get information from.
    :type vector_store: Any
    """
    collection = vector_store._collection  # Chroma keeps the collection private
    count = collection.count()

    if count == 0:
        # Guard: the sample fetch below would raise IndexError on an empty
        # collection ("embeddings" comes back empty).
        print("There are 0 vectors in the vector store")
        return

    sample_embedding = collection.get(limit=1, include=["embeddings"])["embeddings"][0]
    dimensions = len(sample_embedding)

    print(f"There are {count:,} vectors with {dimensions:,} dimensions in the vector store")
||||||
|
|
||||||
|
|
||||||
|
def get_plot_data(collection: Any) -> Tuple[np.ndarray, List[str], List[str], List[str]]:
    """
    Extract embeddings, marker colors, doc types, and texts for plotting.

    :param collection: Collection to get data from.
    :type collection: Any
    :return: Tuple containing vectors, colors, document types, and documents.
    :rtype: Tuple[np.ndarray, List[str], List[str], List[str]]
    """
    result = collection.get(include=['embeddings', 'documents', 'metadatas'])
    vectors = np.array(result['embeddings'])
    documents = result['documents']
    metadatas = result['metadatas']
    doc_types = [metadata['doc_type'] for metadata in metadatas]

    # Map known doc types to fixed colors. The previous list.index() lookup
    # raised ValueError for any doc_type outside the four hard-coded ones;
    # unknown types now fall back to 'gray' instead of crashing.
    color_map = {'products': 'blue', 'employees': 'green', 'contracts': 'red', 'company': 'orange'}
    colors = [color_map.get(t, 'gray') for t in doc_types]

    return vectors, colors, doc_types, documents
||||||
|
|
||||||
|
|
||||||
|
def get_2d_plot(collection: Any) -> go.Figure:
    """
    Generate a 2D scatter plot of the vector store via t-SNE reduction.

    :param collection: Collection to generate plot from.
    :type collection: Any
    :return: 2D scatter plot figure.
    :rtype: go.Figure
    """
    vectors, colors, doc_types, documents = get_plot_data(collection)

    # Project the high-dimensional embeddings down to 2D for display.
    reduced = TSNE(n_components=2, random_state=42).fit_transform(vectors)

    hover_text = [f"Type: {t}<br>Text: {d[:100]}..." for t, d in zip(doc_types, documents)]
    scatter = go.Scatter(
        x=reduced[:, 0],
        y=reduced[:, 1],
        mode='markers',
        marker=dict(size=5, color=colors, opacity=0.8),
        text=hover_text,
        hoverinfo='text',
    )

    fig = go.Figure(data=[scatter])
    fig.update_layout(
        title='2D Chroma Vector Store Visualization',
        scene=dict(xaxis_title='x', yaxis_title='y'),
        width=800,
        height=600,
        margin=dict(r=20, b=10, l=10, t=40),
    )
    return fig
||||||
|
|
||||||
|
|
||||||
|
def get_3d_plot(collection: Any) -> go.Figure:
    """
    Generate a 3D scatter plot of the vector store via t-SNE reduction.

    :param collection: Collection to generate plot from.
    :type collection: Any
    :return: 3D scatter plot figure.
    :rtype: go.Figure
    """
    vectors, colors, doc_types, documents = get_plot_data(collection)

    # Project the high-dimensional embeddings down to 3D for display.
    reduced = TSNE(n_components=3, random_state=42).fit_transform(vectors)

    hover_text = [f"Type: {t}<br>Text: {d[:100]}..." for t, d in zip(doc_types, documents)]
    scatter = go.Scatter3d(
        x=reduced[:, 0],
        y=reduced[:, 1],
        z=reduced[:, 2],
        mode='markers',
        marker=dict(size=5, color=colors, opacity=0.8),
        text=hover_text,
        hoverinfo='text',
    )

    fig = go.Figure(data=[scatter])
    fig.update_layout(
        title='3D Chroma Vector Store Visualization',
        scene=dict(xaxis_title='x', yaxis_title='y', zaxis_title='z'),
        width=900,
        height=700,
        margin=dict(r=20, b=10, l=10, t=40),
    )
    return fig
||||||
|
|
||||||
|
|
||||||
|
def get_conversation_chain(vectorstore: Any) -> ConversationalRetrievalChain:
    """
    Create a conversational retrieval chain backed by the vector store.

    :param vectorstore: Vector store to use in the conversation chain.
    :type vectorstore: Any
    :return: Conversational retrieval chain.
    :rtype: ConversationalRetrievalChain
    """
    # output_key='answer' is required because the chain also returns
    # source documents alongside the answer.
    chain_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True, output_key='answer')

    return ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(temperature=0.7, model_name=Rag.GPT_MODEL.value),
        retriever=vectorstore.as_retriever(search_kwargs={"k": 25}),
        memory=chain_memory,
        return_source_documents=True,
    )
||||||
|
|
||||||
|
|
||||||
|
def get_lang_doc(document_text, doc_id, metadata=None, encoding='utf-8'):
    """
    Build a langchain Document that can be used to create a chroma database.

    :type document_text: str
    :param document_text:
        The text to add to a document object.
    :type doc_id: str
    :param doc_id:
        The document id to include.
    :type metadata: dict
    :param metadata:
        A dictionary of metadata to associate to the document object. This will help filter an item from a
        vector database.
    :type encoding: string
    :param encoding:
        The type of encoding to use for loading the text.
    """
    # NOTE(review): Document is not obviously documented to accept an
    # `encoding` kwarg — confirm it is not rejected or silently dropped
    # by the model's constructor.
    return Document(
        page_content=document_text,
        id=doc_id,
        metadata=metadata,
        encoding=encoding,
    )
||||||
|
|
||||||
|
|
@ -0,0 +1,313 @@ |
|||||||
|
{ |
||||||
|
"cells": [ |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "db8736a7-ed94-441c-9556-831fa57b5a10", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"# The Product Pricer Continued...\n", |
||||||
|
"\n", |
||||||
|
"## Testing Gemini-1.5-pro model" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "681c717b-4c24-4ac3-a5f3-3c5881d6e70a", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"import os\n", |
||||||
|
"import re\n", |
||||||
|
"from dotenv import load_dotenv\n", |
||||||
|
"import matplotlib.pyplot as plt\n", |
||||||
|
"import pickle\n", |
||||||
|
"import google.generativeai as google_genai\n", |
||||||
|
"import time" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "21a3833e-4093-43b0-8f7b-839c50b911ea", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"from items import Item\n", |
||||||
|
"from testing import Tester " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "36d05bdc-0155-4c72-a7ee-aa4e614ffd3c", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# environment\n", |
||||||
|
"load_dotenv()\n", |
||||||
|
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "b0a6fb86-74a4-403c-ab25-6db2d74e9d2b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"google_genai.configure()" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "c830ed3e-24ee-4af6-a07b-a1bfdcd39278", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"%matplotlib inline" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "5c9b05f4-c9eb-462c-8d86-de9140a2d985", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Load in the pickle files that are located in the `pickled_dataset` folder\n", |
||||||
|
"with open('train.pkl', 'rb') as file:\n", |
||||||
|
" train = pickle.load(file)\n", |
||||||
|
"\n", |
||||||
|
"with open('test.pkl', 'rb') as file:\n", |
||||||
|
" test = pickle.load(file)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "fc5c807b-c14c-458e-8cca-32bc0cc5b7c3", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Function to create the messages format required for Gemini 1.5 Pro\n", |
||||||
|
"# This function prepares the system and user messages in the format expected by Gemini models.\n", |
||||||
|
"def gemini_messages_for(item):\n", |
||||||
|
" system_message = \"You estimate prices of items. Reply only with the price, no explanation\"\n", |
||||||
|
" \n", |
||||||
|
" # Modify the test prompt by removing \"to the nearest dollar\" and \"Price is $\"\n", |
||||||
|
" # This ensures that the model receives a cleaner, simpler prompt.\n", |
||||||
|
" user_prompt = item.test_prompt().replace(\" to the nearest dollar\", \"\").replace(\"\\n\\nPrice is $\", \"\")\n", |
||||||
|
"\n", |
||||||
|
" # Reformat messages to Gemini’s expected format: messages = [{'role':'user', 'parts': ['hello']}]\n", |
||||||
|
" return [\n", |
||||||
|
" {\"role\": \"system\", \"parts\": [system_message]}, # System-level instruction\n", |
||||||
|
" {\"role\": \"user\", \"parts\": [user_prompt]}, # User's query\n", |
||||||
|
" {\"role\": \"model\", \"parts\": [\"Price is $\"]} # Assistant's expected prefix for response\n", |
||||||
|
" ]" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d6da66bb-bc4b-49ad-9224-a388470ef20b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Example usage of the gemini_messages_for function\n", |
||||||
|
"gemini_messages_for(test[0]) # Generate message structure for the first test item" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "b1af1888-f94a-4106-b0d8-8a70939eec4e", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Utility function to extract the numerical price from a given string\n", |
||||||
|
"# This function removes currency symbols and commas, then extracts the first number found.\n", |
||||||
|
"def get_price(s):\n", |
||||||
|
" s = s.replace('$', '').replace(',', '') # Remove currency symbols and formatting\n", |
||||||
|
" match = re.search(r\"[-+]?\\d*\\.\\d+|\\d+\", s) # Regular expression to find a number\n", |
||||||
|
" return float(match.group()) if match else 0 # Convert matched value to float, return 0 if no match" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a053c1a9-f86e-427c-a6be-ed8ec7bd63a5", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Example usage of get_price function\n", |
||||||
|
"get_price(\"The price is roughly $99.99 because blah blah\") # Expected output: 99.99" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "34a88e34-1719-4d08-adbe-adb69dfe5e83", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Function to get the estimated price using Gemini 1.5 Pro\n", |
||||||
|
"def gemini_1_point_5_pro(item):\n", |
||||||
|
" messages = gemini_messages_for(item) # Generate messages for the model\n", |
||||||
|
" system_message = messages[0]['parts'][0] # Extract system-level instruction\n", |
||||||
|
" user_messages = messages[1:] # Remove system message from messages list\n", |
||||||
|
" \n", |
||||||
|
" # Initialize Gemini 1.5 Pro model with system instruction\n", |
||||||
|
" gemini = google_genai.GenerativeModel(\n", |
||||||
|
" model_name=\"gemini-1.5-pro\",\n", |
||||||
|
" system_instruction=system_message\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
" # Generate response using Gemini API\n", |
||||||
|
" response = gemini.generate_content(\n", |
||||||
|
" contents=user_messages,\n", |
||||||
|
" generation_config=google_genai.GenerationConfig(max_output_tokens=5)\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
" # Extract text response and convert to numerical price\n", |
||||||
|
" return get_price(response.text)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "d89b10bb-8ebb-42ef-9146-f6e64e6849f9", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Example usage:\n", |
||||||
|
"gemini_1_point_5_pro(test[0])" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "89ad07e6-a28a-4625-b61e-d2ce12d440fc", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Retrieve the actual price of the test item (for comparison)\n", |
||||||
|
"test[0].price # Output: 374.41" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "384f28e5-e51f-4cd3-8d74-30a8275530db", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Test the function for gemini-1.5 pro using the Tester framework\n", |
||||||
|
"Tester.test(gemini_1_point_5_pro, test)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "markdown", |
||||||
|
"id": "9b627291-b02e-48dd-9130-703498135ddf", |
||||||
|
"metadata": {}, |
||||||
|
"source": [ |
||||||
|
"## Five, Gemini-2.0-flash" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "0ee393a9-7afd-404f-92f2-a64bb4d5fb8b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Function to get the estimated price using Gemini-2.0-flash-exp\n", |
||||||
|
"def gemini_2_point_0_flash_exp(item):\n", |
||||||
|
" messages = gemini_messages_for(item) # Generate messages for the model\n", |
||||||
|
" system_message = messages[0]['parts'][0] # Extract system-level instruction\n", |
||||||
|
" user_messages = messages[1:] # Remove system message from messages list\n", |
||||||
|
" \n", |
||||||
|
" # Initialize Gemini-2.0-flash-exp model with system instruction\n", |
||||||
|
" gemini = google_genai.GenerativeModel(\n", |
||||||
|
" model_name=\"gemini-2.0-flash-exp\",\n", |
||||||
|
" system_instruction=system_message\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
" # Adding a delay to avoid hitting the API rate limit and getting a \"ResourceExhausted: 429\" error\n", |
||||||
|
" time.sleep(5)\n", |
||||||
|
" \n", |
||||||
|
" # Generate response using Gemini API\n", |
||||||
|
" response = gemini.generate_content(\n", |
||||||
|
" contents=user_messages,\n", |
||||||
|
" generation_config=google_genai.GenerationConfig(max_output_tokens=5)\n", |
||||||
|
" )\n", |
||||||
|
"\n", |
||||||
|
" # Extract text response and convert to numerical price\n", |
||||||
|
" return get_price(response.text)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "203dc6f1-309e-46eb-9957-e06eed803cc8", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Example usage:\n", |
||||||
|
"gemini_2_point_0_flash_exp(test[0]) " |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "a844df09-d347-40b9-bb79-006ec4160aab", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Retrieve the actual price of the test item (for comparison)\n", |
||||||
|
"test[0].price # Output: 374.41" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "500b45c7-e5c1-44f2-95c9-1c3c06365339", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [ |
||||||
|
"# Test the function for gemini-2.0-flash-exp using the Tester framework\n", |
||||||
|
"Tester.test(gemini_2_point_0_flash_exp, test)" |
||||||
|
] |
||||||
|
}, |
||||||
|
{ |
||||||
|
"cell_type": "code", |
||||||
|
"execution_count": null, |
||||||
|
"id": "746b2d12-ba92-48e2-9065-c9a108d1593b", |
||||||
|
"metadata": {}, |
||||||
|
"outputs": [], |
||||||
|
"source": [] |
||||||
|
} |
||||||
|
], |
||||||
|
"metadata": { |
||||||
|
"kernelspec": { |
||||||
|
"display_name": "Python 3 (ipykernel)", |
||||||
|
"language": "python", |
||||||
|
"name": "python3" |
||||||
|
}, |
||||||
|
"language_info": { |
||||||
|
"codemirror_mode": { |
||||||
|
"name": "ipython", |
||||||
|
"version": 3 |
||||||
|
}, |
||||||
|
"file_extension": ".py", |
||||||
|
"mimetype": "text/x-python", |
||||||
|
"name": "python", |
||||||
|
"nbconvert_exporter": "python", |
||||||
|
"pygments_lexer": "ipython3", |
||||||
|
"version": "3.11.11" |
||||||
|
} |
||||||
|
}, |
||||||
|
"nbformat": 4, |
||||||
|
"nbformat_minor": 5 |
||||||
|
} |
Loading…
Reference in new issue