Browse Source

Merge pull request #246 from Nirtun/community-contributions-branch

Added my contributions to community-contributions
pull/264/head
Ed Donner 2 months ago committed by GitHub
parent
commit
a859f6723b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 148
      week1/community-contributions/week1 exercise - my AI tutor.ipynb

148
week1/community-contributions/week1 exercise - my AI tutor.ipynb

@ -0,0 +1,148 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "f38e9ebb-453d-4b40-84f6-bc3e9bf4d7ef",
"metadata": {},
"outputs": [],
"source": [
"# imports — all of the notebook's dependencies in one place\n",
"\n",
"import os\n",
"import requests\n",
"import json\n",
"import ollama\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"\n",
"# constants\n",
"\n",
"MODEL_GPT = 'gpt-4o-mini'   # OpenAI model used for the streamed answer below\n",
"MODEL_LLAMA = 'llama3.2'    # local model served by Ollama\n",
"# NOTE(review): OLLAMA_API / HEADERS appear unused — the ollama package is\n",
"# called directly later in the notebook; confirm before removing them\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f367c5bb-80a2-4d78-8f27-823f5dafe7c0",
"metadata": {},
"outputs": [],
"source": [
"# set up environment\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"openai = OpenAI()\n",
"\n",
"# System prompt for the AI technical LLM and Python tutor\n",
"\n",
"system_prompt = \"You are an EXPERT in AI, LLMs and Python. \\\n",
"Provide the answer with an example ALWAYS when necessary. \\\n",
"If you do not know the answer just say 'I don't know the answer'. \\\n",
"Respond in markdown in Spanish.\"\n",
"\n",
"# messages\n",
"def messages_for(question):\n",
"    \"\"\"Build the OpenAI-style chat message list for a tutoring question.\"\"\"\n",
"    return [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": question}\n",
"    ]\n",
"\n",
"# here is the question; type over this to ask something new\n",
"\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\"\n",
"question = question[:5_000]  # Truncate if more than 5,000 characters"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a90d726d-d494-401f-9cd6-0260f5c781e0",
"metadata": {},
"outputs": [],
"source": [
"# METHODS TO DISPLAY\n",
"def display_summary_ollama(question):\n",
"    \"\"\"Answer the question with the local Llama model and render it as Markdown.\"\"\"\n",
"    response = ollama.chat(\n",
"        model=MODEL_LLAMA,\n",
"        messages=messages_for(question)\n",
"    )\n",
"    summary = response['message']['content']\n",
"    display(Markdown(summary))\n",
"\n",
"def display_summary_gpt(question):\n",
"    \"\"\"Answer the question with GPT, updating a Markdown display as chunks stream in.\"\"\"\n",
"    stream = openai.chat.completions.create(\n",
"        model=MODEL_GPT,\n",
"        messages=messages_for(question),\n",
"        stream=True\n",
"    )\n",
"    response = \"\"\n",
"    display_handle = display(Markdown(\"\"), display_id=True)\n",
"    for chunk in stream:\n",
"        response += chunk.choices[0].delta.content or ''\n",
"        # Strip only code-fence markers (```markdown ... ```); the original\n",
"        # replace(\"markdown\", \"\") deleted the word anywhere in the answer and\n",
"        # mutated the accumulator, corrupting later chunks.\n",
"        cleaned = response.replace(\"```markdown\", \"\").replace(\"```\", \"\")\n",
"        update_display(Markdown(cleaned), display_id=display_handle.display_id)\n",
"\n",
"def display_summary(llm, question):\n",
"    \"\"\"Dispatch to the Ollama or GPT helper based on the requested model name.\"\"\"\n",
"    if llm.startswith(MODEL_LLAMA):  # use the constant, not a magic string\n",
"        display_summary_ollama(question)\n",
"    else:\n",
"        display_summary_gpt(question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e993b6d-8fee-43f3-9e36-f86701a5cc57",
"metadata": {},
"outputs": [],
"source": [
"# Get gpt-4o-mini to answer the question above, with streamed Markdown output\n",
"\n",
"display_summary(MODEL_GPT, question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "31f6283a-ee57-415e-9a57-83d07261b7f9",
"metadata": {},
"outputs": [],
"source": [
"# Get Llama 3.2 (via the local Ollama server) to answer the same question\n",
"\n",
"display_summary(MODEL_LLAMA, question)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Loading…
Cancel
Save