diff --git a/week2/community-contributions/brochure-generator-interface.ipynb b/week2/community-contributions/brochure-generator-interface.ipynb
new file mode 100644
index 0000000..b7b8d8c
--- /dev/null
+++ b/week2/community-contributions/brochure-generator-interface.ipynb
@@ -0,0 +1,460 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "e71d7ff9-c27a-4602-9230-856626b1de07",
+ "metadata": {},
+ "source": [
+ "# Company Brochure Generator UI\n",
+ "Generates a brochure for a company website, after scraping the website and pages linked with that page, based on the provided company URL. \n",
+ "Enables users to \n",
+ "- Choose a model type (Llama 3.2, Claude, GPT)-\n",
+ "- Choose the tone preference\n",
+ "- Choose the target audience"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "de9b59b9-8673-42e7-8849-62fe30f56711",
+ "metadata": {},
+ "source": [
+ "#### Imports, Keys, Instantiation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "id": "39fd7fed-b215-4037-bd6e-7e1af1b83897",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import requests\n",
+ "import json\n",
+ "from typing import List\n",
+ "from dotenv import load_dotenv\n",
+ "from bs4 import BeautifulSoup\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "from openai import OpenAI\n",
+ "import anthropic\n",
+ "import gradio as gr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "0bf24357-1d77-4721-9d5a-f99827b2158c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "OpenAI API Key exists and begins sk-proj-\n",
+ "Anthropic API Key exists and begins sk-ant-\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
+ "\n",
+ "if openai_api_key:\n",
+ " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"OpenAI API Key not set\")\n",
+ " \n",
+ "if anthropic_api_key:\n",
+ " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
+ "else:\n",
+ " print(\"Anthropic API Key not set\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "1afc12e1-02c1-4394-b589-19cd08d2a8bb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define models\n",
+ "CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n",
+ "GPT_MODEL = \"gpt-4o-mini\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "d5d79a69-0a39-4ab4-aaf8-bc591bce0536",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Creating instances\n",
+ "claude = anthropic.Anthropic()\n",
+ "openai = OpenAI()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1d3369bc-b751-4f4d-a288-d7d81c384e67",
+ "metadata": {},
+ "source": [
+ "#### Web Scraper"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "fafe1074-fbf4-47cc-80dc-34413a447977",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A class to represent a Webpage\n",
+ "\n",
+ "# Some websites need you to use proper headers when fetching them:\n",
+ "headers = {\n",
+ " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
+ "}\n",
+ "\n",
+ "class Website:\n",
+ " \"\"\"\n",
+ " A utility class to represent a Website that we have scraped, now with links\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self, url):\n",
+ " self.url = url\n",
+ " response = requests.get(url, headers=headers)\n",
+ " self.body = response.content\n",
+ " soup = BeautifulSoup(self.body, 'html.parser')\n",
+ " self.title = soup.title.string if soup.title else \"No title found\"\n",
+ " if soup.body:\n",
+ " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
+ " irrelevant.decompose()\n",
+ " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
+ " else:\n",
+ " self.text = \"\"\n",
+ " links = [link.get('href') for link in soup.find_all('a')]\n",
+ " self.links = [link for link in links if link]\n",
+ "\n",
+ " def get_contents(self):\n",
+ " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
+ ]
+ },
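+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "website-scraper-sanity-check",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional sanity check of the Website scraper on its own before wiring it into the LLM calls.\n",
+ "# A minimal sketch -- https://example.com is just a placeholder; any reachable URL should work.\n",
+ "test_site = Website(\"https://example.com\")\n",
+ "print(test_site.title)\n",
+ "print(test_site.links[:10])"
+ ]
+ },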
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "41c1f1af-ae20-423b-bf7c-efd7f8c2751b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
+ "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
+ "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
+ "link_system_prompt += \"You should respond in JSON as in this example:\"\n",
+ "link_system_prompt += \"\"\"\n",
+ "{\n",
+ " \"links\": [\n",
+ " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
+ " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n",
+ " ]\n",
+ "}\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "eb537563-e393-47ca-9af2-a8ea7393edd9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_links_user_prompt(website):\n",
+ " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
+ " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
+ "Do not include Terms of Service, Privacy, email or social media links.\\n\"\n",
+ " user_prompt += \"Links (some might be relative links):\\n\"\n",
+ " user_prompt += \"\\n\".join(website.links)\n",
+ " return user_prompt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "id": "033568d2-3f1a-43ac-a288-7a65b4ea86a5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_links(url):\n",
+ " website = Website(url)\n",
+ " response = openai.chat.completions.create(\n",
+ " model=GPT_MODEL,\n",
+ " messages=[\n",
+ " {\"role\": \"system\", \"content\": link_system_prompt},\n",
+ " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
+ " ],\n",
+ " response_format={\"type\": \"json_object\"}\n",
+ " )\n",
+ " result = response.choices[0].message.content\n",
+ " return json.loads(result)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "d8f316ac-f0b1-42d9-88a8-0a61fcb0023d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_all_details(url):\n",
+ " result = \"Landing page:\\n\"\n",
+ " result += Website(url).get_contents()\n",
+ " links = get_links(url)\n",
+ " print(\"Found links:\", links)\n",
+ " for link in links[\"links\"]:\n",
+ " print(f\"Processing {link['url']}...\")\n",
+ " result += f\"\\n\\n{link['type']}\\n\"\n",
+ " result += Website(link[\"url\"]).get_contents()\n",
+ " return result"
+ ]
+ },
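+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "get-all-details-peek",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional: peek at the combined scraped text that will be sent to the model.\n",
+ "# A sketch only -- the URL is a placeholder, and this triggers real web requests plus one OpenAI call via get_links.\n",
+ "# print(get_all_details(\"https://huggingface.co\")[:1000])"
+ ]
+ },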
+ {
+ "cell_type": "markdown",
+ "id": "016e065a-ac5a-48c0-bc4b-e916e9801384",
+ "metadata": {},
+ "source": [
+ "#### System Message"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "ed1c6068-5f4f-47a7-ab97-738dfb94e057",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
+ "and creates a short brochure about the company for prospective customers, investors and recruits. \\\n",
+ "You are also provided with the tone, and the target audience. Provide an appropriate answer. Respond in markdown.\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6d4f594c-927d-440f-8aae-33cfeb9c445c",
+ "metadata": {},
+ "source": [
+ "#### LLM Call Functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "id": "5b6a0379-3465-4c04-a553-4e4cdb9064b9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_gpt(prompt,company_name,url):\n",
+ " messages = [\n",
+ " {\"role\": \"user\", \"content\": prompt},\n",
+ " {\"role\":\"system\",\"content\":system_message}\n",
+ " ]\n",
+ " stream = openai.chat.completions.create(\n",
+ " model=GPT_MODEL,\n",
+ " messages=messages,\n",
+ " stream=True\n",
+ " )\n",
+ " result = \"\"\n",
+ " for chunk in stream:\n",
+ " result += chunk.choices[0].delta.content or \"\"\n",
+ " yield result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "a2194e1d-4e99-4127-9515-aa9353382bc6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_claude(prompt):\n",
+ " result = claude.messages.stream(\n",
+ " model=CLAUDE_MODEL,\n",
+ " max_tokens=1000,\n",
+ " temperature=0.7,\n",
+ " system=system_message,\n",
+ " messages=[\n",
+ " {\"role\": \"user\", \"content\": prompt},\n",
+ " ],\n",
+ " )\n",
+ " response = \"\"\n",
+ " with result as stream:\n",
+ " for text in stream.text_stream:\n",
+ " response += text or \"\"\n",
+ " yield response"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "64adf26c-33b2-4589-8df6-dc5d6da71420",
+ "metadata": {},
+ "source": [
+ "#### Brochure Creation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "8192f39f-508b-4592-a075-767db68672b3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_brochure_user_prompt(company_name, url):\n",
+ " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
+ " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
+ " user_prompt += get_all_details(url)\n",
+ " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n",
+ " return user_prompt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "id": "8aebfabe-4d51-4ee7-a9d2-5a379e9427cb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def create_brochure(company_name, url,model,tone,target):\n",
+ " print('create brochure function called')\n",
+ " prompt = f\"Please generate a company brochure for {company_name}.\"\n",
+ " prompt += f\"Use a {tone} tone; and target content at {target}\"\n",
+ " prompt += get_brochure_user_prompt(company_name,url)\n",
+ " \n",
+ " if model == \"GPT\":\n",
+ " result = stream_gpt(prompt,company_name,url)\n",
+ " elif model==\"Claude\":\n",
+ " result = stream_claude(prompt,company_name,url)\n",
+ " else:\n",
+ " raise ValueError(\"Unknown model\")\n",
+ " yield from result"
+ ]
+ },
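+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "create-brochure-direct-call",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional: exercise create_brochure without the UI. It is a generator, so iterate to stream the text.\n",
+ "# A sketch only -- the company name and URL are placeholders, and running this makes real web and API calls.\n",
+ "# last = \"\"\n",
+ "# for partial in create_brochure(\"Hugging Face\", \"https://huggingface.co\", \"GPT\", \"Formal\", \"Investors\"):\n",
+ "#     last = partial\n",
+ "# display(Markdown(last))"
+ ]
+ },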
+ {
+ "cell_type": "markdown",
+ "id": "c5f4f97b-c9d0-4d4c-8b02-e6209ba2549c",
+ "metadata": {},
+ "source": [
+ "#### Putting it all together : Gradio UI"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "33162303-9b49-46fe-a8e0-0d01be45685b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "force_dark_mode = \"\"\"\n",
+ "function refresh() {\n",
+ " const url = new URL(window.location);\n",
+ " if (url.searchParams.get('__theme') !== 'dark') {\n",
+ " url.searchParams.set('__theme', 'dark');\n",
+ " window.location.href = url.href;\n",
+ " }\n",
+ "}\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "id": "47ab9a41-cecd-4c21-bd68-4a15966b80c4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "* Running on local URL: http://127.0.0.1:7877\n",
+ "\n",
+ "To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": []
+ },
+ "execution_count": 41,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found links: {'links': [{'type': 'about page', 'url': 'https://www.vellum.ai/'}, {'type': 'careers page', 'url': 'https://www.vellum.ai/careers'}]}\n",
+ "Processing https://www.vellum.ai/...\n",
+ "Processing https://www.vellum.ai/careers...\n"
+ ]
+ }
+ ],
+ "source": [
+ "gr.Interface(\n",
+ " fn=create_brochure,\n",
+ " inputs=[\n",
+ " gr.Textbox(label='Company Name:'),\n",
+ " gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
+ " gr.Dropdown(['GPT','Claude'],label='Select Model:'),\n",
+ " gr.Dropdown(['Formal','Casual','Persuasive','Informative','Conversational'],label='Select Tone:'),\n",
+ " gr.Dropdown(['Businesses','General Public','Students','Investors','Customers'],label='Select Target Audience:'),\n",
+ " ],\n",
+ " outputs = [gr.Markdown(label='Brochure')],\n",
+ " flagging_mode = 'never',\n",
+ " js = force_dark_mode\n",
+ ").launch(inbrowser=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2b923b09-6738-450a-9035-2c8d1bb9cae6",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/day 4 - course booking assistant.ipynb b/week2/community-contributions/day 4 - course booking assistant.ipynb
new file mode 100644
index 0000000..c7a057e
--- /dev/null
+++ b/week2/community-contributions/day 4 - course booking assistant.ipynb
@@ -0,0 +1,251 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "5d799d2a-6e58-4a83-b17a-dbbc40efdc39",
+ "metadata": {},
+ "source": [
+ "## Project - Course Booking AI Asssistant\n",
+ "AI Customer Support Bot that \n",
+ "- Returns Prices\n",
+ "- Books Tickets\n",
+ "- Adds Information to Text File"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b1ad9acd-a702-48a3-8ff5-d536bcac8030",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "import json\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import gradio as gr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "74adab0c-99b3-46cd-a79f-320a3e74138a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialization\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ "if openai_api_key:\n",
+ " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"OpenAI API Key not set\")\n",
+ " \n",
+ "MODEL = \"gpt-4o-mini\"\n",
+ "openai = OpenAI()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8d3240a4-99c1-4c07-acaa-ecbb69ffd2e4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful assistant for an Online Course Platform called StudyAI. \"\n",
+ "system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
+ "system_message += \"Always be accurate. If you don't know the answer, say so.\"\n",
+ "system_message += \"If you are given a partial name, for example 'discrete' instead of 'discrete structures' \\\n",
+ "ask the user if they meant to say 'discrete structures', and then display the price. The user may also use \\\n",
+ "acronyms like 'PF' instead of programming fundamentals or 'OOP' to mean 'Object oriented programming'. \\\n",
+ "Clarify what the user means and then proceed as directed.\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9a1b8d5f-f893-477b-8396-ff7d697eb0c3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "course_prices = {\"programming fundamentals\": \"$19\", \"discrete structures\": \"$39\", \"operating systems\": \"$24\", \"object oriented programming\": \"$39\"}\n",
+ "\n",
+ "def get_course_price(course):\n",
+ " print(f\"Tool get_course_price called for {course}\")\n",
+ " course = course.lower()\n",
+ " return course_prices.get(course, \"Unknown\")\n",
+ "\n",
+ "def enroll_in_course(course):\n",
+ " print(f'Tool enroll_in_course_ called for {course}')\n",
+ " course_price = get_course_price(course)\n",
+ " if course_price != 'Unknown':\n",
+ " with open('enrolled_courses.txt', 'a') as file: \n",
+ " file.write(course + \"\\n\")\n",
+ " return 'Successfully enrolled in course'\n",
+ " else:\n",
+ " return 'Enrollment failed, no such course available'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "330d2b94-a8c5-4967-ace7-15d2cd52d7ae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "get_course_price('graph theory')\n",
+ "get_course_price('discrete structures')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5bb65830-fab8-45a7-bf43-7e52186915a0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "price_function = {\n",
+ " \"name\": \"get_course_price\",\n",
+ " \"description\": \"Get the price of a course. Call this whenever you need to know the course price, for example when a customer asks 'How much is a ticket for this course?'\",\n",
+ " \"parameters\": {\n",
+ " \"type\": \"object\",\n",
+ " \"properties\": {\n",
+ " \"course\": {\n",
+ " \"type\": \"string\",\n",
+ " \"description\": \"The course that the customer wants to purchase\",\n",
+ " },\n",
+ " },\n",
+ " \"required\": [\"course\"],\n",
+ " \"additionalProperties\": False\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "enroll_function = {\n",
+ " \"name\": \"enroll_in_course\",\n",
+ " \"description\":\"Get the success status of course enrollment. Call whenever a customer wants to enroll in a course\\\n",
+ " for example, if they say 'I want to purchase this course' or 'I want to enroll in this course'\",\n",
+ " \"parameters\":{\n",
+ " \"type\":\"object\",\n",
+ " \"properties\":{\n",
+ " \"course\":{\n",
+ " \"type\":\"string\",\n",
+ " \"description\": \"The course that the customer wants to purchase\",\n",
+ " },\n",
+ " },\n",
+ " \"required\": [\"course\"],\n",
+ " \"additionalProperties\": False\n",
+ " } \n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "08af86b9-3aaa-4b6b-bf7c-ee668ba1cbfe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tools = [\n",
+ " {\"type\":\"function\",\"function\":price_function},\n",
+ " {\"type\":\"function\",\"function\":enroll_function}\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "482efc34-ff1f-4146-9570-58b4d59c3b2f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def chat(message,history):\n",
+ " messages = [{\"role\":\"system\",\"content\":system_message}] + history + [{\"role\":\"user\",\"content\":message}]\n",
+ " response = openai.chat.completions.create(model=MODEL,messages=messages,tools=tools)\n",
+ "\n",
+ " if response.choices[0].finish_reason == \"tool_calls\":\n",
+ " message = response.choices[0].message\n",
+ " messages.append(message)\n",
+ " for tool_call in message.tool_calls:\n",
+ " messages.append(handle_tool_call(tool_call))\n",
+ " response = openai.chat.completions.create(model=MODEL,messages=messages)\n",
+ "\n",
+ " return response.choices[0].message.content"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f725b4fb-d477-4d7d-80b5-5d70e1b25a86",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# We have to write that function handle_tool_call:\n",
+ "\n",
+ "def handle_tool_call(tool_call):\n",
+ " function = tool_call.function.name\n",
+ " arguments = json.loads(tool_call.function.arguments)\n",
+ " match function:\n",
+ " case 'get_course_price':\n",
+ " course = arguments.get('course')\n",
+ " price = get_course_price(course)\n",
+ " return {\n",
+ " \"role\": \"tool\",\n",
+ " \"content\": json.dumps({\"course\": course,\"price\": price}),\n",
+ " \"tool_call_id\": tool_call.id\n",
+ " }\n",
+ " case 'enroll_in_course':\n",
+ " course = arguments.get('course')\n",
+ " status = enroll_in_course(course)\n",
+ " return {\n",
+ " \"role\": \"tool\",\n",
+ " \"content\": json.dumps({\"course\": course, \"status\": status}),\n",
+ " \"tool_call_id\": tool_call.id\n",
+ " }\n",
+ " "
+ ]
+ },
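+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "enroll-tool-sanity-check",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional sanity check of the enrollment tool on its own (no LLM involved).\n",
+ "# A sketch only -- note that it appends to enrolled_courses.txt in the working directory.\n",
+ "print(enroll_in_course('object oriented programming'))\n",
+ "with open('enrolled_courses.txt') as file:\n",
+ " print(file.read())"
+ ]
+ },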
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c446272a-9ce1-4ffd-9bc8-483d782810b4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat,type=\"messages\").launch(inbrowser=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1fe714a3-f793-4c3b-b5aa-6c81b82aea1b",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/day1-ollama-claude.ipynb b/week2/community-contributions/day1-ollama-claude.ipynb
new file mode 100644
index 0000000..f620759
--- /dev/null
+++ b/week2/community-contributions/day1-ollama-claude.ipynb
@@ -0,0 +1,218 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "e063b35e-5598-4084-b255-89956bfedaac",
+ "metadata": {},
+ "source": [
+ "### Models an interaction between LLama 3.2 and Claude 3.5 Haiku"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4f534359-cdb4-4441-aa66-d6700fa4d6a5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "import anthropic\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3bdff240-9118-4061-9369-585c4d4ce0a7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
+ " \n",
+ "if anthropic_api_key:\n",
+ " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
+ "else:\n",
+ " print(\"Anthropic API Key not set\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ff110b3f-3986-4fd8-a0b1-fd4b51133a8d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Connect to Anthropic\n",
+ "\n",
+ "claude = anthropic.Anthropic()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e6e596c6-6307-49c1-a29f-5c4e88f8d34d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Download the llama3.2:1b model for local execution.\n",
+ "!ollama pull llama3.2:1b"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "633b6892-6d04-40cb-8b61-196fc754b00c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define models\n",
+ "CLAUDE_MODEL = \"claude-3-5-haiku-latest\"\n",
+ "LLAMA_MODEL = \"llama3.2:1b\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a699a809-e3d3-4392-94bd-e2f80a5aec60",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "claude_system = \"You are a chatbot designed as a study tutor for undergraduate students. \\\n",
+ "You explain information and key-technical terms related to the subject in a succint yet \\\n",
+ "comprehensive manner. You may use tables, formatting and other visuals to help create \\\n",
+ "'cheat-sheets' of sorts.\"\n",
+ "\n",
+ "llama_system = \"You are a chatbot designed to ask questions about different topics related to \\\n",
+ "computer vision. You are meant to simulate a student, not teacher. Act as if you have no \\\n",
+ "prior knowledge\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bdb049d8-130b-42dd-aaab-29c09e3e2347",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "llama_messages = [\"Hi\"]\n",
+ "claude_messages = [\"Hello\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c158f31c-5e8b-48a4-9980-6b280393800b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_llama():\n",
+ " messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
+ " for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n",
+ " messages.append({\"role\": \"assistant\", \"content\": llama_msg})\n",
+ " messages.append({\"role\": \"user\", \"content\": claude_msg})\n",
+ " response = ollama.chat(model=LLAMA_MODEL, messages=messages)\n",
+ " return response['message']['content']\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d803c5a2-df54-427a-9b80-8e9dd04ee36d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_claude():\n",
+ " messages = []\n",
+ " for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n",
+ " messages.append({\"role\": \"user\", \"content\": llama_msg})\n",
+ " messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n",
+ " messages.append({\"role\": \"user\", \"content\": llama_messages[-1]})\n",
+ " message = claude.messages.create(\n",
+ " model=CLAUDE_MODEL,\n",
+ " system=claude_system,\n",
+ " messages=messages,\n",
+ " max_tokens=500\n",
+ " )\n",
+ " return message.content[0].text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a23794bb-0f36-4f91-aa28-24b876203a36",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "call_llama()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7f5c3e2f-a1bb-403b-b6b5-944a10d93305",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "call_claude()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3d6eb874-1c8f-47d8-a9f1-2e0fe197ae83",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "llama_messages = [\"Hi\"]\n",
+ "claude_messages = [\"Hello there, what would you like to learn today?\"]\n",
+ "\n",
+ "print(f'Ollama:\\n{ollama_messages[0]}')\n",
+ "print(f'Claude:\\n{claude_messages[0]}')\n",
+ "\n",
+ "for _ in range(5):\n",
+ " llama_next = call_llama()\n",
+ " print(f'Llama 3.2:\\n{llama_next}')\n",
+ " llama_messages.append(llama_next)\n",
+ " \n",
+ " claude_next = call_claude()\n",
+ " print(f'Claude 3.5 Haiku:\\n{claude_next}')\n",
+ " claude_messages.append(claude_next)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d1e651ad-85c8-45c7-ba83-f7c689080d6b",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/day3-gemini.ipynb b/week2/community-contributions/day3-gemini.ipynb
index 714f93a..c75e878 100644
--- a/week2/community-contributions/day3-gemini.ipynb
+++ b/week2/community-contributions/day3-gemini.ipynb
@@ -174,7 +174,7 @@
"**message** is the prompt to use \n",
"**history** is the past conversation, in OpenAI format \n",
"\n",
- "We will combine the system message, history and latest message, then call OpenAI."
+ "We will combine the system message, history and latest message, then call OpenAI ."
]
},
{
diff --git a/week2/community-contributions/day3-gradio-auth.ipynb b/week2/community-contributions/day3-gradio-auth.ipynb
index fe94e55..7ec2dc5 100644
--- a/week2/community-contributions/day3-gradio-auth.ipynb
+++ b/week2/community-contributions/day3-gradio-auth.ipynb
@@ -16,7 +16,7 @@
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
- "import gradio as gr"
+ "import gradio as gr "
]
},
{
@@ -178,5 +178,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
}
diff --git a/week2/community-contributions/day3-programming-tutor.ipynb b/week2/community-contributions/day3-programming-tutor.ipynb
new file mode 100644
index 0000000..700a0c9
--- /dev/null
+++ b/week2/community-contributions/day3-programming-tutor.ipynb
@@ -0,0 +1,142 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "d18a61ce-bbd4-491c-ab2e-8b352f9af844",
+ "metadata": {},
+ "source": [
+ "### An AI Chatbot that teaches students programming using GPT API"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c658ac85-6087-4a2c-b23f-1b92c17f0db3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import gradio as gr\n",
+ "import anthropic"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "46df0488-f874-41e0-a6a4-9a64aa7be53c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables \n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ " \n",
+ "if openai_api_key:\n",
+ " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"OpenAI API Key not set\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7eadc218-5b10-4174-bf26-575361640524",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "openai = OpenAI()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e7484731-ac84-405a-a688-6e81d139c5ce",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful programming study assistant\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "54e82f5a-993f-4a95-9d9d-caf35dbc4e76",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def chat(message, history):\n",
+ " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
+ "\n",
+ " print(\"History is:\")\n",
+ " print(history)\n",
+ " print(\"And messages is:\")\n",
+ " print(messages)\n",
+ "\n",
+ " stream = openai.chat.completions.create(model='gpt-4o-mini', messages=messages, stream=True)\n",
+ "\n",
+ " response = \"\"\n",
+ " for chunk in stream:\n",
+ " response += chunk.choices[0].delta.content or ''\n",
+ " yield response"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5941ed67-e2a7-41bc-a8a3-079e9f1fdb64",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e8fcfe68-bbf6-4058-acc9-0230c96608c2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message += \"Whenever the user talks about a topic that is not connected to programmming,\\\n",
+ "nudge them in the right direction by stating that you are here to help with programming. Encourage \\\n",
+ "the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge \\\n",
+ "if the user tries to misdirect you towards irrelevant topics. Maintain a freindly tone. Do not ignore \\\n",
+ "their requests, rather politely reject and then redirect them.\""
+ ]
+ },
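+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "relaunch-after-prompt-update",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# chat() reads the global system_message on every call, so the stricter instructions above\n",
+ "# take effect for new messages without relaunching. Uncomment to relaunch with a fresh conversation:\n",
+ "# gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)"
+ ]
+ },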
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "090e7d49-fcbf-4715-b120-8d7aa91d165f",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb
index 1034274..57541d1 100644
--- a/week2/community-contributions/day3-refine-user-query-by-llama.ipynb
+++ b/week2/community-contributions/day3-refine-user-query-by-llama.ipynb
@@ -20,7 +20,7 @@
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
- "import gradio as gr"
+ "import gradio as gr "
]
},
{
diff --git a/week2/community-contributions/day3.upsell.ipynb b/week2/community-contributions/day3.upsell.ipynb
index dd2bd06..26a3281 100644
--- a/week2/community-contributions/day3.upsell.ipynb
+++ b/week2/community-contributions/day3.upsell.ipynb
@@ -43,7 +43,7 @@
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
- "load_dotenv()\n",
+ "load_dotenv() \n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
diff --git a/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb b/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb
index 28aa34e..2e480f1 100644
--- a/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb
+++ b/week2/community-contributions/day4_with_booking_and_multiple_tools_per_message.ipynb
@@ -244,7 +244,7 @@
" },\n",
" \"required\": [\"destination_city\", \"price\"],\n",
" \"additionalProperties\": False\n",
- " }\n",
+ " } \n",
"}"
]
},
diff --git a/week2/community-contributions/multi-modal-StudyAI.ipynb b/week2/community-contributions/multi-modal-StudyAI.ipynb
new file mode 100644
index 0000000..0cafb5d
--- /dev/null
+++ b/week2/community-contributions/multi-modal-StudyAI.ipynb
@@ -0,0 +1,227 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "6aa646e3-7a57-461a-b69a-073179effa18",
+ "metadata": {},
+ "source": [
+ "## Additional End of week Exercise - week 2\n",
+ "\n",
+ "This includes \n",
+ "- Gradio UI\n",
+ "- use of the system prompt to add expertise\n",
+ "- audio input so you can talk to it\n",
+ "- respond with audio"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "72f3dca4-b052-4e9f-90c8-f42e667c165c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "import gradio as gr\n",
+ "import json"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23570b9f-8c7a-4cc7-b809-3505334b60a7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ "openai = OpenAI()\n",
+ "MODEL = 'gpt-4o-mini'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d379178a-8672-4e6f-a380-ad8d85f5c64e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"\"\"You are a personal study tutor, designed to provide clear, yet brief and succint answers to \n",
+ "students that ask you questions. The topics are related to data science, computer science \n",
+ "and technology in general, so you are allowed to use a moderate level of jargon. Explain in \n",
+ "simple terminology, so a student can easily understand. \n",
+ "\n",
+ "You may also be asked about prices for special courses.In this case, respond that you have no such\n",
+ "data available. \n",
+ "\n",
+ "\"\"\"\n",
+ "# Use a tabular format where possible \n",
+ "# for ease of information flow "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4745d439-c66e-4e5c-b5d4-9f0ba97aefdc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def chat(history):\n",
+ " messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
+ " response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
+ "\n",
+ " reply = response.choices[0].message.content\n",
+ " history += [{\"role\":\"assistant\", \"content\":reply}]\n",
+ "\n",
+ " # Comment out or delete the next line if you'd rather skip Audio for now..\n",
+ " talker(reply)\n",
+ " \n",
+ " return history"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a8b31799-df86-4151-98ea-66ef50fe767e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install openai-whisper"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9f5b8e51-2833-44be-a4f4-63c4683f2b6e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import whisper\n",
+ "\n",
+ "def transcribe_audio(audio):\n",
+ " if audio is None:\n",
+ " return \"No audio received.\"\n",
+ " \n",
+ " model = whisper.load_model(\"base\") # You can use \"tiny\", \"small\", etc.\n",
+ " result = model.transcribe(audio)\n",
+ " \n",
+ " return result[\"text\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e55f8e43-2da1-4f2a-bcd4-3fffa830db48",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import base64\n",
+ "from io import BytesIO\n",
+ "from PIL import Image\n",
+ "from IPython.display import Audio, display\n",
+ "\n",
+ "def talker(message):\n",
+ " response = openai.audio.speech.create(\n",
+ " model=\"tts-1\",\n",
+ " voice=\"onyx\",\n",
+ " input=message)\n",
+ "\n",
+ " audio_stream = BytesIO(response.content)\n",
+ " output_filename = \"output_audio.mp3\"\n",
+ " with open(output_filename, \"wb\") as f:\n",
+ " f.write(audio_stream.read())\n",
+ "\n",
+ " # Play the generated audio\n",
+ " display(Audio(output_filename, autoplay=True))"
+ ]
+ },
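+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "talker-sanity-check",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Optional: a quick check of the text-to-speech helper before wiring it into the chat flow.\n",
+ "# A sketch only -- uncomment to try it; this makes one small TTS API call.\n",
+ "# talker(\"Hello! This is a quick audio check.\")"
+ ]
+ },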
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cb3107a7-bfdc-4255-825f-bfabcf458c0c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# More involved Gradio code as we're not using the preset Chat interface!\n",
+ "# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n",
+ "\n",
+ "with gr.Blocks() as ui:\n",
+ " with gr.Row():\n",
+ " chatbot = gr.Chatbot(height=400,type=\"messages\")\n",
+ " with gr.Row():\n",
+ " entry = gr.Textbox(label=\"Chat with our StudyAI Assistant:\")\n",
+ " # with gr.Row():\n",
+ " # entry = gr.Textbox(label=\"Speak or Type:\", placeholder=\"Speak your question...\", interactive=True, microphone=True)\n",
+ " with gr.Row():\n",
+ " audio_input = gr.Audio(type=\"filepath\", label=\"Speak your question\")\n",
+ " with gr.Row():\n",
+ " clear = gr.Button(\"Clear\")\n",
+ "\n",
+ " def do_entry(message, history):\n",
+ " history += [{\"role\":\"user\", \"content\":message}]\n",
+ " return \"\", history\n",
+ "\n",
+ " def handle_audio(audio, history):\n",
+ " text = transcribe_audio(audio)\n",
+ " history += [{\"role\": \"user\", \"content\": text}]\n",
+ " return \"\", history\n",
+ "\n",
+ " entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
+ " chat, inputs=[chatbot], outputs=[chatbot]\n",
+ " )\n",
+ "\n",
+ " audio_input.change(handle_audio, inputs=[audio_input, chatbot], outputs=[entry, chatbot]).then(\n",
+ " chat, inputs=[chatbot], outputs=[chatbot]\n",
+ " )\n",
+ " \n",
+ " clear.click(lambda: [], inputs=None, outputs=chatbot, queue=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "73e0a776-d43e-4b04-a37f-a27d3714cf47",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ui.launch(inbrowser=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bcd45503-d314-4b28-a41c-4dbb87059188",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}