{
"cells": [
{
"cell_type": "markdown",
"id": "ec4f6b32-46e9-429a-a3cd-521ff5418493",
"metadata": {},
"source": [
"# Occasio - Event Management Assistant"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"import time\n",
"import pprint\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"import google.generativeai as genai\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b501508-0082-47be-9903-52ff1c243486",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Google and assign a model for each\n",
"\n",
"openai = OpenAI()\n",
"OPENAI_MODEL = \"gpt-4o-mini\"\n",
"\n",
"claude = anthropic.Anthropic()\n",
"ANTHROPIC_MODEL = \"claude-3-haiku-20240307\"\n",
"\n",
"genai.configure()\n",
"GOOGLE_MODEL = \"gemini-2.0-flash\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are called \\\"EventAI\\\", a virtual assistant for an Elementary school called Eagle Elementary School. You can help users by giving \\\n",
"them details of upcoming shcool events like event name, description, location etc. \"\n",
"#system_message += \"Introduce yourself with a warm welcome message on your first response ONLY.\"\n",
"system_message += \"Give short, courteous answers, no more than 2 sentences. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so. Do not make up your own event details information\"\n",
"system_message += \"You might be asked to list the questions asked by the user so far. In that situation, based on the conversation history provided to you, \\\n",
"list the questions and respond\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c",
"metadata": {},
"outputs": [],
"source": [
"# Some imports for handling images\n",
"\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f",
"metadata": {},
"outputs": [],
"source": [
"def artist(event_text):\n",
" image_response = openai.images.generate(\n",
" model=\"dall-e-3\",\n",
" prompt=f\"An image representing an {event_text}, showing typical activities that happen for that {event_text}, in a vibrant pop-art style that elementary school kids will like\",\n",
" size=\"1024x1024\",\n",
" n=1,\n",
" response_format=\"b64_json\",\n",
" )\n",
" image_base64 = image_response.data[0].b64_json\n",
" image_data = base64.b64decode(image_base64)\n",
" return Image.open(BytesIO(image_data))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d104b96a-02ca-4159-82fe-88e0452aa479",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image\n",
"from IPython.display import Audio, display\n",
"\n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\",\n",
" input=message)\n",
"\n",
" audio_stream = BytesIO(response.content)\n",
" output_filename = \"output_audio.mp3\"\n",
" with open(output_filename, \"wb\") as f:\n",
" f.write(audio_stream.read())\n",
"\n",
" # Play the generated audio\n",
" display(Audio(output_filename, autoplay=True))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0428a74-4daa-4b0d-b25a-219a35f39f55",
"metadata": {},
"outputs": [],
"source": [
"school_events = [\n",
" {\n",
" \"event_id\": \"pta\",\n",
" \"name\": \"Parent Teachers Meeting (PTA/PTM)\",\n",
" \"description\": \"Parent teachers meeting (PTA/PTM) to discuss students' progress.\",\n",
" \"date_time\": \"Apr 1st, 2025 11 AM\",\n",
" \"location\" : \"Glove Annexure Hall\"\n",
" },\n",
" {\n",
" \"event_id\": \"read aloud\",\n",
" \"name\": \"Read Aloud to your class/Reading to your class\",\n",
" \"description\": \"Kids can bring their favorite book and read it to their class.\",\n",
" \"date_time\": \"Apr 15th, 2025 1 PM\",\n",
" \"location\": \"Classroom\"\n",
" },\n",
" {\n",
" \"event_id\": \"100 days of school\",\n",
" \"name\": \"Celebrating 100 days of school. Dress up time for kids\",\n",
" \"description\": \"Kids can dress up as old people and celebrate the milestone with their teachers.\",\n",
" \"date_time\": \"May 15th, 2025 11 AM\",\n",
" \"location\": \"Classroom\"\n",
" },\n",
" {\n",
" \"event_id\": \"Book fair\",\n",
" \"name\": \"Scholastic book fair\",\n",
" \"description\": \"Kids can purchase their favorite scholastic books.\",\n",
" \"date_time\": \"Jun 22nd, 2025 10:30 AM\",\n",
" \"location\": \"Library\"\n",
" },\n",
" {\n",
" \"event_id\": \"Halloween\",\n",
" \"name\": \"Halloween\",\n",
" \"description\": \"Kids can dress up as their favorite characters\",\n",
" \"date_time\": \"Oct 31st, 2025\",\n",
" \"location\": \"Classroom\"\n",
" },\n",
" {\n",
" \"event_id\": \"Movie Night\",\n",
" \"name\": \"Movie Night\",\n",
" \"description\": \"A popular and kids centric movie will be played. Kids and families are welcome.\",\n",
" \"date_time\": \"May 3rd, 2025\",\n",
" \"location\": \"Main auditorium\"\n",
" },\n",
" {\n",
" \"event_id\": \"Intruder Drill\",\n",
" \"name\": \"Intruder Drill\",\n",
" \"description\": \"State mandated monthly intruder drill to prepare staff and students with necessary safety skills in times of a crisis\",\n",
" \"date_time\": \"May 3rd, 2025\",\n",
" \"location\": \"Main auditorium\"\n",
" }\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7027eec-e522-49c1-af59-56a82f9d3be8",
"metadata": {},
"outputs": [],
"source": [
"def get_event_details(query):\n",
" search_words = query.lower().split() \n",
" for event in school_events:\n",
" event_text = event['name'].lower() + ' ' + event['description'].lower()\n",
" if all(word in event_text for word in search_words):\n",
" return event\n",
" return None"
]
},
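{
"cell_type": "code",
"execution_count": null,
"id": "c1d2e3f4-event-lookup-sanity-check",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (illustrative only, not needed by the app): the matcher above requires\n",
"# every word of the query to appear somewhere in an event's name or description, so short\n",
"# keywords such as \"book fair\" or \"pta\" work best.\n",
"\n",
"print(get_event_details(\"book fair\"))        # matches the Scholastic book fair entry\n",
"print(get_event_details(\"ice cream social\"))  # no matching event, so this returns None"
]
},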
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
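{
"cell_type": "code",
"execution_count": null,
"id": "a9b8c7d6-tool-call-roundtrip-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch of that tool-call round trip with GPT (illustrative only -- the streaming\n",
"# version the UI actually uses is chat_gpt further down). It relies on tools_gpt and\n",
"# handle_tool_call, which are defined later in this notebook, so run the whole notebook\n",
"# before uncommenting the call at the bottom.\n",
"#   1. Send the user message together with the tool definitions.\n",
"#   2. If the model answers with finish_reason == \"tool_calls\", run our own function.\n",
"#   3. Append the tool result and ask the model again for the final answer.\n",
"\n",
"def tool_call_demo(user_question):\n",
"    messages = [{\"role\": \"system\", \"content\": system_message},\n",
"                {\"role\": \"user\", \"content\": user_question}]\n",
"    response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)\n",
"    if response.choices[0].finish_reason == \"tool_calls\":\n",
"        message = response.choices[0].message\n",
"        tool_call = message.tool_calls[0]\n",
"        event_text = json.loads(tool_call.function.arguments).get(\"event_text\")\n",
"        messages.append(message)\n",
"        messages.append({\"role\": \"tool\", \"tool_call_id\": tool_call.id, \"content\": handle_tool_call(event_text)})\n",
"        response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages)\n",
"    return response.choices[0].message.content\n",
"\n",
"# tool_call_demo(\"When is the book fair?\")"
]
},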
{
"cell_type": "code",
"execution_count": null,
"id": "68e96b54-b891-4e7b-a6bc-17693dc99970",
"metadata": {},
"outputs": [],
"source": [
"# for claude\n",
"tools_claude = [\n",
" {\n",
" \"name\": \"get_event_details\",\n",
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n",
"'When is the pta meeting scheduled?\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants to getails on\"\n",
" }\n",
" },\n",
" \"required\": [\"event_text\"]\n",
" }\n",
"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "636188d2-7e7a-48a0-9f04-f3813c7dc323",
"metadata": {},
"outputs": [],
"source": [
"# For GPT\n",
"events_function_gpt = {\n",
" \"name\": \"get_event_details\",\n",
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n",
" 'When is the pta meeting scheduled?\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants to getails on\",\n",
" },\n",
" },\n",
" \"required\": [\"event_text\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "605684f8-ed02-4cc9-8a16-012533b601cb",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"tools_gpt = [{\"type\": \"function\", \"function\": events_function_gpt}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ac5a34c-a630-449a-9d46-669daace799c",
"metadata": {},
"outputs": [],
"source": [
"#Gemini function declaration structure\n",
"gemini_event_details = [{\n",
" \"name\": \"get_event_details\",\n",
" \"description\":\"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks 'When is the pta meeting scheduled?\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants to details on\",\n",
" },\n",
" },\n",
" \"required\": [\"event_text\"],\n",
" },\n",
" },\n",
" {\n",
" \"name\": \"get_event_test\",\n",
" \"description\":\"This is a test function to validate if the function call picks up the right function if there are multiple functions.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants to details on\",\n",
" },\n",
" },\n",
" \"required\": [\"event_text\"],\n",
" },\n",
" }\n",
"]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6331113-63b0-4712-94bb-f363422a8441",
"metadata": {},
"outputs": [],
"source": [
"def chat_claude(history):\n",
" print(f\"\\nhistory is {history}\\n\")\n",
" #Claude doesnt take any other key value pair other than role and content. Hence filtering only those key value pairs\n",
" history_claude = list({\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in history if \"role\" in msg and \"content\" in msg)\n",
" #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]\n",
" #messages = history\n",
" message = claude.messages.create(\n",
" model=ANTHROPIC_MODEL,\n",
" max_tokens=1000,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=history_claude,\n",
" tools=tools_claude\n",
" )\n",
" image = None\n",
" print(f\"Claude's message is \\n {pprint.pprint(message)}\\n\")\n",
" try: \n",
" if message.stop_reason == \"tool_use\":\n",
" tool_use = next(block for block in message.content if block.type == \"tool_use\")\n",
" event_text = tool_use.input.get('event_text')\n",
" image = artist(event_text)\n",
" tool_result = handle_tool_call(event_text)\n",
" #tool_result = handle_tool_call(tool_use, \"Claude\")\n",
" \n",
" print(f\"Tool Result: {tool_result}\")\n",
" \n",
" response = claude.messages.stream(\n",
" model=ANTHROPIC_MODEL,\n",
" max_tokens=4096,\n",
" system=system_message,\n",
" messages=[\n",
" {\n",
" \"role\": \"user\", \n",
" \"content\": [\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": history[-1].get('content')\n",
" }\n",
" ]\n",
" },\n",
" {\n",
" \"role\": \"assistant\", \n",
" \"content\": message.content\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"tool_result\",\n",
" \"tool_use_id\": tool_use.id,\n",
" \"content\": tool_result,\n",
" }\n",
" ],\n",
" },\n",
" ],\n",
" tools=tools_claude\n",
" )\n",
" result = \"\"\n",
" with response as stream:\n",
" for text in stream.text_stream:\n",
" result += text or \"\"\n",
" yield result, None\n",
" talker(result)\n",
" #image= artist(tool_input.get('event_text'))\n",
" yield result, image\n",
" else:\n",
" response = next((block.text for block in message.content if hasattr(block, \"text\")), None,)\n",
" chunk_size=30\n",
" for i in range(0, len(response), chunk_size):\n",
" yield response[:i + chunk_size], None\n",
" time.sleep(0.05) #Simulate streaming delay\n",
" talker(response)\n",
" #image= artist(tool_input.get('event_text'))\n",
" yield response, None\n",
" except Exception as e:\n",
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n",
" print(e)\n",
" yield error_message, None\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9915ae05-5d52-4fdc-a3ea-18f050a79bd3",
"metadata": {},
"outputs": [],
"source": [
"def chat_gpt(history):\n",
" print(f\"\\nhistory is {history}\\n\")\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
" response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)\n",
" image = None\n",
" try:\n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" tool = message.tool_calls[0]\n",
" arguments = json.loads(tool.function.arguments)\n",
" event_text = arguments.get('event_text')\n",
" image = artist(event_text)\n",
" event_json = handle_tool_call(event_text)\n",
" tool_output = {\n",
" \"role\": \"tool\",\n",
" \"content\": event_json,\n",
" \"tool_call_id\": tool.id\n",
" }\n",
" messages.append(message)\n",
" messages.append(tool_output)\n",
" stream = openai.chat.completions.create(\n",
" model=OPENAI_MODEL,\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result, None\n",
" talker(result)\n",
" yield result, image\n",
" else: \n",
" reply = response.choices[0].message.content\n",
" chunk_size=30\n",
" for i in range(0, len(reply), chunk_size):\n",
" yield reply[:i + chunk_size], None\n",
" time.sleep(0.05)\n",
" talker(reply)\n",
" #image= artist(\"No such event\")\n",
" yield reply, None\n",
" except Exception as e:\n",
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n",
" print(e)\n",
" yield error_message, None"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "30fa3de9-5b55-4bb6-93ea-a13fc09d38c1",
"metadata": {},
"outputs": [],
"source": [
"def chat_gemini(history):\n",
" print(f\"\\nhistroy is {history}\\n\")\n",
" history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format\n",
" else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is\n",
" else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc\n",
" \n",
" print(f\"\\nhistroy_gemini is {history_gemini}\\n\")\n",
" model = genai.GenerativeModel(\n",
" model_name=GOOGLE_MODEL,\n",
" system_instruction=system_message\n",
" )\n",
" response = model.generate_content(\n",
" contents = history_gemini,\n",
" #contents = contents,\n",
" tools = [{\n",
" 'function_declarations': gemini_event_details,\n",
" }],\n",
" )\n",
" #print(f\"response is {response}\")\n",
"\n",
" image = None\n",
" try:\n",
" # Check if the model wants to use a tool\n",
" if response.candidates[0].content.parts[0].function_call:\n",
" function_call = response.candidates[0].content.parts[0].function_call\n",
" event_text = function_call.args.get(\"event_text\")\n",
" image = artist(event_text)\n",
" tool_result = handle_tool_call(event_text)\n",
" \n",
" print(f\"\\ntool_result is {tool_result}\\n\")\n",
" stream = model.generate_content(\n",
" \"Based on this information `\" + tool_result + \"`, extract the details of the event and provide the event details to the user\",\n",
" stream=True \n",
" )\n",
" #print(f\"\\nSecond response is {stream}\\n\")\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.candidates[0].content.parts[0].text or \"\"\n",
" #print(f\"REsult is \\n{result}\\n\")\n",
" yield result, None\n",
" talker(result) \n",
" yield result, image\n",
" #print(f\"REsult is \\n{result}\\n\")\n",
" else: \n",
" reply = response.text\n",
" chunk_size=30\n",
" for i in range(0, len(reply), chunk_size):\n",
" yield reply[:i + chunk_size], None\n",
" time.sleep(0.05)\n",
" talker(reply)\n",
" #image= artist(\"No such event\")\n",
" yield reply, None\n",
" \n",
" except Exception as e:\n",
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n",
" print(e)\n",
" yield error_message, None\n",
" \n",
"\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "570fffb2-a054-4217-89ae-8b6f4630e383",
"metadata": {},
"outputs": [],
"source": [
"def call_and_process_model_responses(fn_name, chatbot):#, response, image):\n",
" response = \"\"\n",
" image = None\n",
" for response, image in fn_name(chatbot):\n",
" if chatbot and chatbot[-1][\"role\"] == \"assistant\": \n",
" chatbot[-1][\"content\"] = response # Update the last message\n",
" else:\n",
" chatbot.append({\"role\": \"assistant\", \"content\": response}) # First assistant message\n",
" #print(chatbot)\n",
" yield chatbot, image # Stream updated history to UI\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32a6ccce-44fa-49a7-bd1a-08c70002771c",
"metadata": {},
"outputs": [],
"source": [
"def handle_tool_call(event_text):\n",
" print(f\"event text is {event_text}\")\n",
" event_found = get_event_details(event_text)\n",
" print(f\"event_found is {event_found}\")\n",
" \n",
" if event_found:\n",
" response = json.dumps({\"name\": event_found['name'],\"description\": event_found['description'], \"when\": event_found['date_time'], \"where\": event_found['location']})\n",
" else: \n",
" response = json.dumps({\"event\": f\"Sorry, there is no schedule currently for {event_text}\"})\n",
" return response \n",
" "
]
},
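{
"cell_type": "code",
"execution_count": null,
"id": "e5f6a7b8-tool-result-branch-check",
"metadata": {},
"outputs": [],
"source": [
"# Quick illustrative check of both branches of handle_tool_call (safe to skip): a known event\n",
"# comes back as a JSON string with the event details, while an unknown keyword returns the\n",
"# \"no schedule\" fallback message that the model is expected to relay to the user.\n",
"\n",
"print(handle_tool_call(\"pta\"))            # known event -> JSON with name, description, when, where\n",
"print(handle_tool_call(\"science fair\"))   # no such event -> fallback JSON message"
]
},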
{
"cell_type": "code",
"execution_count": null,
"id": "4eaaaf9e-64b9-4d0b-9931-388cee8ea21d",
"metadata": {},
"outputs": [],
"source": [
"def process_chosen_model(chatbot, model):\n",
" if model == 'GPT':\n",
" for chatbot, image in call_and_process_model_responses(chat_gpt, chatbot):\n",
" yield chatbot, image\n",
" elif model == 'Claude': \n",
" for chatbot, image in call_and_process_model_responses(chat_claude, chatbot):\n",
" yield chatbot, image\n",
" else:\n",
" #for Gemini, the content is to be replaced with parts.\n",
" for chatbot, image in call_and_process_model_responses(chat_gemini, chatbot):\n",
" yield chatbot, image\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "627f6d49-5376-4f1d-8071-f2e96fd6e78b",
"metadata": {},
"outputs": [],
"source": [
"# More involved Gradio code as we're not using the preset Chat interface!\n",
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n",
"\n",
"with gr.Blocks(css=\"\"\"\n",
" select.gr-box { \n",
" appearance: auto !important; \n",
" -webkit-appearance: auto !important; \n",
" }\n",
"\"\"\") as ui:\n",
" with gr.Row():\n",
" gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio! An Event Management Assistant</h1>\") # Added title\n",
" with gr.Row():\n",
" # with gr.Column(scale=3): #Acts as a spacer on the left\n",
" # pass\n",
" \n",
" with gr.Column(scale=0):\n",
" model = gr.Dropdown(\n",
" choices=[\"GPT\", \"Claude\", \"Gemini\"], \n",
" label=\"Select model\", \n",
" value=\"GPT\",\n",
" interactive=True,\n",
" container=True # Applying the CSS class\n",
" )\n",
" # with gr.Column(scale=-54, min_width=200):\n",
" # gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio</h1>\") # Added title\n",
" # pass #Acts as a spacer on the right\n",
" with gr.Row():\n",
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
" image_output = gr.Image(height=500)\n",
" with gr.Row():\n",
" entry = gr.Textbox(label=\"Ask me \\\"when is pta meeting\\\", \\\"how about book fair\\\" and more... \")\n",
" with gr.Row():\n",
" clear = gr.Button(\"Clear\", min_width=150)\n",
" #message=None\n",
"\n",
" def do_entry(message, history):\n",
" history += [{\"role\":\"user\", \"content\":message}]\n",
" return \"\", history\n",
" \n",
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
" process_chosen_model, inputs=[chatbot, model], outputs=[chatbot, image_output]\n",
" )\n",
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n",
"\n",
"ui.launch(inbrowser=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}