{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "2ce61bb5-1d5b-43b8-b5bb-6aeae91c7574", "metadata": {}, "outputs": [], "source": [ "import os\n", "from dotenv import load_dotenv\n", "from openai import OpenAI\n", "from IPython.display import Markdown, display" ] }, { "cell_type": "code", "execution_count": 2, "id": "3399686d-5f14-4fb2-8939-fd2401be3007", "metadata": {}, "outputs": [], "source": [ "MODEL = \"gpt-4o-mini\"\n", "SYSTEM_PROMPT_PATH = \"Chat_Summary_Data/System_Prompt.txt\"\n", "CHATS_PATH = \"Chat_Summary_Data/Chat_Examples/\"" ] }, { "cell_type": "code", "execution_count": 3, "id": "d97b8374-a161-435c-8317-1d0ecaaa9b71", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "API key found and looks good so far!\n" ] } ], "source": [ "# Load environment variables in a file called .env\n", "\n", "load_dotenv(override=True)\n", "api_key = os.getenv('OPENAI_API_KEY')\n", "\n", "# Check the key\n", "\n", "if not api_key:\n", " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", "elif not api_key.startswith(\"sk-proj-\"):\n", " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", "elif api_key.strip() != api_key:\n", " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", "else:\n", " print(\"API key found and looks good so far!\")\n" ] }, { "cell_type": "code", "execution_count": 4, "id": "b3f4afb4-2e4a-4971-915e-a8634a17eda8", "metadata": {}, "outputs": [], "source": [ "class ChatAI:\n", " def __init__(self, system_prompt_path=SYSTEM_PROMPT_PATH, model=MODEL):\n", " with open(system_prompt_path, \"r\") as file:\n", " self.system_prompt = file.read()\n", "\n", " self.openai = OpenAI()\n", " self.model = model\n", " \n", " @staticmethod\n", " def _get_user_prompt(chat_txt):\n", " with open(chat_txt, \"r\") as file:\n", " user_prompt_str = file.read()\n", " return user_prompt_str\n", " \n", " def generate(self, chat_txt):\n", " messages = [\n", " {\"role\": \"system\", \"content\": self.system_prompt},\n", " {\"role\": \"user\", \"content\": self._get_user_prompt(chat_txt)}\n", " ]\n", "\n", " response = self.openai.chat.completions.create(model=self.model, messages=messages)\n", " return response.choices[0].message.content" ] }, { "cell_type": "code", "execution_count": 5, "id": "d243b582-66af-49f9-bcd1-e05a63e61c34", "metadata": {}, "outputs": [], "source": [ "chat_ai = ChatAI()" ] }, { "cell_type": "code", "execution_count": 8, "id": "c764ace6-5a0f-4dd0-9454-0b8a093b97fc", "metadata": {}, "outputs": [ { "data": { "text/markdown": [ "# Chat1" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/markdown": [ "- **Order:** 2 Medium Chicken BBQ Pizzas\n", "- **Cost:** 342 LE\n", "- **Experience:** Negative\n", " - **Summary:** The client expressed dissatisfaction with the pizza taste." ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/markdown": [ "# Chat2" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/markdown": [ "- The client ordered: Nothing \n", "- Summary: The client did not place an order because the chicken ranch pizza was unavailable." 
], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/markdown": [ "# Chat3" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/markdown": [ "- **Order**: Large pepperoni pizza and onion rings \n", "- **Total Cost**: 250 LE \n", "- **Experience**: Positive \n", " - The client enjoyed the pizza despite the delay in delivery." ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "chats_txt = os.listdir(CHATS_PATH)\n", "for chat_file in chats_txt:\n", " markdown_heading = f\"# {chat_file[:-4]}\"\n", " display(Markdown(markdown_heading))\n", " display(Markdown(chat_ai.generate(CHATS_PATH+chat_file)))" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.11" } }, "nbformat": 4, "nbformat_minor": 5 }