2 changed files with 438 additions and 0 deletions
@@ -0,0 +1,256 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Import tkinter and ollama to create the app"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "import ollama\n",
    "import tkinter as tk\n",
    "from tkinter import ttk"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Basic configuration parameters for the Ollama API:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
    "HEADERS = {\"Content-Type\":\"application/json\"}\n",
    "MODEL = \"llama3.2\"\n"
   ]
  },
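  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The app below uses the `ollama` Python package, so `OLLAMA_API` and `HEADERS` are not strictly needed there. As an optional, minimal sketch (an illustration, not part of the original app), the next cell shows how those constants could be used to call the local Ollama chat endpoint directly with `requests`, assuming an Ollama server is running on localhost."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: call the Ollama HTTP endpoint directly with requests.\n",
    "# Assumes a local Ollama server is running and the requests package is installed.\n",
    "import requests\n",
    "\n",
    "payload = {\n",
    "    \"model\": MODEL,\n",
    "    \"messages\": [{\"role\": \"user\", \"content\": \"Say hello in one sentence.\"}],\n",
    "    \"stream\": False,  # return a single JSON response instead of a stream\n",
    "}\n",
    "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n",
    "print(response.json()[\"message\"][\"content\"])"
   ]
  },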
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Initialize conversation history."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "conversation_history = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Defining the key presses. If the user presses Shift + Enter, simply go to the next line.\n",
    "\n",
    "If the user presses only Enter, submit the question."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "def handle_keypress(event):\n",
    "    if event.state & 0x1:  # Check if Shift is pressed\n",
    "        return\n",
    "    else:\n",
    "        display_answer()\n",
    "        return 'break'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Defining the function that will display answers using Ollama.\n",
    "\n",
    "To turn it into a chatbot, we simply append the user's question and Ollama's response to the conversation history, and pass the whole history to Ollama along with the next question."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "def display_answer(event=None):\n",
    "    question_text['state'] = 'disabled'\n",
    "    question_text['bg'] = '#F0F0F0'\n",
    "    status_label.config(text=\"Looking for an answer...\")\n",
    "    root.update()\n",
    "\n",
    "    # Get question text and prepare message\n",
    "    question = question_text.get(\"1.0\", tk.END).strip()\n",
    "    if question:\n",
    "        # Append the user's question to the conversation history\n",
    "        conversation_history.append({\"role\": \"user\", \"content\": question})\n",
    "\n",
    "        # Pass the entire conversation history to Ollama\n",
    "        try:\n",
    "            # Get the answer\n",
    "            response = ollama.chat(model=MODEL, messages=conversation_history)\n",
    "            answer = response[\"message\"][\"content\"]\n",
    "\n",
    "            # Append the assistant's answer to the conversation history\n",
    "            conversation_history.append({\"role\": \"assistant\", \"content\": answer})\n",
    "\n",
    "            # Update the text widget with the answer\n",
    "            answer_text.configure(state='normal')\n",
    "            answer_text.delete(1.0, tk.END)\n",
    "            answer_text.insert(tk.END, answer)\n",
    "            answer_text.configure(state='disabled')\n",
    "\n",
    "            status_label.config(text=\"Answered\")\n",
    "        except Exception as e:\n",
    "            answer_text.configure(state='normal')\n",
    "            answer_text.delete(1.0, tk.END)\n",
    "            answer_text.insert(tk.END, f\"Error: {str(e)}\")\n",
    "            answer_text.configure(state='disabled')\n",
    "            status_label.config(text=\"Error\")\n",
    "    else:\n",
    "        # If an empty question string was received\n",
    "        answer_text.configure(state='normal')\n",
    "        answer_text.delete(1.0, tk.END)\n",
    "        answer_text.insert(tk.END, \"Please enter a question.\")\n",
    "        answer_text.configure(state='disabled')\n",
    "        status_label.config(text=\"\")\n",
    "\n",
    "    # Re-enable question input and restore normal background\n",
    "    question_text['state'] = 'normal'\n",
    "    question_text['bg'] = 'white'\n",
    "    root.update()"
   ]
  },
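  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As an optional aside (a sketch under the assumption that the installed `ollama` package supports `stream=True`), the same call can stream the answer token by token instead of waiting for the full response. The app itself keeps using `display_answer` above; this is only an illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch, not used by the app below: stream partial output into the\n",
    "# answer widget as it arrives. Assumes ollama.chat(..., stream=True) yields\n",
    "# chunks whose [\"message\"][\"content\"] holds the next piece of text.\n",
    "def display_answer_streaming(question):\n",
    "    conversation_history.append({\"role\": \"user\", \"content\": question})\n",
    "    answer = \"\"\n",
    "    answer_text.configure(state='normal')\n",
    "    answer_text.delete(1.0, tk.END)\n",
    "    for chunk in ollama.chat(model=MODEL, messages=conversation_history, stream=True):\n",
    "        piece = chunk[\"message\"][\"content\"]\n",
    "        answer += piece\n",
    "        answer_text.insert(tk.END, piece)\n",
    "        root.update()\n",
    "    answer_text.configure(state='disabled')\n",
    "    conversation_history.append({\"role\": \"assistant\", \"content\": answer})"
   ]
  },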
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A helper for the Remove All button: it clears the conversation history and resets the interface so the user can start over."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def remove_all():\n",
    "    \"\"\"Clears the conversation history and resets the interface.\"\"\"\n",
    "    global conversation_history\n",
    "    conversation_history = []  # Clear conversation history\n",
    "\n",
    "    # Reset text widgets\n",
    "    question_text.delete(1.0, tk.END)\n",
    "    answer_text.configure(state='normal')\n",
    "    answer_text.delete(1.0, tk.END)\n",
    "    answer_text.insert(tk.END, \"Your answer will appear here.\")\n",
    "    answer_text.configure(state='disabled')\n",
    "\n",
    "    # Reset status label\n",
    "    status_label.config(text=\"\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Creating the app window using tkinter."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the main window\n",
    "root = tk.Tk()\n",
    "root.title(\"Ollama with GUI\")\n",
    "root.geometry(\"500x800\")\n",
    "\n",
    "# Create and configure the Questions frame\n",
    "question_frame = ttk.LabelFrame(root, text=\"Questions\", padding=(10, 10))\n",
    "question_frame.pack(fill=\"both\", expand=True, padx=10, pady=10)\n",
    "\n",
    "question_label = ttk.Label(question_frame, text=\"Enter your question:\")\n",
    "question_label.pack(anchor=\"w\", pady=5)\n",
    "\n",
    "# Use a Text widget (instead of an Entry) so questions can span multiple lines\n",
    "question_text = tk.Text(question_frame, wrap=tk.WORD, width=50, height=4)\n",
    "question_text.pack(anchor=\"w\", pady=5)\n",
    "question_text.bind(\"<Return>\", handle_keypress)\n",
    "\n",
    "# Add status label\n",
    "status_label = ttk.Label(question_frame, text=\"\")\n",
    "status_label.pack(anchor=\"w\", pady=5)\n",
    "\n",
    "# Add Remove All button\n",
    "remove_all_button = ttk.Button(question_frame, text=\"Remove All\", command=remove_all)\n",
    "remove_all_button.pack(anchor=\"e\", pady=5)\n",
    "\n",
    "# Create and configure the Answers frame\n",
    "answer_frame = ttk.LabelFrame(root, text=\"Answer\", padding=(10, 10))\n",
    "answer_frame.pack(fill=\"both\", expand=True, padx=10, pady=10)\n",
    "\n",
    "# Create a frame to hold the text widget and scrollbar\n",
    "text_frame = ttk.Frame(answer_frame)\n",
    "text_frame.pack(fill=\"both\", expand=True)\n",
    "\n",
    "# Create the text widget and scrollbar\n",
    "answer_text = tk.Text(text_frame, wrap=tk.WORD, width=70, height=100)\n",
    "scrollbar = ttk.Scrollbar(text_frame, orient=\"vertical\", command=answer_text.yview)\n",
    "answer_text.configure(yscrollcommand=scrollbar.set)\n",
    "\n",
    "# Pack the text widget and scrollbar\n",
    "answer_text.pack(side=\"left\", fill=\"both\", expand=True)\n",
    "scrollbar.pack(side=\"right\", fill=\"y\")\n",
    "\n",
    "# Set initial text and disable editing\n",
    "answer_text.insert(tk.END, \"Your answer will appear here.\")\n",
    "answer_text.configure(state='disabled')\n",
    "\n",
    "# Run the main event loop\n",
    "root.mainloop()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llms",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -0,0 +1,182 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Import Required Libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "import gradio as gr"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Load Environment Variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "load_dotenv()\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "if not openai_api_key:\n",
    "    print(\"OpenAI API Key not set\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Initialize OpenAI Client and Define Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "openai = OpenAI()\n",
    "MODEL = 'gpt-4o-mini'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Define the System Message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "system_message = (\n",
    "    \"You are a helpful assistant, trying your best to answer every question as accurately as possible. \"\n",
    "    \"You are also free to say you do not know if you do not have the information to answer a question. \"\n",
    "    \"You always respond in markdown.\"\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Define the Chat Function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "def chat(message, history):\n",
    "    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
    "\n",
    "    stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
    "\n",
    "    response = \"\"\n",
    "    for chunk in stream:\n",
    "        response += chunk.choices[0].delta.content or ''\n",
    "        yield response"
   ]
  },
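  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`chat` is a generator: each yielded value is the response accumulated so far, which is what lets Gradio render the reply as it streams. A small optional check outside Gradio (it assumes a valid `OPENAI_API_KEY` is set) could look like the sketch below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: drive the generator directly, outside Gradio.\n",
    "# Each yielded value is the partial response so far; the last one is complete.\n",
    "final = \"\"\n",
    "for partial in chat(\"In one sentence, what is Gradio?\", []):\n",
    "    final = partial\n",
    "print(final)"
   ]
  },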
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Create the Chat Interface"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "demo = gr.ChatInterface(\n",
    "    fn=chat,\n",
    "    title=\"AI chatbot\",\n",
    "    description=\"Please login to use the chat interface\",\n",
    "    type='messages',\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "auth_data is a list of tuples, where each tuple contains a username and password."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "auth_data = [(\"user_1\", \"password_1\"), (\"user_2\", \"password_2\"), (\"user_3\", \"password_3\")]"
   ]
  },
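  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As an optional aside (a sketch, not used in the launch cell below): Gradio's `auth` parameter can also take a callable that receives a username and password and returns True or False, which is handy when credentials live somewhere other than a hard-coded list."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: a callable credential check against the same auth_data.\n",
    "# It could be passed as demo.launch(auth=check_credentials, ...), but the\n",
    "# launch cell below keeps the list form from above.\n",
    "def check_credentials(username, password):\n",
    "    return (username, password) in auth_data"
   ]
  },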
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Add Authentication and Launch\n",
    "\n",
    "auth_message is the message displayed to users before accessing the interface."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "demo.launch(share=True,\n",
    "            auth=auth_data,\n",
    "            auth_message=\"Please enter your credentials to access the chat interface\",\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llms",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}