You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 

208 lines
5.6 KiB

{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "76cf81ba-4caf-41dc-9cdb-c3a89d4b9f50",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from google import genai #had to pip install google-genai\n",
"import anthropic\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8fa0a173-710c-4fe8-94a3-c0768ae0a067",
"metadata": {},
"outputs": [],
"source": [
"# Load API keys from a local .env file into the process environment.\n",
"# NOTE(review): any key may come back None if it is absent from .env —\n",
"# the client constructors below will surface that as an auth error at call time.\n",
"load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd9f3562-6049-44e4-80ad-d41318a547b5",
"metadata": {},
"outputs": [],
"source": [
"# One client per provider. OpenAI and Anthropic pick up their API keys\n",
"# from the environment populated by load_dotenv; the google-genai client\n",
"# takes its key explicitly.\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"gemini = genai.Client(api_key=google_api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "85c9be6c-a3e9-4c8a-9f61-338828425849",
"metadata": {},
"outputs": [],
"source": [
"# System prompt shared by the streaming chat functions below.\n",
"system_message = \"You are the world's most knowledgeable and friendly chat assistant. Format your answers in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e27aed6-735e-4c1f-b74f-5e08e9d2c8e5",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(prompt, model='gpt-4o-mini'):\n",
"    \"\"\"Stream a GPT reply, yielding the text accumulated so far.\n",
"\n",
"    prompt: the user's message.\n",
"    model: OpenAI model name; the default preserves the original\n",
"        hard-coded value, so existing callers are unaffected.\n",
"\n",
"    Yields the full partial response after each chunk, so a UI can\n",
"    simply re-render the growing answer on every update.\n",
"    \"\"\"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": prompt}\n",
"    ]\n",
"    stream = openai.chat.completions.create(\n",
"        model=model,\n",
"        messages=messages,\n",
"        stream=True\n",
"    )\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        # delta.content is None on role/finish chunks; coalesce to \"\".\n",
"        result += chunk.choices[0].delta.content or \"\"\n",
"        yield result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5faea20-2168-4386-b22c-12a240b45990",
"metadata": {},
"outputs": [],
"source": [
"# JavaScript passed to Gradio's `js=` parameter; presumably executed on\n",
"# page load (see Gradio docs — TODO confirm). Each snippet forces a UI\n",
"# theme by rewriting the `__theme` query parameter and reloading once.\n",
"force_dark_mode = \"\"\"\n",
"function refresh() {\n",
" const url = new URL(window.location);\n",
" if (url.searchParams.get('__theme') !== 'dark') {\n",
" url.searchParams.set('__theme', 'dark');\n",
" window.location.href = url.href;\n",
" }\n",
"}\n",
"\"\"\"\n",
"\n",
"# NOTE(review): force_light_mode is defined but unused by the Interface\n",
"# below (which passes force_dark_mode); kept as a drop-in alternative.\n",
"force_light_mode = \"\"\"\n",
"function refresh() {\n",
" const url = new URL(window.location);\n",
" if (url.searchParams.get('__theme') !== 'light') {\n",
" url.searchParams.set('__theme', 'light');\n",
" window.location.href = url.href;\n",
" }\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9e626a04-e55e-43e3-b888-44dcf5f1260b",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(prompt, model=\"claude-3-haiku-20240307\", max_tokens=1000, temperature=0.7):\n",
"    \"\"\"Stream a Claude reply, yielding the text accumulated so far.\n",
"\n",
"    prompt: the user's message.\n",
"    model / max_tokens / temperature: generation settings; the defaults\n",
"        preserve the original hard-coded values, so existing callers\n",
"        are unaffected.\n",
"\n",
"    Yields the full partial response after each streamed text delta.\n",
"    \"\"\"\n",
"    result = claude.messages.stream(\n",
"        model=model,\n",
"        max_tokens=max_tokens,\n",
"        temperature=temperature,\n",
"        system=system_message,\n",
"        messages=[\n",
"            {\"role\": \"user\", \"content\": prompt},\n",
"        ],\n",
"    )\n",
"    response = \"\"\n",
"    # Entering the context manager opens the stream; exiting closes it.\n",
"    with result as stream:\n",
"        for text in stream.text_stream:\n",
"            response += text or \"\"\n",
"            yield response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fe988edf-a10f-42e2-a2a9-c0c191162f01",
"metadata": {},
"outputs": [],
"source": [
"def stream_gemini(prompt, model=\"gemini-2.0-flash\"):\n",
"    \"\"\"Stream a Gemini reply, yielding the text accumulated so far.\n",
"\n",
"    prompt: the user's message.\n",
"    model: Gemini model name; the default preserves the original\n",
"        hard-coded value, so existing callers are unaffected.\n",
"\n",
"    Fix: pass system_message via config.system_instruction so Gemini\n",
"    honors the same system prompt as the GPT and Claude paths — the\n",
"    original call dropped it entirely.\n",
"    \"\"\"\n",
"    response = gemini.models.generate_content_stream(\n",
"        model=model,\n",
"        contents=[prompt],\n",
"        config={\"system_instruction\": system_message}\n",
"    )\n",
"    result = \"\"\n",
"    for chunk in response:\n",
"        # chunk.text can be None (e.g. non-text parts); coalesce to \"\".\n",
"        result += chunk.text or \"\"\n",
"        yield result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4828289-0a69-4ecb-a1f6-1af9d26555ca",
"metadata": {},
"outputs": [],
"source": [
"def stream_model(prompt, model):\n",
"    \"\"\"Dispatch the prompt to the streaming generator for the chosen\n",
"    model name (\"GPT\", \"Claude\" or \"Gemini\"); raises ValueError for\n",
"    any other name.\n",
"    \"\"\"\n",
"    streamers = {\n",
"        \"GPT\": stream_gpt,\n",
"        \"Claude\": stream_claude,\n",
"        \"Gemini\": stream_gemini,\n",
"    }\n",
"    if model not in streamers:\n",
"        raise ValueError(\"Unknown model\")\n",
"    yield from streamers[model](prompt)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50f57015-bc49-4b4c-a4f9-eb5adfd334a3",
"metadata": {},
"outputs": [],
"source": [
"# Assemble and launch the chat UI: a message textbox and a model\n",
"# dropdown feed stream_model; the streamed reply renders as Markdown.\n",
"# inbrowser=True opens a browser tab automatically.\n",
"view = gr.Interface(\n",
" fn=stream_model,\n",
" inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\", value=\"GPT\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\",\n",
" js=force_dark_mode\n",
")\n",
"view.launch(inbrowser=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}