@@ -13,60 +13,15 @@
   },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "id": "fa9aa00e-806d-4d42-911f-a58114f343df",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# imports\n",
-    "import os\n",
-    "import json\n",
-    "import requests\n",
-    "from dotenv import load_dotenv\n",
-    "from bs4 import BeautifulSoup\n",
-    "from IPython.display import Markdown, display, clear_output\n",
-    "from openai import OpenAI\n",
-    "import ollama\n",
-    "import gradio as gr"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 14,
-   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# constants\n",
-    "MODEL_GPT = 'gpt-4o-mini'\n",
-    "MODEL_LLAMA = 'llama3.2'\n",
-    "URL_LLAMA = 'http://localhost:11434/api/chat'"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 15,
-   "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# set up environment\n",
-    "load_dotenv()\n",
-    "api_key = os.getenv('OPENAI_API_KEY')\n",
-    "openai = OpenAI()"
-   ]
-  },
   {
    "cell_type": "code",
-   "execution_count": 25,
-   "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
+   "execution_count": 27,
+   "id": "c1070317-3ed9-4659-abe3-828943230e03",
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "* Running on local URL: http://127.0.0.1:7872\n",
+      "* Running on local URL: http://127.0.0.1:7878\n",
       "\n",
       "To create a public link, set `share=True` in `launch()`.\n"
      ]
@@ -74,7 +29,7 @@
     {
      "data": {
       "text/html": [
-       "<div><iframe src=\"http://127.0.0.1:7872/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+       "<div><iframe src=\"http://127.0.0.1:7878/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
       ],
       "text/plain": [
        "<IPython.core.display.HTML object>"
@@ -87,55 +42,32 @@
      "data": {
       "text/plain": []
      },
-     "execution_count": 25,
+     "execution_count": 27,
      "metadata": {},
      "output_type": "execute_result"
     },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Traceback (most recent call last):\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\queueing.py\", line 624, in process_events\n",
-      " response = await route_utils.call_process_api(\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\route_utils.py\", line 323, in call_process_api\n",
-      " output = await app.get_blocks().process_api(\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 2019, in process_api\n",
-      " result = await self.call_function(\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\blocks.py\", line 1578, in call_function\n",
-      " prediction = await utils.async_iteration(iterator)\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 710, in async_iteration\n",
-      " return await anext(iterator)\n",
-      " ^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 704, in __anext__\n",
-      " return await anyio.to_thread.run_sync(\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n",
-      " return await get_async_backend().run_sync_in_worker_thread(\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2441, in run_sync_in_worker_thread\n",
-      " return await future\n",
-      " ^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 943, in run\n",
-      " result = context.run(func, *args)\n",
-      " ^^^^^^^^^^^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 687, in run_sync_iterator_async\n",
-      " return next(iterator)\n",
-      " ^^^^^^^^^^^^^^\n",
-      " File \"D:\\Anaconda\\envs\\llms\\Lib\\site-packages\\gradio\\utils.py\", line 848, in gen_wrapper\n",
-      " response = next(iterator)\n",
-      " ^^^^^^^^^^^^^^\n",
-      " File \"C:\\Users\\InnovAI Work\\AppData\\Local\\Temp\\ipykernel_20988\\223263199.py\", line 34, in stream_model\n",
-      " raise ValueError(\"Unknown model\")\n",
-      "ValueError: Unknown model\n"
-     ]
-    }
    ],
    "source": [
     "import os\n",
     "import requests\n",
     "from bs4 import BeautifulSoup\n",
+    "from typing import List\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "import google.generativeai\n",
+    "import anthropic\n",
+    "import ollama\n",
+    "import gradio as gr\n",
+    "\n",
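+    "# load_dotenv() pulls the API keys below from a local .env file (if present) into the environment\n",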
+    "load_dotenv()\n",
+    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
+    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+    "\n",
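+    "# OpenAI() and Anthropic() read their API keys from the environment by default\n",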
+    "openai = OpenAI()\n",
+    "\n",
+    "claude = anthropic.Anthropic()\n",
+    "\n",
+    "# here is the question; type over this to ask something new\n",
+    "\n",
+    "question = \"\"\"\n",
@@ -244,7 +176,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "d9703f20-9d8b-4feb-888f-204be52e9d96",
+   "id": "333b7231-93ba-4d4a-b38d-41e72f2f3863",
    "metadata": {},
    "outputs": [],
    "source": []