{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "1b89f103-fc49-487e-930e-14abff8bfab1",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1a40e64b-14c6-4589-a671-6817f9cb09f0",
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "c0990b15-313d-4cf8-bc5b-fc14d263ba27",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "66a38e1f-db7e-4697-aa9c-a303f9828531",
"metadata": {},
"outputs": [],
"source": [
"google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "beb9606e-9be9-4f2e-adfe-4e41fb99566e",
"metadata": {},
"outputs": [],
"source": [
"# A generic system message - no more snarky adversarial AIs!\n",
"\n",
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "19ab23bc-59cf-48a3-8651-f7b1c52874db",
"metadata": {},
"outputs": [],
"source": [
"def message_gemini(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" gemini = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=system_message\n",
")\n",
" response = gemini.generate_content(prompt)\n",
" return response.text\n",
"\n",
"\n",
"# gemini = google.generativeai.GenerativeModel(\n",
"# model_name='gemini-1.5-flash',\n",
"# system_instruction=system_message\n",
"# )\n",
"# response = gemini.generate_content(user_prompt)\n",
"# print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8fe3c66c-d25d-4627-a401-d84c7d6613e7",
"metadata": {},
"outputs": [],
"source": [
"message_gemini(\"What is today's date?\")\n",
"# message_gemini(\"tell me a funny machine learning joke\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b27027ed-4bff-493c-a41e-8318003e0387",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai as genai\n",
"for model in genai.list_models():\n",
" print(model.name)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "2f82d61b-a7cd-4bee-994d-2e83d0a01bfc",
"metadata": {},
"outputs": [],
"source": [
"# here's a simple function\n",
"\n",
"def shout(text):\n",
" print(f\"Shout has been called with input {text}\")\n",
" return text.upper()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5941fe3f-aab9-47ba-b29f-d99aa3b40aed",
"metadata": {},
"outputs": [],
"source": [
"shout(\"hello\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6470847-1cce-4bf0-8364-199504a5335f",
"metadata": {},
"outputs": [],
"source": [
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n",
"\n",
"force_dark_mode = \"\"\"\n",
"function refresh() {\n",
" const url = new URL(window.location);\n",
" if (url.searchParams.get('__theme') !== 'dark') {\n",
" url.searchParams.set('__theme', 'dark');\n",
" window.location.href = url.href;\n",
" }\n",
"}\n",
"\"\"\"\n",
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\", js=force_dark_mode).launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "69715604-cc64-4563-967f-b5720462ac69",
"metadata": {},
"outputs": [],
"source": [
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", js=force_dark_mode).launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dede1d8c-fb7a-456a-923b-e221eaa30bd9",
"metadata": {},
"outputs": [],
"source": [
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\").launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55ae11b9-e7af-449f-b737-48dd7dc1a5b2",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=shout,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n",
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cba667cf-d270-426e-b940-a01083352ecb",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=message_gemini,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n",
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8bb7885-740f-41f0-95e3-dabe864cea14",
"metadata": {},
"outputs": [],
"source": [
"# Let's use Markdown\n",
"# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n",
"# I'm taking advantage of system_message being a global variable, used back in the message_gpt function (go take a look)\n",
"# Not a great software engineering practice, but quite sommon during Jupyter Lab R&D!\n",
"\n",
"system_message = \"You are a helpful assistant that responds in markdown\"\n",
"\n",
"view = gr.Interface(\n",
" fn=message_gemini,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "43d17b00-f4bc-45ad-a679-3112a170f5fb",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai as genai\n",
"\n",
"def stream_gemini(prompt):\n",
" gemini = genai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" safety_settings=None,\n",
" system_instruction=system_message\n",
" )\n",
"\n",
" response = gemini.generate_content(prompt, safety_settings=[\n",
" {\"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_HATE_SPEECH\", \"threshold\": \"BLOCK_NONE\"},\n",
" {\"category\": \"HARM_CATEGORY_HARASSMENT\", \"threshold\": \"BLOCK_NONE\"}], stream=True)\n",
" \n",
" result = \"\"\n",
" for chunk in response:\n",
" result += chunk.text\n",
" yield result\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "840f3d11-e66b-4b6b-9b98-70e0f02be9e6",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_gemini,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "markdown",
"id": "ea8a0081-8d2e-4960-b479-7c1ef346f524",
"metadata": {},
"source": [
"# Building a company brochure generator\n",
"\n",
"Now you know how - it's simple!"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "2d43360a-515e-4008-9eef-7a3c4e47cfba",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"class Website:\n",
" url: str\n",
" title: str\n",
" text: str\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url)\n",
" self.body = response.content\n",
" soup = BeautifulSoup(self.body, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
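{
"cell_type": "code",
"execution_count": null,
"id": "website-quick-check-demo",
"metadata": {},
"outputs": [],
"source": [
"# A quick check of the Website class - a minimal sketch; the URL below is just a placeholder, swap in any page you like\n",
"\n",
"print(Website(\"https://example.com\").get_contents())"
]
},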
{
"cell_type": "code",
"execution_count": 34,
"id": "08a07e55-b05d-4360-8e05-61dd39cc019b",
"metadata": {},
"outputs": [],
"source": [
"def stream_brochure(company_name, url, model, response_tone):\n",
" prompt = f\"Please generate a {response_tone} company brochure for {company_name}. Here is their landing page:\\n\"\n",
" prompt += Website(url).get_contents()\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Gemini\":\n",
" result = stream_gemini(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
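{
"cell_type": "code",
"execution_count": null,
"id": "stream-gpt-claude-sketch",
"metadata": {},
"outputs": [],
"source": [
"# stream_brochure above can route to GPT and Claude, but those streaming functions aren't defined in this notebook.\n",
"# Here is a minimal sketch of both, assuming OPENAI_API_KEY and ANTHROPIC_API_KEY are also set in your .env,\n",
"# and using gpt-4o-mini and claude-3-haiku-20240307 purely as example model names - swap in whatever you have access to.\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"\n",
"def stream_gpt(prompt):\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": prompt}\n",
"    ]\n",
"    stream = openai.chat.completions.create(model='gpt-4o-mini', messages=messages, stream=True)\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        result += chunk.choices[0].delta.content or \"\"\n",
"        yield result\n",
"\n",
"def stream_claude(prompt):\n",
"    result = claude.messages.stream(\n",
"        model=\"claude-3-haiku-20240307\",\n",
"        max_tokens=1000,\n",
"        system=system_message,\n",
"        messages=[{\"role\": \"user\", \"content\": prompt}]\n",
"    )\n",
"    response = \"\"\n",
"    with result as stream:\n",
"        for text in stream.text_stream:\n",
"            response += text\n",
"            yield response"
]
},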
{
"cell_type": "code",
"execution_count": null,
"id": "d9554211-c832-4558-90c8-fceab95fd23c",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\"),\n",
" gr.Dropdown([\"Informational\", \"Promotional\", \"Humorous\"], label=\"Select tone\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d4e6efd-66e8-4388-bfc3-782bde4babfb",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}