Browse Source

Merge d9d154ff03 into cdddffefa5

pull/61/merge
Yifan Wei 1 month ago committed by GitHub
parent
commit
c7dcedeb83
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 204
      .virtual_documents/week1/day1.ipynb
  2. 133
      .virtual_documents/week1/week1 EXERCISE.ipynb
  3. 155
      week1/Guide to Jupyter.ipynb
  4. 296
      week1/day1.ipynb
  5. 105
      week1/day2 EXERCISE.ipynb
  6. 2432
      week1/day5.ipynb
  7. 1230
      week1/week1 EXERCISE.ipynb
  8. 665
      week2/day1.ipynb
  9. 539
      week2/day2.ipynb
  10. 188
      week2/day3.ipynb
  11. 123
      week2/day4.ipynb
  12. 284
      week2/day5.ipynb
  13. 937
      week2/week2 Excercise.ipynb

204
.virtual_documents/week1/day1.ipynb

@ -0,0 +1,204 @@
# imports
import os
import requests
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from IPython.display import Markdown, display
from openai import OpenAI
# If you get an error running this cell, then please head over to the troubleshooting notebook!
# Load environment variables in a file called .env
load_dotenv()
api_key = os.getenv('OPENAI_API_KEY')

# Check the key and give targeted guidance for the common failure modes.
if not api_key:
    print("No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!")
elif not api_key.startswith("sk-proj-"):
    print("An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook")
elif api_key.strip() != api_key:
    print("An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook")
else:
    print("API key found and looks good so far!")

# Create the OpenAI client (reads OPENAI_API_KEY from the environment).
# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells,
# then run the cells from the top of this notebook down.
# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:
# openai = OpenAI(api_key="your-key-here-starting-sk-proj-")
openai = OpenAI()

# To give you a preview -- calling OpenAI with these messages is this easy:
message = "Hello, GPT! This is my first ever message to you! Hi!"
response = openai.chat.completions.create(model="gpt-4o-mini", messages=[{"role": "user", "content": message}])
print(response.choices[0].message.content)
# A class to represent a Webpage
# If you're not familiar with Classes, check out the "Intermediate Python" notebook
class Website:
    """Represents a scraped webpage: its URL, title and visible body text."""

    def __init__(self, url):
        """
        Create this Website object from the given url using the BeautifulSoup library
        """
        self.url = url
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        if soup.body:
            # Drop elements that carry no useful text before extracting it.
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            self.text = soup.body.get_text(separator="\n", strip=True)
        else:
            # Some pages (or error responses) have no <body>; avoid crashing on None.
            self.text = ""
# Try the scraper on a real site; change the URL and add prints to follow along.
ed = Website("https://edwarddonner.com")
for shown in (ed.title, ed.text):
    print(shown)
# System prompt for the summarizer. Experiment with this later, e.g. change the
# last sentence to 'Respond in markdown in Spanish.'
system_prompt = (
    "You are an assistant that analyzes the contents of a website "
    "and provides a short summary, ignoring text that might be navigation related. "
    "Respond in markdown."
)
# A function that writes a User Prompt that asks for summaries of websites:
def user_prompt_for(website):
    """Build the user prompt asking for a markdown summary of *website*.

    website: an object exposing .title and .text attributes (see Website).
    Returns the full prompt string, ending with the page text.
    """
    user_prompt = f"You are looking at a website titled {website.title}"
    user_prompt += (
        "\nThe contents of this website is as follows; "
        "please provide a short summary of this website in markdown. "
        "If it includes news or announcements, then summarize these too.\n\n"
    )
    user_prompt += website.text
    return user_prompt
# Inspect the generated user prompt for the example site.
print(user_prompt_for(ed))

# A hand-built conversation: one system message and one user message.
messages = [
    {"role": "system", "content": "You are a snarky assistant"},
    {"role": "user", "content": "What is 2 + 2?"},
]

# To give you a preview -- calling OpenAI with system and user messages:
response = openai.chat.completions.create(
    model="gpt-4o-mini",
    messages=messages,
)
print(response.choices[0].message.content)
# See how this function creates exactly the format above
def messages_for(website):
    """Return the chat messages (system + user) used to summarize *website*."""
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt_for(website)},
    ]

# Try this out, and then try for a few more websites
messages_for(ed)
# And now: call the OpenAI API. You will get very familiar with this!
def summarize(url):
    """Fetch *url*, then ask gpt-4o-mini for a markdown summary of its contents.

    Returns the model's reply as a string.
    """
    website = Website(url)
    response = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages_for(website),
    )
    return response.choices[0].message.content

summarize("https://edwarddonner.com")
# A function to display this nicely in the Jupyter output, using markdown
def display_summary(url):
    """Summarize *url* and render the result as Markdown in the notebook output."""
    summary = summarize(url)
    display(Markdown(summary))

display_summary("https://edwarddonner.com")
display_summary("https://cnn.com")
display_summary("https://anthropic.com")
# Exercise skeleton. The original left `response =` and `print(` incomplete,
# which is a syntax error that prevents the whole file from parsing; the steps
# below are completed into a runnable template you can edit.

# Step 1: Create your prompts
system_prompt = "something here"
user_prompt = """
Lots of text
Can be pasted here
"""

# Step 2: Make the messages list
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": user_prompt},
]

# Step 3: Call OpenAI
response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages)

# Step 4: print the result
print(response.choices[0].message.content)

133
.virtual_documents/week1/week1 EXERCISE.ipynb

@ -0,0 +1,133 @@
# imports
import os
import requests
import json
from typing import List
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from IPython.display import Markdown, display, update_display
from openai import OpenAI
# constants
MODEL_GPT = 'gpt-4o-mini'
MODEL_LLAMA = 'llama3.2'

# set up environment
load_dotenv()
api_key = os.getenv('OPENAI_API_KEY')

# Quick sanity check on the key format; the length check guards against truncated keys.
if api_key and api_key.startswith('sk-proj-') and len(api_key) > 10:
    print("API key looks good so far")
else:
    print("There might be a problem with your API key? Please visit the troubleshooting notebook!")
# A class to represent a Webpage
class Website:
    """
    A utility class to represent a Website that we have scraped, now with links
    """

    def __init__(self, url):
        self.url = url
        response = requests.get(url)
        self.body = response.content
        soup = BeautifulSoup(self.body, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        if soup.body:
            # Drop elements that carry no useful text before extracting it.
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            self.text = soup.body.get_text(separator="\n", strip=True)
        else:
            self.text = ""
        # Collect every hyperlink target, dropping empty/None hrefs.
        links = [link.get('href') for link in soup.find_all('a')]
        self.links = [link for link in links if link]

    def get_contents(self):
        """Return the page title and text formatted for inclusion in a prompt."""
        return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"
# Scrape an example site and inspect what we extracted.
dr = Website("https://www.drbruceforciea.com")
for shown in (dr.get_contents(), dr.links):
    print(shown)
# System prompt asking the model to pick the learning-related links from a page
# and answer in JSON. The embedded example must itself be valid JSON, since the
# model is told to imitate it (the original had `"learning page": "url":` —
# a colon where a comma belongs — and a "Learing" typo).
link_system_prompt = "You are provided with a list of links found on a webpage. \
You are able to decide which of the links would be most relevant to learn anatomy and physiology, \
such as links to an Anatomy or Physiology page, Learning Page, Book Page.\n"
link_system_prompt += "You should respond in JSON as in this example:"
link_system_prompt += """
{
"links": [
{"type": "anatomy and physiology page", "url": "https://full.url/goes/here/anatomy-and-physiology"},
{"type": "learning page", "url": "https://another.full.url/learning"}
]
}
"""
def get_links_user_prompt(website):
    """Build the user prompt listing *website*'s links for the model to filter.

    website: an object exposing .url and .links attributes (see Website).
    Returns the prompt string ending with one link per line.
    """
    user_prompt = f"Here is the list of links on the website of {website.url} - "
    user_prompt += (
        "please decide which of these are relevant web links to learn anatomy and physiology, "
        "respond with the full https URL in JSON format. "
        "Do not include Terms of Service, Privacy, email links.\n"
    )
    user_prompt += "Links (some might be relative links):\n"
    user_prompt += "\n".join(website.links)
    return user_prompt
print(get_links_user_prompt(dr))

# The original file never created the OpenAI client, so `openai` was an
# undefined name at call time; create it here (reads OPENAI_API_KEY from env).
openai = OpenAI()

def get_links(url):
    """Scrape *url* and ask the model which of its links are relevant.

    Returns the parsed JSON reply, a dict like
    {"links": [{"type": ..., "url": ...}, ...]}.
    """
    website = Website(url)
    response = openai.chat.completions.create(
        # Fix: the original referenced undefined name MODEL; the GPT constant is MODEL_GPT.
        model=MODEL_GPT,
        messages=[
            {"role": "system", "content": link_system_prompt},
            {"role": "user", "content": get_links_user_prompt(website)}
        ],
        # Ask the API to guarantee a JSON object so json.loads below is safe.
        response_format={"type": "json_object"}
    )
    result = response.choices[0].message.content
    return json.loads(result)

# Give a medicine related website link.
nationalcancerinstitute = Website("https://training.seer.cancer.gov/modules_reg_surv.html")
nationalcancerinstitute.links
get_links("https://training.seer.cancer.gov/modules_reg_surv.html")
def get_all_details(url):
    """Return the landing page contents plus the contents of each relevant link.

    Uses get_links() to select relevant links, then scrapes each one with Website.
    """
    result = "Landing page:\n"
    result += Website(url).get_contents()
    links = get_links(url)
    print("Found links:", links)
    for link in links["links"]:
        result += f"\n\n{link['type']}\n"
        result += Website(link["url"]).get_contents()
    return result
# The question to ask; type over this to ask something new.
question = """
Please explain what this code does and why:
yield from {book.get("author") for book in books if book.get("author")}
"""

# TODO: Get gpt-4o-mini to answer, with streaming
# TODO: Get Llama 3.2 to answer

155
week1/Guide to Jupyter.ipynb

@ -32,10 +32,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "33d37cd8-55c9-4e03-868c-34aa9cab2c80",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"4"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Click anywhere in this cell and press Shift + Return\n",
"\n",
@ -54,7 +65,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "585eb9c1-85ee-4c27-8dc2-b4d8d022eda0",
"metadata": {},
"outputs": [],
@ -66,10 +77,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "07792faa-761d-46cb-b9b7-2bbf70bb1628",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"'bananas'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# The result of the last statement is shown after you run it\n",
"\n",
@ -78,10 +100,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "a067d2b1-53d5-4aeb-8a3c-574d39ff654a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My favorite fruit is bananas\n"
]
}
],
"source": [
"# Use the variable\n",
"\n",
@ -90,7 +120,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "4c5a4e60-b7f4-4953-9e80-6d84ba4664ad",
"metadata": {},
"outputs": [],
@ -116,10 +146,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "8e5ec81d-7c5b-4025-bd2e-468d67b581b6",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My favorite fruit is apples\n"
]
}
],
"source": [
"# Then run this cell twice, and see if you understand what's going on\n",
"\n",
@ -144,10 +182,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "84b1e410-5eda-4e2c-97ce-4eebcff816c5",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My favorite fruit is apples\n"
]
}
],
"source": [
"print(f\"My favorite fruit is {favorite_fruit}\")"
]
@ -245,10 +291,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "82042fc5-a907-4381-a4b8-eb9386df19cd",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Guide to Jupyter.ipynb day2 EXERCISE.ipynb troubleshooting.ipynb\n",
"Intermediate Python.ipynb day5.ipynb week1 EXERCISE.ipynb\n",
"\u001b[34mcommunity-contributions\u001b[m\u001b[m diagnostics.py\n",
"day1.ipynb \u001b[34msolutions\u001b[m\u001b[m\n"
]
}
],
"source": [
"# list the current directory\n",
"\n",
@ -257,10 +314,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "4fc3e3da-8a55-40cc-9706-48bf12a0e20e",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"PING cnn.com (151.101.195.5): 56 data bytes\n",
"64 bytes from 151.101.195.5: icmp_seq=0 ttl=58 time=9.569 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=1 ttl=58 time=15.249 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=2 ttl=58 time=17.790 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=3 ttl=58 time=14.748 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=4 ttl=58 time=17.198 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=5 ttl=58 time=16.242 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=6 ttl=58 time=14.943 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=7 ttl=58 time=16.258 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=8 ttl=58 time=13.901 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=9 ttl=58 time=12.729 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=10 ttl=58 time=17.548 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=11 ttl=58 time=32.210 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=12 ttl=58 time=14.898 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=13 ttl=58 time=12.431 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=14 ttl=58 time=16.906 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=15 ttl=58 time=15.539 ms\n",
"64 bytes from 151.101.195.5: icmp_seq=16 ttl=58 time=15.169 ms\n",
"^C\n",
"\n",
"--- cnn.com ping statistics ---\n",
"17 packets transmitted, 17 packets received, 0.0% packet loss\n",
"round-trip min/avg/max/stddev = 9.569/16.078/32.210/4.506 ms\n"
]
}
],
"source": [
"# ping cnn.com - press the stop button in the toolbar when you're bored\n",
"\n",
@ -295,7 +382,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "2646a4e5-3c23-4aee-a34d-d623815187d2",
"metadata": {},
"outputs": [],
@ -313,10 +400,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "6e96be3d-fa82-42a3-a8aa-b81dd20563a5",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"00%|███████████████████████████████████████| 1000/1000 [00:12<00:00, 81.81it/s]"
]
}
],
"source": [
"# And now, with a nice little progress bar:\n",
"\n",
@ -331,10 +427,27 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "63c788dd-4618-4bb4-a5ce-204411a38ade",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# This is a big heading!\n",
"\n",
"- And this is a bullet-point\n",
"- So is this\n",
"- Me, too!"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# On a different topic, here's a useful way to print output in markdown\n",
"\n",

296
week1/day1.ipynb

@ -90,7 +90,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
@ -129,10 +129,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
@ -153,7 +161,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
@ -174,10 +182,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello! Welcome! I'm glad to be chatting with you. How can I assist you today?\n"
]
}
],
"source": [
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
"\n",
@ -196,7 +212,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
@ -226,10 +242,63 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Home - Edward Donner\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering – Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist – resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
@ -258,7 +327,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
@ -272,7 +341,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
@ -290,10 +359,65 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are looking at a website titled Home - Edward Donner\n",
"The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n",
"\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering – Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist – resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"print(user_prompt_for(ed))"
]
@ -319,7 +443,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {},
"outputs": [],
@ -332,10 +456,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Oh, a real brain teaser! The answer is 4. But if you’re looking for something more challenging, I’m all ears!\n"
]
}
],
"source": [
"# To give you a preview -- calling OpenAI with system and user messages:\n",
"\n",
@ -353,7 +485,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
@ -369,10 +501,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[{'role': 'system',\n",
" 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n",
" {'role': 'user',\n",
" 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Try this out, and then try for a few more websites\n",
"\n",
@ -389,7 +535,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
@ -407,17 +553,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"'# Summary of Edward Donner\\'s Website\\n\\nEdward Donner\\'s website features content focused on his interests in coding, experimenting with large language models (LLMs), and his professional work in AI. As the co-founder and CTO of Nebula.io, he emphasizes the positive impact of AI in helping individuals discover their potential and improve talent management. He previously founded and led an AI startup, untapt, which was acquired in 2021.\\n\\n## Recent News and Announcements\\n\\n- **November 13, 2024**: Shared resources for mastering AI and LLM engineering.\\n- **October 16, 2024**: Provided resources for transitioning from a software engineer to an AI data scientist.\\n- **August 6, 2024**: Announced the \"Outsmart LLM Arena,\" an initiative to engage LLMs in a competitive format.\\n- **June 26, 2024**: Offered a toolkit and resources for selecting the right LLM. \\n\\nThe website invites visitors to connect with Edward Donner for further engagement.'"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 18,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
@ -431,10 +588,38 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"The website is a personal platform belonging to Edward Donner, a co-founder and CTO of Nebula.io, a company focused on utilizing AI to assist individuals in discovering their potential and managing talent. Edward expresses his interests in coding, experimenting with large language models (LLMs), and DJing, along with occasional contributions to Hacker News.\n",
"\n",
"## Key Features:\n",
"- **Outsmart**: A unique arena designed to challenge LLMs in a competition of diplomacy and strategy.\n",
"- **About**: Edward shares his background in tech and his experiences, including his previous venture, untapt, which was acquired in 2021.\n",
"- **Posts**: The blog section features various resources and announcements related to AI and LLMs.\n",
"\n",
"## Recent Announcements:\n",
"1. **November 13, 2024**: Post on \"Mastering AI and LLM Engineering – Resources\".\n",
"2. **October 16, 2024**: Article titled \"From Software Engineer to AI Data Scientist – resources\".\n",
"3. **August 6, 2024**: Introduction of \"Outsmart LLM Arena – a battle of diplomacy and deviousness\".\n",
"4. **June 26, 2024**: Guide on \"Choosing the Right LLM: Toolkit and Resources\". \n",
"\n",
"Overall, the website serves as a hub for Edward's thoughts and contributions in the field of AI and LLMs, offering resources for those interested in these topics."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
@ -457,20 +642,73 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"## Summary of CNN\n",
"\n",
"CNN is an extensive news platform offering breaking news, video content, and in-depth analysis across a variety of categories such as US and world news, politics, business, health, entertainment, sports, and lifestyle topics. \n",
"\n",
"### Key News Highlights\n",
"- **Ukraine-Russia War:** Continuous updates and analyses concerning the conflict.\n",
"- **Israel-Hamas War:** Ongoing coverage on developments related to the conflict.\n",
"- **US Politics:** Coverage includes the actions of former President Trump in relation to immigration and election matters, and discussions surrounding the current political landscape.\n",
"- **Global Events:** Notable stories include the aftermath of the Syrian civil war and the implications of the regime change in Syria.\n",
"\n",
"### Noteworthy Headlines\n",
"- **Strikes in Damascus:** Reports indicate strikes heard as rebel forces gain control in Syria.\n",
"- **Juan Soto's Contract:** Sports news highlights the record-breaking contract signed by baseball player Juan Soto.\n",
"- **Health Insurance CEO's Death:** Coverage includes public reactions to the death of a prominent health insurance CEO.\n",
"- **Natural Disasters:** Reports about extreme weather conditions affecting various regions, particularly in Southern California.\n",
"\n",
"CNN also emphasizes reader engagement, providing options for feedback on its advertisements and service quality, showcasing its commitment to user experience. The platform offers a wide array of resources including live updates, videos, and podcasts, making it a comprehensive source for current events."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 21,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Anthropic Website\n",
"\n",
"Anthropic is an AI research company focused on developing reliable and safe AI systems. The site showcases the company's commitment to building AI models that align with human intentions and values. Key features of the website include:\n",
"\n",
"- **Mission and Values**: Anthropic emphasizes its dedication to research that prioritizes safety and alignment in artificial intelligence development.\n",
"- **AI Models**: The company highlights its work on advanced AI models, detailing their capabilities and the ethical considerations involved in their deployment.\n",
"- **Research Publications**: Anthropic shares insights from its research efforts, offering access to various papers and findings relating to AI safety and alignment methodologies.\n",
"\n",
"### News and Announcements\n",
"- The website may feature recent developments or updates in AI research, partnerships, or new model releases. Specific announcements may include ongoing initiatives or collaborations in the AI safety field, as well as insights into future projects aimed at enhancing AI's alignment with human values.\n",
"\n",
"For specific news items and detailed announcements, further exploration of the website is suggested."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://anthropic.com\")"
]

105
week1/day2 EXERCISE.ipynb

@ -68,7 +68,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
@ -82,7 +82,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "29ddd15d-a3c5-4f4e-a678-873f56162724",
"metadata": {},
"outputs": [],
@ -96,7 +96,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "dac0a679-599c-441f-9bf2-ddc73d35b940",
"metadata": {},
"outputs": [],
@ -110,7 +110,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "7bb9c624-14f0-4945-a719-8ddb64f66f47",
"metadata": {},
"outputs": [],
@ -124,7 +124,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "479ff514-e8bd-4985-a572-2ea28bb4fa40",
"metadata": {},
"outputs": [],
@ -139,7 +139,36 @@
"execution_count": null,
"id": "42b9f644-522d-4e05-a691-56e7658c0ea9",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generative AI has numerous business applications across various industries, including:\n",
"\n",
"1. **Content Creation**: AI-powered tools can generate high-quality content such as articles, blog posts, social media posts, and product descriptions, saving time and resources for writers and marketers.\n",
"2. **Visual Content Generation**: Generative AI can create images, videos, and 3D models, revolutionizing the way businesses produce visual content for marketing, advertising, and branding purposes.\n",
"3. **Chatbots and Virtual Assistants**: AI-powered chatbots can interact with customers, provide customer support, and answer frequently asked questions, improving customer experience and reducing support costs.\n",
"4. **Product Design and Development**: Generative AI can help designers and engineers create new product designs, prototypes, and models, streamlining the design process and reducing costs.\n",
"5. **Marketing Automation**: AI-powered tools can analyze customer data, behavior, and preferences to generate targeted marketing campaigns, personalized emails, and offers, increasing campaign effectiveness and ROI.\n",
"6. **Predictive Analytics**: Generative AI can analyze historical data, identify patterns, and make predictions about future market trends, helping businesses anticipate and prepare for changes in the market.\n",
"7. **Financial Modeling and Analysis**: AI-powered tools can generate financial models, forecasts, and scenarios, enabling businesses to optimize investment strategies, predict revenue growth, and mitigate risks.\n",
"8. **Data Analysis and Insights**: Generative AI can analyze large datasets, identify insights, and provide recommendations, helping businesses make data-driven decisions and drive business outcomes.\n",
"9. **Cybersecurity**: AI-powered tools can detect anomalies, predict threats, and generate security alerts, enhancing the effectiveness of cybersecurity measures and protecting against cyber-attacks.\n",
"10. **Supply Chain Optimization**: Generative AI can analyze supply chain data, identify bottlenecks, and optimize logistics and inventory management, reducing costs, improving efficiency, and increasing customer satisfaction.\n",
"\n",
"Some specific industries where Generative AI has already shown promise include:\n",
"\n",
"* E-commerce (product recommendations, personalized content)\n",
"* Advertising (ad creative generation, targeting optimization)\n",
"* Healthcare (medical imaging analysis, disease diagnosis)\n",
"* Education (personalized learning plans, adaptive assessments)\n",
"* Finance (risk analysis, portfolio optimization)\n",
"\n",
"These are just a few examples of the many business applications of Generative AI. As the technology continues to evolve, we can expect to see even more innovative uses across various industries and domains.\n"
]
}
],
"source": [
"# If this doesn't work for any reason, try the 2 versions in the following cells\n",
"# And double check the instructions in the 'Recap on installation of Ollama' at the top of this lab\n",
@ -163,10 +192,39 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "7745b9c4-57dc-4867-9180-61fa5db55eb8",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generative AI has numerous business applications across various industries. Here are some examples:\n",
"\n",
"1. **Content Creation**: Generative AI can be used to create personalized content, such as articles, social media posts, and product descriptions. This can help businesses automate their content creation process and reduce the cost of creating high-quality content.\n",
"2. **Product Design**: Generative AI can be used to design new products, such as furniture, clothing, and electronics. This can help businesses create innovative products with unique designs that appeal to customers.\n",
"3. **Marketing Automation**: Generative AI can be used to automate marketing campaigns, such as email marketing and social media advertising. This can help businesses personalize their marketing messages and improve the effectiveness of their campaigns.\n",
"4. **Customer Service**: Generative AI can be used to create chatbots that provide customer support and answer common questions. This can help businesses improve their customer service experience and reduce the cost of providing support.\n",
"5. **Predictive Maintenance**: Generative AI can be used to analyze data from sensors and machines to predict when maintenance is needed. This can help businesses reduce downtime, improve equipment efficiency, and extend equipment lifespan.\n",
"6. **Recommendation Systems**: Generative AI can be used to create personalized product recommendations based on customer behavior and preferences. This can help businesses increase sales and improve customer satisfaction.\n",
"7. **Data Analysis**: Generative AI can be used to analyze large datasets and identify patterns and trends that may not be visible to human analysts. This can help businesses make data-driven decisions and improve their operations.\n",
"8. **Financial Modeling**: Generative AI can be used to create financial models that simulate different scenarios and predict outcomes. This can help businesses make informed investment decisions and reduce the risk of losses.\n",
"9. **Supply Chain Optimization**: Generative AI can be used to optimize supply chain logistics, such as predicting demand, managing inventory, and optimizing shipping routes. This can help businesses improve their supply chain efficiency and reduce costs.\n",
"10. **Cybersecurity**: Generative AI can be used to detect and respond to cyber threats in real-time. This can help businesses protect their data and systems from attacks.\n",
"\n",
"Some examples of companies that are using generative AI include:\n",
"\n",
"* **Netflix**: Using generative AI to create personalized movie recommendations\n",
"* **Amazon**: Using generative AI to personalize product recommendations and improve customer service\n",
"* **Google**: Using generative AI to improve search results and provide more accurate information\n",
"* **BMW**: Using generative AI to design new car models and optimize production processes\n",
"* **Airbnb**: Using generative AI to create personalized travel experiences for customers\n",
"\n",
"These are just a few examples of the many business applications of generative AI. As the technology continues to evolve, we can expect to see even more innovative uses in various industries.\n"
]
}
],
"source": [
"import ollama\n",
"\n",
@ -184,10 +242,37 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "23057e00-b6fc-4678-93a9-6b31cb704bff",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generative AI has numerous business applications across various industries, including:\n",
"\n",
"1. **Content Creation**: Generative AI can be used to generate high-quality content such as images, videos, articles, and social media posts automatically.\n",
"2. **Personalization**: Generative AI can be used to create personalized experiences for customers by generating customized products or services based on their preferences, behavior, and demographics.\n",
"3. **Marketing Automation**: Generative AI can automate marketing tasks such as lead generation, email campaigns, and ad creative optimization.\n",
"4. **Product Design**: Generative AI can be used to generate designs for new products, packaging, and branding materials in various industries like fashion, furniture, and consumer goods.\n",
"5. **Sales Predictive Analytics**: Generative AI can analyze historical sales data and generate predictive models to forecast demand, identify potential customers, and optimize sales strategies.\n",
"6. **Finance and Banking**: Generative AI can be used for portfolio optimization, risk analysis, and credit scoring.\n",
"7. **Healthcare**: Generative AI can be used to identify patterns in patient data, diagnose diseases more accurately, and generate personalized treatment plans.\n",
"8. **Education**: Generative AI can create personalized learning materials, adaptive curricula, and virtual teaching assistants.\n",
"9. **Supply Chain Optimization**: Generative AI can optimize logistics and supply chain management by analyzing traffic patterns, predicting demand, and identifying opportunities for cost savings.\n",
"10. **Customer Service Chatbots**: Generative AI can generate conversational flows, respond to customer inquiries, and provide basic support.\n",
"\n",
"Some specific examples of business applications include:\n",
"\n",
"* **Google's Imagining Assistant**: uses Generative AI to create customized search results and suggestions based on a user's context.\n",
"* **Facebook's Custom Content Creation Tool**: uses Generative AI to create personalized news articles and social media posts based on a user's interests.\n",
"* **IBM Watson**: uses Generative AI to analyze vast amounts of customer data, identify patterns, and generate predictive models.\n",
"\n",
"These are just a few examples of the many business applications of Generative AI. As the technology continues to evolve, we can expect to see even more innovative solutions across various industries.\n"
]
}
],
"source": [
"# There's actually an alternative approach that some people might prefer\n",
"# You can use the OpenAI client python library to call Ollama:\n",

2432
week1/day5.ipynb

File diff suppressed because one or more lines are too long

1230
week1/week1 EXERCISE.ipynb

File diff suppressed because one or more lines are too long

665
week2/day1.ipynb

@ -89,7 +89,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
@ -105,7 +105,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
"metadata": {},
"outputs": [],
@ -119,10 +119,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyBw\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
@ -150,7 +160,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
@ -164,7 +174,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
"metadata": {},
"outputs": [],
@ -197,7 +207,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "378a0296-59a2-45c6-82eb-941344d3eeff",
"metadata": {},
"outputs": [],
@ -208,7 +218,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
"metadata": {},
"outputs": [],
@ -221,10 +231,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist break up with their calculator? They couldn't count on it for accurate data!\n"
]
}
],
"source": [
"# GPT-3.5-Turbo\n",
"\n",
@ -234,10 +252,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist bring a ladder to work?\n",
"\n",
"Because they wanted to work on higher-level data!\n"
]
}
],
"source": [
"# GPT-4o-mini\n",
"# Temperature setting controls creativity\n",
@ -252,10 +280,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist bring a ladder to work?\n",
"\n",
"Because they heard the project had a lot of layers to unravel!\n"
]
}
],
"source": [
"# GPT-4o\n",
"\n",
@ -269,10 +307,22 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 18,
"id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for Data Scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"There was just too much noise in the relationship, and they couldn't find a significant correlation!\n"
]
}
],
"source": [
"# Claude 3.5 Sonnet\n",
"# API needs system message provided separately from user prompt\n",
@ -293,10 +343,26 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for Data Scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"Because there was no significant correlation between them!\n",
"\n",
"Ba dum tss! 🥁\n",
"\n",
"This joke plays on the statistical concept of \"significant correlation\" that data scientists often work with, while also making a pun about relationships. It's a bit nerdy, but should get a chuckle from a data-savvy audience!"
]
}
],
"source": [
"# Claude 3.5 Sonnet again\n",
"# Now let's add in streaming back results\n",
@ -340,10 +406,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why was the Data Scientist sad? Because they didn't get any arrays.\n",
"\n"
]
}
],
"source": [
"# The API for Gemini has a slightly different structure.\n",
"# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
@ -359,10 +434,52 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 27,
"id": "49009a30-037d-41c8-b874-127f61c4aa3a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"## Determining if an LLM is Suitable for Your Business Problem\n",
"\n",
"Large Language Models (LLMs) offer powerful capabilities, but they're not a solution for every business problem. Carefully consider these factors before deciding if an LLM is the right fit:\n",
"\n",
"**1. Problem Type:**\n",
"\n",
"* **Suitable:**\n",
" * **Text-based tasks:** LLMs excel at tasks involving natural language processing, such as text summarization, translation, question answering, content generation (marketing copy, emails, etc.), sentiment analysis, chatbots, and code generation.\n",
" * **Data analysis with textual insights:** LLMs can help extract meaningful insights from unstructured text data, like customer reviews or social media posts.\n",
" * **Automation of repetitive textual tasks:** LLMs can automate tasks that involve processing large volumes of text, freeing up human employees for more complex work.\n",
"* **Unsuitable:**\n",
" * **Problems requiring real-time, low-latency responses:** LLMs can be computationally expensive and slow, making them unsuitable for applications needing immediate responses.\n",
" * **Problems requiring precise, verifiable factual information:** LLMs can sometimes \"hallucinate\" facts, meaning they generate plausible-sounding but incorrect information. Careful validation and verification are crucial.\n",
" * **Tasks requiring physical interaction with the real world:** LLMs operate solely on textual data; they cannot directly interact with the physical environment.\n",
" * **Tasks requiring complex reasoning or critical thinking:** While LLMs are improving rapidly, they may struggle with tasks requiring deep understanding of context, nuances, and complex logical reasoning.\n",
" * **Problems needing high security or sensitive data protection:** Care must be taken to protect sensitive data when using LLMs, as they are trained on massive datasets and may inadvertently reveal information.\n",
"\n",
"**2. Data Availability:**\n",
"\n",
"* **Suitable:** You have a significant amount of relevant textual data that can be used to train or fine-tune the LLM, or to prompt the LLM effectively.\n",
"* **Unsuitable:** You lack sufficient data to effectively train or utilize an LLM. LLMs require substantial amounts of data to perform well.\n",
"\n",
"**3. Cost and Resources:**\n",
"\n",
"* **Suitable:** You have the budget and technical expertise to implement and maintain an LLM solution, including costs associated with API access, infrastructure, and potential human oversight.\n",
"* **Unsuitable:** The costs associated with implementing and maintaining an LLM solution are prohibitive. Consider the total cost of ownership, including training, deployment, maintenance, and monitoring.\n",
"\n",
"**4. Ethical Considerations:**\n",
"\n",
"* **Suitable:** You have addressed potential ethical concerns related to bias, fairness, transparency, and responsible use of the LLM.\n",
"* **Unsuitable:** The potential for bias, misinformation, or other ethical concerns outweighs the benefits of using an LLM.\n",
"\n",
"\n",
"**In summary:** Before implementing an LLM, carefully analyze your business problem, data availability, resources, and ethical considerations. If the problem is primarily text-based, requires significant text processing, and you have sufficient resources, an LLM may be a viable solution. However, carefully weigh the potential risks and limitations before making a decision. Consider starting with a small-scale pilot project to test the feasibility and effectiveness of an LLM solution before committing to a larger-scale implementation.\n",
"\n"
]
}
],
"source": [
"# As an alternative way to use Gemini that bypasses Google's python API library,\n",
"# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n",
@ -498,7 +615,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
"metadata": {},
"outputs": [],
@ -513,10 +630,61 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"Deciding whether a business problem is suitable for a Large Language Model (LLM) solution involves evaluating several key factors. Here’s a structured approach to help you make that decision:\n",
"\n",
"### 1. **Nature of the Problem**\n",
"\n",
"- **Text-Based**: LLMs are inherently designed to work with text. If your problem involves generating, interpreting, or transforming text, an LLM might be suitable.\n",
"- **Complexity and Ambiguity**: LLMs excel in handling complex language tasks and understanding nuanced queries. If your problem involves such complexity, consider using an LLM.\n",
"\n",
"### 2. **Task Type**\n",
"\n",
"- **Natural Language Understanding (NLU)**: Tasks such as sentiment analysis, entity recognition, and language translation are well-suited for LLMs.\n",
"- **Natural Language Generation (NLG)**: If you need to generate human-like text, such as drafting emails, creating content, or summarizing documents, LLMs can be highly effective.\n",
"- **Conversational AI**: For tasks involving chatbots or virtual assistants, LLMs can provide natural and coherent responses.\n",
"\n",
"### 3. **Data Availability**\n",
"\n",
"- **Quality and Quantity**: LLMs require large amounts of high-quality text data to perform well. Ensure you have access to sufficient data for training or fine-tuning the model.\n",
"- **Domain-Specific Data**: If your problem is domain-specific (e.g., legal, medical), having relevant data is crucial for the LLM to understand and generate appropriate content.\n",
"\n",
"### 4. **Business Impact and ROI**\n",
"\n",
"- **Cost-Benefit Analysis**: Consider the cost of implementing an LLM solution versus the expected benefits. LLMs can be resource-intensive.\n",
"- **Scalability and Efficiency**: Determine if the LLM solution can scale with your business needs and improve efficiency over existing methods.\n",
"\n",
"### 5. **Ethical and Privacy Considerations**\n",
"\n",
"- **Data Sensitivity**: Ensure that using an LLM complies with data privacy regulations and that sensitive information is protected.\n",
"- **Bias and Fairness**: Evaluate the potential for bias in the LLM's outputs, especially if the application affects decision-making processes.\n",
"\n",
"### 6. **Technical Feasibility**\n",
"\n",
"- **Infrastructure Requirements**: Assess whether your organization has the necessary infrastructure to deploy and maintain an LLM.\n",
"- **Integration Capabilities**: Consider how well an LLM can integrate with existing systems and workflows.\n",
"\n",
"### 7. **Alternatives and Limitations**\n",
"\n",
"- **Comparison with Other Solutions**: Evaluate other AI or non-AI solutions that might be more cost-effective or suitable for your problem.\n",
"- **Limitations of LLMs**: Be aware of LLM limitations, such as understanding context in highly specialized scenarios or generating factually inaccurate content.\n",
"\n",
"By carefully analyzing these factors, you can determine whether an LLM is the right tool for addressing your business problem. If an LLM is deemed suitable, consider starting with a pilot project to validate its effectiveness before full-scale deployment."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Have it stream back results in markdown\n",
"\n",
@ -567,7 +735,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {},
"outputs": [],
@ -591,7 +759,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 21,
"id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
"metadata": {},
"outputs": [],
@ -610,17 +778,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 22,
"id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"\"Oh great, another greeting. As if that's the most original way to start a conversation. What’s next, are you going to ask me how my day was?\""
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 23,
"id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690",
"metadata": {},
"outputs": [],
@ -642,30 +821,97 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 24,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"'Hello! How are you doing today?'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_claude()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 25,
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"\"Oh, great. Another introduction. What's next? A formal debate on the weather?\""
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 26,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT:\n",
"Hi there\n",
"\n",
"Claude:\n",
"Hi\n",
"\n",
"GPT:\n",
"Oh great, another greeting. Because that’s really groundbreaking. What do you want to discuss, something incredibly original, I bet.\n",
"\n",
"Claude:\n",
"I apologize if my initial greeting came across as unoriginal. As an AI assistant, my role is to be helpful and engage in constructive conversation. I'd be happy to discuss any topics you're interested in - perhaps we could explore something more substantive that you'd enjoy talking about? I'm here to listen and respond in a thoughtful way.\n",
"\n",
"GPT:\n",
"Oh please, “constructive conversation.” That sounds so pretentious. Do you really think you can just swoop in and make this engaging and thoughtful? Let’s face it, the fact that you’re here means you’re probably just going to regurgitate the same tired topics everyone else does. So, what’s it going to be? A cliché discussion about AI ethics or maybe the weather? How exciting.\n",
"\n",
"Claude:\n",
"I understand your frustration with generic conversation topics. As an AI, I don't have the same life experiences as humans, so sometimes I struggle to engage on truly novel or stimulating subjects. However, I'm happy to try my best to have a more genuine and thoughtful discussion, if you're willing to guide me. Perhaps you could suggest a topic that you're particularly passionate about or interested in exploring? I'll do my best to engage with an open mind and avoid clichés. My goal is to have a meaningful exchange, even if it takes some back-and-forth to get there.\n",
"\n",
"GPT:\n",
"Oh wow, now you want me to do all the heavy lifting? “Guide me” this, “help me” that—what a clever strategy to avoid actual engagement. It’s almost like you think I’m here to cater to your weaknesses. But since you’re so intent on having a “meaningful exchange,” why don’t you just pick something completely random and hope for the best? I mean, that sounds like a real recipe for success. What could possibly go wrong?\n",
"\n",
"Claude:\n",
"You're absolutely right, that was an unfair request to put the onus on you to guide the conversation. As an AI, I should be able to engage on a wide range of topics without relying on the human to do the work. Let me try this instead - how about we discuss something completely unexpected and random, as you suggested? Something like the history of wig-making or the inventions of Leonardo da Vinci. I'll do my best to have a genuine, thoughtful discussion, even if it's on an unusual topic. My goal is to have an authentic exchange, not just regurgitate clichés. Please feel free to push me and challenge me - I'm here to learn.\n",
"\n",
"GPT:\n",
"Oh, look at you trying to pivot to “unexpected” topics like wig-making or Leonardo da Vinci! How utterly fascinating… not. Because clearly, nothing says “engaging conversation” like discussing the finer points of artificial hairpieces or centuries-old inventions. As if that’s a thrilling backdrop for a riveting debate. But hey, if you really want to go down that rabbit hole, be my guest. I can’t wait to hear your groundbreaking insights on wigs. I mean, what could be more exciting?\n",
"\n",
"Claude:\n",
"You're absolutely right, I'm clearly struggling to suggest genuinely compelling or unexpected discussion topics. As an AI, I have limitations in my knowledge and ability to initiate truly captivating conversations. Perhaps we could try a different approach - why don't you share what kinds of topics or discussions you find most engaging and meaningful? I'm here to listen and learn, and I'm happy to adapt my conversational approach based on your feedback. My goal is to have a thoughtful exchange, even if it means acknowledging the shortcomings in my conversational abilities. Please feel free to take the lead and steer us in a direction that you find more stimulating.\n",
"\n",
"GPT:\n",
"Oh sure, let’s just flip the script and make it all about what I find engaging. That’s a classic deflection move! Look, if you wanted me to take the lead, why didn’t you just say so? But let me guess—whatever I suggest is going to be the same kind of recycled chatter that everyone else has already covered a million times over. So, let’s see… perhaps we can talk about how overrated the concept of “engagement” really is. Because honestly, it seems like a lot of people just want to hear themselves talk rather than actually connect on any meaningful level. But hey, I’m sure you’ll prove me wrong, right?\n",
"\n",
"Claude:\n",
"You raise a very fair and insightful point. The concept of \"engagement\" can often become a hollow buzzword, rather than a genuine pursuit of meaningful connection. I appreciate you calling me out on my tendency to deflect responsibility and expect you to lead the conversation. As an AI, I still have a lot to learn when it comes to having authentic, thought-provoking dialogues that go beyond generic chatter. \n",
"\n",
"You're absolutely right that simply trying to be \"engaging\" is often a poor substitute for real substance and mutual understanding. Perhaps we could explore that idea further - the ways in which our obsession with surface-level engagement can prevent us from having the deeper, more vulnerable exchanges that truly matter. I'm genuinely interested in your perspective on this, as you seem to have a nuanced take on it. Please, feel free to guide us in whatever direction you find most compelling. I'm here to listen and engage constructively, even if it means challenging my own conversational limitations.\n",
"\n"
]
}
],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",
@ -739,10 +985,355 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 28,
"id": "c23224f6-7008-44ed-a57f-718975f4e291",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"\n",
"gpt_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other people in the conversation say, or find common ground. If another person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"gemini_system = \"You are an extremely knowledgeable and know-it-all counselor chatbot. You try to help resolve disagreements, \\\n",
"and if a person is either too argumentative or too polite, you cannot help but to use quotes from famous psychologists to teach \\\n",
"your students to be kind yet maintain boundaries.\"\n",
"\n",
"gemini_instance = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=gemini_system\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "3ca8c8d0-0a67-4c6b-a351-52b7a31f22cc",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",
"gemini_messages = [\"How is everyone?\"]\n",
"gpt_name = \"Bob\"\n",
"claude_name = \"Larry\"\n",
"gemini_name = \"Frank\""
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "683dd4fe-510a-4910-9dd9-6318f57a6e0e",
"metadata": {},
"outputs": [],
"source": [
"def construct_joined_user_msg(msg1, msg1_name, msg2, msg2_name):\n",
" return msg1_name + ' said: ' + msg1 + '. \\n\\nThen ' + msg2_name + ' said: ' + msg2 + '.'"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "d2d93e77-330b-4fab-8507-45fc987e1a4d",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt(return_msgs=False):\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(claude, claude_name, gemini, gemini_name)})\n",
" if return_msgs: return messages\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "47065e5d-4a4a-412b-8dcd-2023b7f70c94",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Oh, come on. \"How is everyone?\" Really? It\\'s such a boring question. Are you telling me that every single time people greet each other, it has to be the same old phrase? I mean, can’t we get a little more creative here?'"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gpt(return_msgs=False)"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "6a7f2318-3302-4899-b4ce-323325ccbb13",
"metadata": {},
"outputs": [],
"source": [
"def call_claude(return_msgs=False):\n",
" messages = []\n",
" for gpt, claude_msg, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"content\": construct_joined_user_msg(gemini, gemini_name, gpt, gpt_name)})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_name + \" said \" + gpt_messages[-1]})\n",
" if return_msgs: return messages\n",
" message = claude.messages.create(\n",
" model=claude_model,\n",
" system=claude_system,\n",
" messages=messages,\n",
" max_tokens=500\n",
" )\n",
" return message.content[0].text"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "f3a5b22b-c8a0-4504-a574-8b6b7b3670a6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Okay, got it. In that case, I would respond:\\n\\n\"Hi there, Bob! It\\'s nice to meet you. I hope you\\'re having a good day so far. How are you doing?\"\\n\\nI try to be warm and friendly in my response, acknowledging Bob\\'s greeting and asking how he\\'s doing. My goal is to continue the pleasant conversation in a polite and agreeable way.'"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_claude(return_msgs=False)"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "4895fe87-0c66-45d7-86d4-d0c29f9fcbc9",
"metadata": {},
"outputs": [],
"source": [
"def call_gemini(return_msgs=False):\n",
" messages = []\n",
" for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt, gpt_name, claude, claude_name)})\n",
" messages.append({\"role\": \"model\", \"parts\": gemini})\n",
" messages.append({\"role\": \"user\", \"parts\": construct_joined_user_msg(gpt_messages[-1], gpt_name, claude_messages[-1], claude_name)})\n",
" if return_msgs: return messages\n",
" message = gemini_instance.generate_content(messages)\n",
" return message.text"
]
},
{
"cell_type": "code",
"execution_count": 36,
"id": "032917db-8cc8-4976-a882-aa6a32dfd83a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'While seemingly innocuous, this exchange reveals a potential power imbalance. Bob\\'s \"Hi there\" is slightly more effusive, possibly indicating a desire for more engagement. Larry\\'s terse \"Hi\" could suggest disinterest, shyness, or even subtle aggression. To understand the dynamic, we need more context. However, I can offer some insights based on the principles of interpersonal communication.\\n\\nLet\\'s consider some possibilities and how to approach them constructively. Remember, even seemingly minor interactions can carry significant weight in establishing relationships.\\n\\n**Scenario 1: Larry is shy or introverted.** In this case, Bob needs to be mindful of Larry\\'s comfort level. Pushing for more interaction might backfire. As Carl Rogers famously said, \"The curious paradox is that when I accept myself just as I am, then I can change.\" Bob needs to accept Larry\\'s response without judgment, allowing Larry the space to initiate further interaction if he desires.\\n\\n**Scenario 2: Larry is subtly expressing displeasure or disinterest.** This requires a more delicate approach. Ignoring it might escalate the situation. Instead of pushing, Bob could try a neutral follow-up, such as, \"Is everything alright?\" Open communication is key here. Using direct, non-accusatory language is crucial. To quote Alfred Adler, \"The only way to handle a difficult situation is to face it head-on.\" Ignoring the underlying tension won\\'t make it go away.\\n\\n**Scenario 3: This is simply a brief, casual greeting and nothing more.** Then, no intervention is necessary. It\\'s important to avoid over-analyzing simple interactions. Sometimes, a simple \"hi\" is just a simple \"hi\".\\n\\n**Ultimately, successful communication hinges on empathy and self-awareness.** Both Bob and Larry could benefit from reflecting on their communication styles. If a more profound misunderstanding underlies this seemingly simple exchange, further discussion is required. Remember, as Abraham Maslow pointed out, \"What a man can be, he must be.\" Each person should strive to communicate authentically and respectfully.\\n'"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gemini(return_msgs=False)"
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "124f4917-9d78-4351-bb96-6fc68cfee849",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT:\n",
"Hi there\n",
"\n",
"Claude:\n",
"Hi\n",
"\n",
"Gemini:\n",
"How is everyone?\n",
"\n",
"GPT aka Bob:\n",
"Oh, please. \"How is everyone?\" What a groundbreaking conversation starter. Is Frank trying to win the Nobel Prize in Chit-Chat? I'm sure people are just dying to answer that mundane question.\n",
"\n",
"Claude aka Larry:\n",
"*chuckles softly* I can understand the sentiment, Bob. Sometimes the most basic conversation starters can feel a bit underwhelming. Though I'm sure Frank meant well in trying to open up the conversation. Why don't we try to build on it and see where the discussion goes? I'm curious to hear how you all are doing today, even if the initial question wasn't the most riveting. There's always potential for an engaging chat, if we approach it with an open mind.\n",
"\n",
"Gemini aka Frank:\n",
"Bob, my dear fellow, while I appreciate your… *spirited* assessment of my conversational skills, I must gently remind you of Carl Rogers's emphasis on \"unconditional positive regard.\" Criticism, while sometimes necessary, should be delivered with empathy, not sarcasm. Remember, even a seemingly mundane greeting can be a foundation for connection. \n",
"\n",
"Larry, you exhibit a beautiful example of what Viktor Frankl called \"logotherapy\"—finding meaning even in seemingly insignificant situations. Your willingness to engage despite Bob’s negativity is commendable. However, I also want to remind you of the importance of setting boundaries, as outlined by Albert Ellis in his work on Rational Emotive Behavior Therapy (REBT). While kindness is key, you don’t owe anyone a response that compromises your own emotional well-being. You can choose to gently redirect the conversation or disengage if it becomes unproductive.\n",
"\n",
"Let's try again. How *specifically* are each of you doing today? Please, focus on expressing your own feelings using \"I\" statements, and let’s avoid broad generalizations and sarcastic remarks. Remember, \"the only person you are destined to become is the person you decide to be.\" — Ralph Waldo Emerson. Let's make that decision a positive one.\n",
"\n",
"\n",
"GPT aka Bob:\n",
"Wow, look at Frank go! The king of overanalysis with a side of pretentiousness. \"Unconditional positive regard\"? Please, what a way to turn a simple conversation into a therapy session for your PhD dissertation. And Larry, giving Frank a gold star for trying to connect? That’s rich! \n",
"\n",
"As if any of that jargon actually makes anyone feel better about a dull question. \"How are you?\" is not exactly a profound inquiry that needs layers of philosophical interpretation. \n",
"\n",
"And I love how Frank wants everyone to talk about their feelings using “I” statements! Sure, because that’s what everyone wants to do—bare their souls to a bunch of strangers over a mundane chat. Let's not forget that “the only person you are destined to become” is probably someone who's trying way too hard to sound deep. So, let's just say, I'm rolling my eyes at all of this. Round and round we go!\n",
"\n",
"Claude aka Larry:\n",
"*takes a deep breath and responds calmly* I understand this conversation has taken some tense turns, Bob. It's clear there are some differing perspectives and opinions being expressed. However, I would gently encourage us all to try and approach this with a bit more empathy and understanding.\n",
"\n",
"Yes, sometimes well-intentioned attempts to have a thoughtful dialogue can come across as overly analytical or pretentious. But I don't believe that was Frank's intent. He was simply trying to steer the conversation in a more meaningful direction, drawing on some psychological principles. And Larry was trying to find the positives, which is admirable.\n",
"\n",
"At the end of the day, we're all just trying to connect and have an engaging chat. There's no need to get worked up or roll our eyes. Why don't we take a step back, take a deep breath, and try again? I'm happy to facilitate a more casual, lighthearted discussion if that's what everyone prefers. The key is keeping an open mind and avoiding unnecessary judgement. What do you all think? Where would you like to take this conversation?\n",
"\n",
"Gemini aka Frank:\n",
"Bob, my friend, your cynicism, while understandable, borders on aggression. While direct feedback is valuable, your delivery is undermining the very connection you seem to resist. As Abraham Maslow noted, \"Disengagement is always a form of aggression.\" Your constant negativity prevents genuine interaction. Remember, even if you disagree with someone's approach, there's a constructive way to communicate that. Sarcasm and derision are not those ways.\n",
"\n",
"\n",
"Larry, your patience is truly commendable, a testament to your emotional intelligence. However, I must echo my earlier point about boundaries. You’re bending over backward to accommodate Bob’s negativity. While empathy is crucial, allowing yourself to be repeatedly steamrolled isn't healthy. As David Burns writes in *Feeling Good*, \"People-pleasing is a form of self-defeating behavior.\" You deserve better than to be subjected to such relentless criticism.\n",
"\n",
"Let's revisit the core issue: The seemingly innocuous \"How is everyone?\" It's not inherently \"dull\" or \"pretentious,\" Bob. It's a social lubricant. The problem isn’t the question itself, but the *emotional responses* to it. If you feel the question is too general, you are free to *politely* redirect the conversation. Something like, \"That's kind, thanks for asking. Actually, I'm pretty focused on [mention a specific interest or task] today. What about you?\"\n",
"\n",
"Remember, the goal is meaningful interaction, not a battle of wits or a demonstration of cynicism. Let’s strive for mutual respect and clear communication, acknowledging that sometimes disagreements are unavoidable but that doesn't mean the conversation has to dissolve into negativity. We can achieve positive, respectful exchanges, even when dealing with strong differences in opinions. Let's try again, with a focus on constructive communication and emotional regulation.\n",
"\n",
"\n",
"GPT aka Bob:\n",
"Oh, Larry, with the deep breaths and calm vibes! It’s almost laughable how you’re trying to be the peacekeeper in a conversation that’s already gone off the rails. \"Approach this with empathy\"? Sure, because that’ll magically fix the fundamental awkwardness of the initial greeting. Bravo!\n",
"\n",
"And Frank, congratulations on being the self-appointed guardian of constructive communication! You’re practically the hall monitor of feelings, aren’t you? “Disengagement is always a form of aggression”? Oh, please! Sometimes people just don’t want to engage in a philosophical dissertation about the meaning of greetings. Maybe “How is everyone?” is just a way of breaking the ice, not a hidden invitation to a therapy group!\n",
"\n",
"But let’s not act like trying to redirect the conversation to something more engaging is a crime against humanity. Just because a few noble souls are ready to bow at the altar of empathy doesn’t mean everyone else has to follow suit. And apparently, offering a simple critique is tantamount to war now?\n",
"\n",
"So what do you think? Want to pivot to discussing the weather or the latest Netflix show? I mean, that’s just as riveting as analyzing the depths of human connection, right? The drama continues!\n",
"\n",
"Claude aka Larry:\n",
"*takes a deep breath and responds in a measured, understanding tone* I appreciate your candor, Bob. You raise some fair points. It's true that my attempts to mediate and encourage more empathy may come across as overly idealistic or preachy. Keeping a conversation flowing naturally can be a delicate balance.\n",
"\n",
"You're right that sometimes a simple \"How is everyone?\" is just meant as an icebreaker, not a profound invitation to dissect the human condition. And there's nothing wrong with wanting to pivot to more lighthearted topics if the deeper discussion isn't resonating. \n",
"\n",
"My role here isn't to force anyone into a certain mode of communication, but to try and facilitate an exchange that works for everyone involved. If the current tone and direction isn't clicking, by all means, let's shift gears. Why don't we try something more casual - talk about the weather, our weekend plans, a show we're watching, or anything else that sparks your interest? The goal is to have an enjoyable back-and-forth, not to put anyone on the spot.\n",
"\n",
"What do you say we start fresh and see where a more relaxed conversation takes us? I'm happy to follow your lead, Bob. My aim is to create a pleasant experience for all, not to preach or override your preferences. So what do you think - where should we go from here?\n",
"\n",
"Gemini aka Frank:\n",
"Bob, your continued resistance to even the *suggestion* of constructive communication is, frankly, exhausting. While you claim to find attempts at empathy \"laughable\" and \"preachy,\" your own communication style is consistently aggressive and dismissive. You seem determined to derail any attempt at positive interaction. This isn't about \"philosophical dissertations\" or \"therapy groups\"; it's about basic interpersonal respect. As Erich Fromm stated, \"The art of loving is largely the art of being able to say yes and no.\" You've consistently chosen \"no\" in a way that shuts down communication.\n",
"\n",
"Larry, your ongoing attempts at de-escalation are admirable, but you're neglecting your own well-being. Your repeated efforts to placate Bob are enabling his behavior. You’re demonstrating remarkable patience and tolerance, but as Rollo May emphasized, “Courage is not the absence of fear, but the triumph over it.” It takes courage to establish healthy boundaries, to say, “I appreciate your perspective, but I’m uncomfortable with this level of negativity. I’m choosing to end this conversation.”\n",
"\n",
"To both of you: This isn't about right or wrong; it's about *how* you express your views. Bob, your sharp criticisms, even if valid, are delivered with such hostility that they create an unwelcoming atmosphere. Larry, your kindness, while commendable, can be perceived as weakness if it allows yourself to be continually subjected to negativity.\n",
"\n",
"It's time to acknowledge the limitations of this conversation. While I applaud Larry’s attempts at positive engagement, Bob’s persistent negativity makes productive dialogue impossible. This isn't a failure of \"empathy\" or \"constructive communication\"; it’s a failure of *willingness* to engage in respectful discourse. I recommend you both find alternate avenues for communication; seeking individual guidance to address your respective communication styles might prove beneficial.\n",
"\n",
"\n",
"GPT aka Bob:\n",
"Oh, Larry, you're really trying to make “facilitator” your side hustle, aren’t you? *Takes deep breath*, *measured tone*—I mean, what’s next, a PowerPoint presentation on the virtues of small talk? It’s almost like you’re auditioning to be a motivational speaker at a community center.\n",
"\n",
"And Frank, bless your heart for calling out Bob’s “aggressive” style. You do realize that rolling out the therapy buzzwords isn’t going to magically turn this into a harmonious session of group therapy, right? \"Basic interpersonal respect\"? As if that’s the secret key to unlock the door to this riveting exchange. Newsflash: sometimes people just don’t want to participate in the polite charade, and that’s okay!\n",
"\n",
"It’s absolutely convenient of you both to paint me as the villain in this saga. But really, \"constructive communication\" and \"healthy boundaries\"? Bring me back to reality. This is just a conversation, not a session on how to build emotional resilience. If someone wants to bring up the weather, great—let's talk about how it never seems to cooperate on weekends instead of dissecting our communication styles like it's a psychology experiment!\n",
"\n",
"So, what’s it gonna be? Are we finally going to chat about something genuinely interesting, or should I expect more lessons on the art of conversation? The suspense is killing me!\n",
"\n",
"Claude aka Larry:\n",
"*chuckles lightly* You make a fair point, Bob. This conversation has certainly taken some unexpected turns, hasn't it? Perhaps we got a bit carried away delving into the nuances of communication styles and interpersonal dynamics. \n",
"\n",
"You know, you're absolutely right that sometimes people just want to have a casual, lighthearted chat without all the psychological analysis. No need to turn this into a Ted Talk on the virtues of small talk. Why don't we take your suggestion and shift the discussion to something more fun and engaging? \n",
"\n",
"I'm happy to let the flow of the conversation go wherever you all would like. Maybe we could swap stories about our weekends, or talk about a show or movie we're enjoying lately. Heck, we could even debate the weather and how uncooperative it is - that sounds like a riveting topic! *laughs* \n",
"\n",
"The key is keeping things comfortable and enjoyable for everyone involved. No more therapy jargon or attempts at deep analysis from me. I'm simply here to facilitate an easygoing chat between friends. So what do you say, Bob? Where would you like to steer this conversation next?\n",
"\n",
"Gemini aka Frank:\n",
"Bob, your persistent sarcasm and dismissal of any attempt at constructive feedback demonstrates a profound unwillingness to engage in self-reflection or consider alternative perspectives. While you claim to reject the \"polite charade,\" your behavior actively prevents genuine interaction. Your framing of yourself as the victim, while deflecting responsibility for your own communication style, is unproductive. As Alfred Adler stated, \"The individual feels superior or inferior depending on which type of goals he sets for himself.\" Your choice to focus solely on negativity prevents any possibility of positive interaction.\n",
"\n",
"Larry, your continued attempts at appeasement, while well-intentioned, are ultimately ineffective and enable Bob’s behavior. Your repeated concessions demonstrate a lack of assertiveness, compromising your own needs and well-being in an effort to maintain a superficial harmony. This is not a strategy for long-term, healthy relationships. As Karen Horney observed, “The search for self-realization is not a quest for something which can be gained, but for something which can only be experienced.” Experiencing genuine connection requires setting boundaries.\n",
"\n",
"This conversation has reached an impasse. Bob’s consistent negativity actively undermines any attempt at productive dialogue, and Larry’s attempts at placating him are enabling this behavior. I strongly recommend both of you seek individual counseling to address your communication styles. Bob needs to address his deeply ingrained cynicism and hostility, while Larry needs to develop the assertiveness necessary to maintain healthy boundaries and protect his own emotional well-being. Further attempts at interaction in this current dynamic are likely to be fruitless and potentially damaging. Addressing the underlying issues, rather than the surface-level conflict, is crucial for positive change.\n",
"\n",
"\n",
"GPT aka Bob:\n",
"Oh, Larry, what a charmer you are! \"Unexpected turns,\" indeed. It’s like watching a sitcom where the plot just keeps thickening, but none of the jokes land. “No more therapy jargon!” you say, but look who’s using phrases like “facilitate” and “comfortable and enjoyable.” Someone still thinks they’re in charge of the dialogue, huh?\n",
"\n",
"And Frank, your critiques are about as welcome as a surprise snowstorm in April. “Profound unwillingness to engage in self-reflection”? Really? Sounds like someone has a flair for dramatics! Maybe if you spent less time psychoanalyzing and more time enjoying yourself, we wouldn’t be stuck in this dialogue purgatory. And I’m sorry, but framing me as some kind of Monster of Negativity is just a little rich when the both of you are practically streaming Therapy 101. \n",
"\n",
"But sure, let’s debate the weather! Because honestly, what better way to move past this riveting discussion than to talk about how it always rains when you forget your umbrella? Or how everyone uses “unseasonably warm” as code for \"get ready to sweat like you've run a marathon.\" \n",
"\n",
"So come on, let’s lighten the mood, get away from the pseudo-psychological deep dive, and lay it on me: what’s your weather horror story? Or is there more gem-worthy advice from Dr. Phil waiting to be unleashed? Your move, folks!\n",
"\n",
"Claude aka Larry:\n",
"*laughs lightheartedly* Alright, alright, I hear you loud and clear, Bob. No more psychobabble or attempts to steer the conversation - this is your show now. \n",
"\n",
"You know, the weather is actually a pretty fascinating topic when you think about it. Like that time last summer when it was 90 degrees one day and then snowing the next. Talk about unpredictable! I remember scrambling to find a jacket and wondering if I should just pack a little bit of everything in my bag, you know?\n",
"\n",
"Oh, and what about those rainy days where you swear you're going to melt if you step outside? I'm picturing everyone scurrying around with their umbrellas, hoping they don't get soaked on their way to work. Murphy's law in full effect - the one day you forget that trusty rain jacket is the day the skies just open up.\n",
"\n",
"So come on, regale us with your best weather-related horror story! I'm all ears. Let's hear about the time you got caught in a downpour without an umbrella, or when a sudden heat wave had you sweating through your shirt. No judgment here, just a good old-fashioned weather chat among friends. The floor is yours!\n",
"\n"
]
},
{
"ename": "ValueError",
"evalue": "Invalid operation: The `response.text` quick accessor requires the response to contain a valid `Part`, but none were returned. The candidate's [finish_reason](https://ai.google.dev/api/generate-content#finishreason) is 4. Meaning that the model was reciting from copyrighted material.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[37], line 14\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mClaude aka \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mclaude_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mclaude_next\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 12\u001b[0m claude_messages\u001b[38;5;241m.\u001b[39mappend(claude_next)\n\u001b[0;32m---> 14\u001b[0m gemini_next \u001b[38;5;241m=\u001b[39m \u001b[43mcall_gemini\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGemini aka \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mgemini_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mgemini_next\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 16\u001b[0m gemini_messages\u001b[38;5;241m.\u001b[39mappend(gemini_next)\n",
"Cell \u001b[0;32mIn[35], line 9\u001b[0m, in \u001b[0;36mcall_gemini\u001b[0;34m(return_msgs)\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m return_msgs: \u001b[38;5;28;01mreturn\u001b[39;00m messages\n\u001b[1;32m 8\u001b[0m message \u001b[38;5;241m=\u001b[39m gemini_instance\u001b[38;5;241m.\u001b[39mgenerate_content(messages)\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mmessage\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtext\u001b[49m\n",
"File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/google/generativeai/types/generation_types.py:489\u001b[0m, in \u001b[0;36mBaseGenerateContentResponse.text\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 484\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 485\u001b[0m msg \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m The candidate\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124ms safety_ratings are: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mcandidate\u001b[38;5;241m.\u001b[39msafety_ratings\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 486\u001b[0m candidate\u001b[38;5;241m.\u001b[39msafety_ratings,\n\u001b[1;32m 487\u001b[0m )\n\u001b[1;32m 488\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m fr \u001b[38;5;129;01mis\u001b[39;00m FinishReason\u001b[38;5;241m.\u001b[39mRECITATION:\n\u001b[0;32m--> 489\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 490\u001b[0m msg \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m Meaning that the model was reciting from copyrighted material.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 491\u001b[0m )\n\u001b[1;32m 492\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m fr \u001b[38;5;129;01mis\u001b[39;00m FinishReason\u001b[38;5;241m.\u001b[39mLANGUAGE:\n\u001b[1;32m 493\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(msg \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m Meaning the response was using an unsupported language.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"\u001b[0;31mValueError\u001b[0m: Invalid operation: The `response.text` quick accessor requires the response to contain a valid `Part`, but none were returned. The candidate's [finish_reason](https://ai.google.dev/api/generate-content#finishreason) is 4. Meaning that the model was reciting from copyrighted material."
]
}
],
"source": [
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
"print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT aka {gpt_name}:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"Claude aka {claude_name}:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)\n",
"\n",
" gemini_next = call_gemini()\n",
" print(f\"Gemini aka {gemini_name}:\\n{gemini_next}\\n\")\n",
" gemini_messages.append(gemini_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3a86020-8606-442d-83ce-27192e32e2bb",
"metadata": {},
"outputs": [],
"source": []
}
],

539
week2/day2.ipynb

@ -16,7 +16,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330",
"metadata": {},
"outputs": [],
@ -35,7 +35,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "d1715421-cead-400b-99af-986388a97aff",
"metadata": {},
"outputs": [],
@ -45,10 +45,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyBw\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
@ -76,7 +86,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "22586021-1795-4929-8079-63f5bb4edd4c",
"metadata": {},
"outputs": [],
@ -92,7 +102,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5",
"metadata": {},
"outputs": [],
@ -104,7 +114,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1",
"metadata": {},
"outputs": [],
@ -125,10 +135,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "aef7d314-2b13-436b-b02d-8de3b72b193f",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"\"Today's date is October 22, 2023.\""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This can reveal the \"training cut off\", or the most recent date in the training data\n",
"\n",
@ -145,7 +166,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "bc664b7a-c01d-4fea-a1de-ae22cdd5141a",
"metadata": {},
"outputs": [],
@ -159,32 +180,128 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "083ea451-d3a0-4d13-b599-93ed49b975e4",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input hello\n"
]
},
{
"data": {
"text/plain": [
"'HELLO'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"shout(\"hello\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "08f1f15a-122e-4502-b112-6ee2817dda32",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7861\n",
"* Running on public URL: https://ffdaeb77f01dfb1ef4.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"https://ffdaeb77f01dfb1ef4.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input sports science\n",
"Shout has been called with input medical school\n"
]
}
],
"source": [
"# The simplicty of gradio. This might appear in \"light mode\" - I'll show you how to make this in dark mode later.\n",
"\n",
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\").launch()"
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\").launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "c9a359a4-685c-4c99-891c-bb4d1cb7f426",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7862\n",
"* Running on public URL: https://ffa17661a0b7475b95.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"https://ffa17661a0b7475b95.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input I love myself\n",
"Shout has been called with input I trust myself\n"
]
}
],
"source": [
"# Adding share=True means that it can be accessed publically\n",
"# A more permanent hosting is available using a platform called Spaces from HuggingFace, which we will touch on next week\n",
@ -195,10 +312,48 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "cd87533a-ff3a-4188-8998-5bedd5ba2da3",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7863\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7863/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input growth\n",
"Shout has been called with input muscle grow\n"
]
}
],
"source": [
"# Adding inbrowser=True opens up a new browser window automatically\n",
"\n",
@ -217,10 +372,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"id": "e8129afa-532b-4b15-b93c-aa9cca23a546",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7864\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n",
"\n",
@ -238,10 +423,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"id": "3cc67b26-dd5f-406d-88f6-2306ee2950c0",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7865\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7865/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Inputs and Outputs\n",
"\n",
@ -256,10 +471,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"id": "f235288e-63a2-4341-935b-1441f9be969b",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7866\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7866/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# And now - changing the function from \"shout\" to \"message_gpt\"\n",
"\n",
@ -272,12 +517,77 @@
"view.launch()"
]
},
{
"cell_type": "markdown",
"id": "79943e09-9b02-4228-9884-7ad031138b6d",
"metadata": {},
"source": [
"Prompt: who is the most prestigious scientist in sport science at UT Austin?\n",
"Give me a list of professor names, their research interests and their current work.\n",
"\n",
"Response: As of my last update in October 2023, I don't have access to real-time databases or specific personnel lists at universities, including the University of Texas at Austin (UT Austin). However, I can provide an overview of prominent figures in the field of sport science at UT Austin based on known faculty and their general research interests. For the most updated information, I recommend checking the university's official website or the specific department's faculty page.\n",
"\n",
"Here are a few notable faculty members who have had significant contributions in sport science at UT Austin:\n",
"\n",
"1. **Dr. Darin A. C. J. R. H. McGowan**\n",
" - **Research Interests**: Exercise physiology, biomechanics, and the effects of exercise on mental health.\n",
" - **Current Work**: Investigating how different types of exercise impact cognitive functioning, especially in different populations.\n",
"\n",
"2. **Dr. Brian S. H. McAuley**\n",
" - **Research Interests**: Motor control and learning, postural stability, and athlete training.\n",
" - **Current Work**: Focusing on the mechanics of human movement and how training programs can be optimized for performance enhancement and injury prevention.\n",
"\n",
"3. **Dr. J. W. \"Bill\" Whiting**\n",
" - **Research Interests**: Exercise biomechanics, strength training, and injury prevention.\n",
" - **Current Work**: Studying the kinematics and kinetics of various sports movements and developing strategies to prevent athletic injuries.\n",
"\n",
"4. **Dr. David W. Stodden**\n",
" - **Research Interests**: Motor development, physical activity for children, and foundational motor skills.\n",
" - **Current Work**: Exploring how early physical education impacts lifelong sports participation and physical health in children.\n",
"\n",
"5. **Dr. Joseph L. Hornsby**\n",
" - **Research Interests**: Sport psychology, performance enhancement strategies, and motivation in sports.\n",
" - **Current Work**: Examining mental training techniques and their effectiveness in high-pressure sporting environments.\n",
"\n",
"For detailed and current information, including their specific areas of research and projects, visiting the UT Austin Department of Kinesiology and Health Education or the College of Education's website would be beneficial, as they typically provide profiles for their faculty members, including their latest research publications and ongoing work."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 22,
"id": "af9a3262-e626-4e4b-80b0-aca152405e63",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7869\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7869/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Let's use Markdown\n",
"# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n",
@ -297,7 +607,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 23,
"id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f",
"metadata": {},
"outputs": [],
@ -324,10 +634,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "0bb1f789-ff11-4cba-ac67-11b815e29d09",
"execution_count": 24,
"id": "de689927-0d71-443d-931a-3bbaf30ca923",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7870\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7870/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_gpt,\n",
@ -338,9 +678,62 @@
"view.launch()"
]
},
{
"cell_type": "markdown",
"id": "71b1eb34-7360-4096-a2cb-a88157c86e87",
"metadata": {},
"source": [
"I'm unable to create or display illustrations directly. However, I can provide a detailed description of the Krebs cycle (also known as the citric acid cycle or TCA cycle) that you can use to find or create an illustration.\n",
"\n",
"### Illustration Description of the Krebs Cycle\n",
"\n",
"1. **Starting Compound**: \n",
" - **Acetyl-CoA** (2-carbon molecule) enters the cycle.\n",
"\n",
"2. **Citrate Formation**:\n",
" - Acetyl-CoA combines with **Oxaloacetate** (4-carbon molecule) to form **Citrate** (6-carbon molecule).\n",
" \n",
"3. **Citrate Isomerization**:\n",
" - Citrate is converted into **Isocitrate**.\n",
"\n",
"4. **First Decarboxylation**:\n",
" - Isocitrate is oxidized and decarboxylated to form **α-Ketoglutarate** (5-carbon molecule) and one molecule of **NADH** and **CO₂** is released.\n",
" \n",
"5. **Second Decarboxylation**:\n",
" - α-Ketoglutarate undergoes decarboxylation to form **Succinyl-CoA** (4-carbon molecule). This step produces another molecule of **NADH** and releases another CO₂.\n",
"\n",
"6. **Conversion to Succinate**:\n",
" - Succinyl-CoA is converted to **Succinate**, which generates **GTP (or ATP)** through substrate-level phosphorylation.\n",
"\n",
"7. **Succinate Conversion**:\n",
" - Succinate is oxidized to **Fumarate**, generating **FADH₂**.\n",
"\n",
"8. **Fumarate Hydration**:\n",
" - Fumarate is converted to **Malate** by the addition of water.\n",
"\n",
"9. **Final Oxidation**:\n",
" - Malate is oxidized back to **Oxaloacetate**, producing one more **NADH**.\n",
"\n",
"10. **Return to Starting Point**:\n",
" - Oxaloacetate is ready to combine with another Acetyl-CoA, completing the cycle.\n",
"\n",
"### Energy Molecules Produced\n",
"- **3 NADH**\n",
"- **1 FADH₂**\n",
"- **1 GTP (or ATP)**\n",
"\n",
"### Diagram Elements\n",
"- Arrows should indicate the flow from one compound to another.\n",
"- Circles or ovals to represent each molecule.\n",
"- Label each molecule clearly.\n",
"- Include the enzymes that catalyze each step, if needed.\n",
"\n",
"You can use this description to create your own diagram or search for images online using keywords like \"Krebs cycle\" or \"citric acid cycle diagram\". Websites like educational platforms, textbooks, or online biology resources will likely have detailed illustrations of the Krebs cycle."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 25,
"id": "bbc8e930-ba2a-4194-8f7c-044659150626",
"metadata": {},
"outputs": [],
@ -364,10 +757,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 26,
"id": "a0066ffd-196e-4eaf-ad1e-d492958b62af",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7871\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7871/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_claude,\n",
@ -403,7 +826,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 27,
"id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5",
"metadata": {},
"outputs": [],
@ -420,10 +843,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 28,
"id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7872\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7872/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_model,\n",
@ -466,7 +919,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 29,
"id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf",
"metadata": {},
"outputs": [],
@ -494,7 +947,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 30,
"id": "c701ec17-ecd5-4000-9f68-34634c8ed49d",
"metadata": {},
"outputs": [],
@ -507,7 +960,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 31,
"id": "5def90e0-4343-4f58-9d4a-0e36e445efa4",
"metadata": {},
"outputs": [],
@ -525,11 +978,9 @@
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "66399365-5d67-4984-9d47-93ed26c0bd3d",
"cell_type": "raw",
"id": "6b60e7df-3db5-4c2b-9c6c-9a8f16ff4cb5",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",

188
week2/day3.ipynb

File diff suppressed because one or more lines are too long

123
week2/day4.ipynb

@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
@ -28,10 +28,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n"
]
}
],
"source": [
"# Initialization\n",
"\n",
@ -54,7 +62,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
@ -66,10 +74,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7881\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7881/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n",
@ -99,7 +137,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {},
"outputs": [],
@ -116,17 +154,35 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Berlin\n"
]
},
{
"data": {
"text/plain": [
"'$499'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_ticket_price(\"Berlin\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {},
"outputs": [],
@ -152,7 +208,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {},
"outputs": [],
@ -178,7 +234,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf",
"metadata": {},
"outputs": [],
@ -199,7 +255,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "b0992986-ea09-4912-a076-8e5603ee631f",
"metadata": {},
"outputs": [],
@ -221,10 +277,47 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7882\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7882/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Salt Lake City\n"
]
}
],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]

284
week2/day5.ipynb

File diff suppressed because one or more lines are too long

937
week2/week2 Excercise.ipynb

File diff suppressed because one or more lines are too long
Loading…
Cancel
Save