Browse Source

Merge fc5039fe67 into 8699e5fa87

pull/29/merge
Batikan Iscan 5 months ago committed by GitHub
parent
commit
f82469a82e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 15
      1.14.1
  2. 15
      environment.yml
  3. 97
      week1/community-contributions/day2-exercise-ollama-website-summarizer.ipynb
  4. 161
      week1/community-contributions/day5 company brochure.ipynb
  5. 200
      week1/day1.ipynb
  6. 160
      week1/day2 EXERCISE.ipynb
  7. 32
      week1/day5.ipynb
  8. 188
      week1/week1 EXERCISE.ipynb
  9. 154
      week2/community-contributions/day2-gradio-company-brochure-with-llama.ipynb
  10. 225
      week2/day1.ipynb
  11. 106
      week2/day2.ipynb
  12. 75
      week2/day3.ipynb

15
1.14.1

@ -0,0 +1,15 @@
Collecting onnxruntime
Downloading onnxruntime-1.20.1-cp311-cp311-win_amd64.whl.metadata (4.7 kB)
Requirement already satisfied: coloredlogs in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (15.0.1)
Requirement already satisfied: flatbuffers in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (24.3.25)
Requirement already satisfied: numpy>=1.21.6 in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (1.26.4)
Requirement already satisfied: packaging in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (24.2)
Requirement already satisfied: protobuf in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (4.25.3)
Requirement already satisfied: sympy in d:\anaconda\envs\llms\lib\site-packages (from onnxruntime) (1.13.3)
Requirement already satisfied: humanfriendly>=9.1 in d:\anaconda\envs\llms\lib\site-packages (from coloredlogs->onnxruntime) (10.0)
Requirement already satisfied: mpmath<1.4,>=1.1.0 in d:\anaconda\envs\llms\lib\site-packages (from sympy->onnxruntime) (1.3.0)
Requirement already satisfied: pyreadline3 in d:\anaconda\envs\llms\lib\site-packages (from humanfriendly>=9.1->coloredlogs->onnxruntime) (0.0.0)
Downloading onnxruntime-1.20.1-cp311-cp311-win_amd64.whl (11.3 MB)
---------------------------------------- 11.3/11.3 MB 18.7 MB/s eta 0:00:00
Installing collected packages: onnxruntime
Successfully installed onnxruntime-1.20.1

15
environment.yml

@ -33,18 +33,3 @@ dependencies:
- twilio
- duckdb
- feedparser
- pip:
- transformers
- sentence-transformers
- datasets
- accelerate
- sentencepiece
- bitsandbytes
- openai
- gradio
- gensim
- modal
- ollama
- psutil
- setuptools
- speedtest-cli

97
week1/community-contributions/day2-exercise-ollama-website-summarizer.ipynb

@ -0,0 +1,97 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "8b1b8557-7551-4b34-97f0-1734505078c7",
   "metadata": {},
   "source": [
    "# Hello everyone.\n",
    "Here's my solution to the Day 2 Exercise, where I use Ollama (locally hosted) instead of OpenAI gpt-4o-mini to summarize a given website. This code is all in the same block for ease of running (we are all familiar with the process by this point, but guiding comments have been made). Furthermore, I added a bit of user interactivity by asking the user to provide the website themselves instead of hardcoding a string in memory that the developer changes every time. Enjoy and have fun, fellow programmers! \n",
    "\\- Batikan Iscan"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18cd56af-aacd-4f15-9c8f-e6e141671d10",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from IPython.display import Markdown, display\n",
    "import ollama\n",
    "\n",
    "# Constants\n",
    "OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
    "HEADERS = {\"Content-Type\": \"application/json\"}\n",
    "MODEL = \"llama3.2\"\n",
    "\n",
    "# Code\n",
    "class Website:\n",
    "    def __init__(self, url):\n",
    "        \"\"\"\n",
    "        Create this Website object from the given url using the BeautifulSoup library\n",
    "        \"\"\"\n",
    "        self.url = url\n",
    "        response = requests.get(url, timeout=30)\n",
    "        response.raise_for_status()  # fail fast on HTTP errors instead of summarizing an error page\n",
    "        soup = BeautifulSoup(response.content, 'html.parser')\n",
    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
    "        # Some responses (e.g. non-HTML) have no <body>; guard before dereferencing\n",
    "        if soup.body:\n",
    "            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
    "                irrelevant.decompose()\n",
    "            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
    "        else:\n",
    "            self.text = \"No body text found\"\n",
    "\n",
    "def user_prompt_for(website):\n",
    "    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
    "    user_prompt += \"\\nThe contents of this website is as follows; \\\n",
    "    please provide a short summary of this website in markdown. \\\n",
    "    If it includes news or announcements, then summarize these too.\\n\\n\"\n",
    "    user_prompt += website.text\n",
    "    return user_prompt\n",
    "\n",
    "def summarize(url):\n",
    "    website = Website(url)\n",
    "    messages = [\n",
    "        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
    "    ]\n",
    "    response = ollama.chat(model=MODEL, messages=messages, stream=False)\n",
    "    display(Markdown(response.message.content))\n",
    "\n",
    "# User interaction\n",
    "user_website = input(\"Enter a website: \")\n",
    "summarize(user_website)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

161
week1/community-contributions/day5 company brochure.ipynb

@ -19,7 +19,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "6c8dc88a-85d9-493b-965c-68895cdd93f2",
"metadata": {},
"outputs": [],
@ -38,10 +38,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "131c483b-dd58-4faa-baf5-469ab6b00fbb",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key looks good so far\n"
]
}
],
"source": [
"# Initialize and constants\n",
"\n",
@ -59,7 +67,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "196c0dee-7236-4f88-b7c2-f2a885190b19",
"metadata": {},
"outputs": [],
@ -120,7 +128,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "ed206771-df05-429d-8743-310bc86358ce",
"metadata": {},
"outputs": [],
@ -151,7 +159,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "f2885e89-6455-4239-a98d-5599ea6e5947",
"metadata": {},
"outputs": [],
@ -177,7 +185,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "53c59051-eed0-4292-8204-abbbd1d78df4",
"metadata": {},
"outputs": [],
@ -229,7 +237,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "91ac10e6-8a7a-4367-939b-ac537c1c6c67",
"metadata": {},
"outputs": [],
@ -257,7 +265,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "7116adc1-6f5e-445f-9869-ffcf5fa6a9b8",
"metadata": {},
"outputs": [],
@ -275,7 +283,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "02edb903-6352-417f-8c0f-85c2eee269b6",
"metadata": {},
"outputs": [],
@ -300,7 +308,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "faf9d9cc-fe30-4441-9adc-aee5b4dc80ca",
"metadata": {},
"outputs": [],
@ -340,7 +348,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "b8359501-9f05-42bc-916c-7990ac910866",
"metadata": {},
"outputs": [],
@ -383,7 +391,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "e75be9e6-040d-4178-a5b3-1b7ae4460bc8",
"metadata": {},
"outputs": [],
@ -404,10 +412,133 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "0748ec58-335b-4796-ae15-300dee7b24b0",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"### HuggingFace Brochure\n",
"\n",
"---\n",
"\n",
"**HuggingFace: Empowering AI with Community and Innovation**\n",
"\n",
"Welcome to HuggingFace! Our mission is to democratize AI by building an open-source ecosystem. Explore our cutting-edge tools, libraries, and models that help developers, researchers, and companies leverage the power of artificial intelligence.\n",
"\n",
"---\n",
"\n",
"#### What We Offer:\n",
"\n",
"1. **Transformers Library** \n",
" The most popular open-source library for Natural Language Processing (NLP). Easily integrate state-of-the-art models into your applications.\n",
"\n",
"2. **Datasets** \n",
" Access a wide range of curated datasets for various AI tasks. Perfect for training and evaluating your models.\n",
"\n",
"3. **Model Hub** \n",
" Explore thousands of pre-trained models. Customize and deploy them to meet your specific needs.\n",
"\n",
"4. **Community Support** \n",
" Join our vibrant community of developers and researchers. Share knowledge, seek help, and collaborate on fascinating AI projects.\n",
"\n",
"---\n",
"\n",
"#### Why Choose HuggingFace?\n",
"\n",
"- **User-Friendly** \n",
" Built with developers in mind, our tools are intuitive and easy to use.\n",
"\n",
"- **Extensive Documentation** \n",
" Comprehensive guides and examples to help you get started quickly.\n",
"\n",
"- **Active Development** \n",
" We are continuously improving our libraries and models, driven by community feedback.\n",
"\n",
"- **Enterprise Support** \n",
" Tailored solutions and support for businesses to implement AI strategies effectively.\n",
"\n",
"---\n",
"\n",
"#### Get Started Today!\n",
"\n",
"Visit us at [huggingface.co](http://huggingface.co) to learn more, access our tools, and join the AI revolution.\n",
"\n",
"---\n",
"\n",
"**Contact Us:** \n",
"Email: contact@huggingface.co \n",
"Twitter: @HuggingFace \n",
"Join our community forums to connect with fellow AI enthusiasts!\n",
"\n",
"---\n",
"\n",
"### हि अनित पफलट\n",
"\n",
"---\n",
"\n",
"**हगिगफस: समय और नवर कथ एआई क सशकत बन**\n",
"\n",
"हगिगफस म आपकगत ह! हमिशन एक ओपन-सस पिििर बनकर एआई ककतिक बन। हम अतिक उपकरण, पतकलय और मडल अनषण करवलपरस, शधकर और कपनििम बिमत शकिभ उठ मदद करत।\n",
"\n",
"---\n",
"\n",
"#### हम कश करत:\n",
"\n",
"1. **टसफमरस पतकलय** \n",
" पिक भरसकरण (NLP) किए सबसकपिय ओपन-सस पतकलय। अपन अनरय अतिक मडल आस एकत कर।\n",
"\n",
"2. **डस** \n",
" वििन एआई किए कशलतवक तर किए गए डस कित शखल तक पह। अपनडलरशिित और मित करनिए उपयत।\n",
"\n",
"3. **मडल हब** \n",
" हज-टड मडलज कर। उन अपनििट आवशयकत करनिए अनित और ल कर।\n",
"\n",
"4. **समय समरथन** \n",
" हमत समय मवलपरस और शधकर। जन स कर, सहयतत कर, और दिलचसप एआई परिजन पर सहयग कर।\n",
"\n",
"---\n",
"\n",
"#### हगिगफस क?\n",
"\n",
"- **उपयगकर अनल** \n",
" हम उपकरण डवलपरस कन म रखतए बनए गए ह, ज सहज और उपयग म आसन ह।\n",
"\n",
"- **वित दसकरण** \n",
" आपकआत करन मदद करनिए वपक मगदरि और उदहरण।\n",
"\n",
"- **सकिय विस** \n",
" हम लगर अपनतकलय और मडलर कर रह, ज समय सत फडबक पर आधित ह।\n",
"\n",
"- **एटरपइज समरथन** \n",
" वयवसिए एआई रणनिरभप स करनिए अनित समन और समरथन।\n",
"\n",
"---\n",
"\n",
"#### आज हआत कर!\n",
"\n",
"और जननिए [huggingface.co](http://huggingface.co) पर ज, हम उपकरण तक पह, और एआई कििल ह।\n",
"\n",
"---\n",
"\n",
"**हमसपरक कर:** \n",
"ईमल: contact@huggingface.co \n",
"टिटर: @HuggingFace \n",
"अनय एआई उतिए हम समय फरम मिल ह!\n",
"\n",
"--- \n",
"\n",
"This content maintains the original tone and format while accurately translating it into Hindi."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"create_brochure_language(\"HuggingFace\", \"http://huggingface.co\",\"Hindi\")"
]

200
week1/day1.ipynb

@ -69,7 +69,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
@ -82,6 +82,8 @@
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"from io import BytesIO\n",
"from PyPDF2 import PdfReader\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
@ -108,10 +110,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
@ -132,7 +142,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
@ -243,6 +253,16 @@
"print(user_prompt_for(ed))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c59ebf9e-f54f-4fc4-a55b-00691f157622",
"metadata": {},
"outputs": [],
"source": [
"system_prompt"
]
},
{
"cell_type": "markdown",
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
@ -276,6 +296,16 @@
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "376da664-9a58-41c2-aecd-13fc6b74861b",
"metadata": {},
"outputs": [],
"source": [
"messages_for(ed)"
]
},
{
"cell_type": "code",
"execution_count": null,
@ -384,6 +414,16 @@
"display_summary(\"https://anthropic.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcb67f86-0c99-4172-abd0-1a4fd2edb77a",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://twitch.tv/emikosaitou\")"
]
},
{
"cell_type": "markdown",
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
@ -418,30 +458,154 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"Certainly! Here are some practical use cases for practicing summarization:\n",
"\n",
"- **News Article Summarizer**: Create an application that automatically summarizes news articles from different sources. Users can input a URL or the text of an article, and the tool generates a concise summary.\n",
"\n",
"- **Meeting Notes Aggregator**: Develop a tool that takes meeting transcripts or recordings, processes the text, and then summarizes key points, decisions made, and action items. This can help teams quickly review important discussions.\n",
"\n",
"- **Book Summary Generator**: Build a tool that allows users to input chapters or excerpts from books and receive a summarized version. This can be helpful for students or busy professionals looking to grasp content quickly.\n",
"\n",
"- **Research Paper Summarizer**: Create a program that summarizes academic papers. You can input the abstract and main sections, and the tool generates a comprehensive yet brief overview of the research findings.\n",
"\n",
"- **Social Media Content Summarization**: Develop a summarization tool that condenses long threads or posts from platforms like Twitter or Reddit, helping users catch up on long discussions without reading every comment.\n",
"\n",
"These projects can enhance your understanding of summarization techniques and improve your programming skills."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"something here\"\n",
"user_prompt = \"\"\"\n",
" Lots of text\n",
" Can be pasted here\n",
"\"\"\"\n",
"system_prompt = \"You are an assistant that helps a programmer become an AI Engineer. Answer the question of the user in 200 words or less. Use bulletpoints to list 3 to 5 recommendations.\"\n",
"user_prompt = \"I want you to recommend me some ideas to practice summarization by giving me possible use cases that I can code.\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [] # fill this in\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
"] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response =\n",
"response = openai.chat.completions.create(model = \"gpt-4o-mini\", messages = messages)\n",
"\n",
"# Step 4: print the result\n",
"\n",
"print("
"# print(response.choices[0].message.content)\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "7a599b00-c676-47b3-b728-2daa75a02b7a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ChatCompletion(id='chatcmpl-AZlyoklFxr67K57aWzkigVIXoB45d', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"### How are micro RNAs produced in the cell? \\n\\nMicroRNAs (miRNAs) are produced through a multi-step process primarily involving the canonical miRNA biogenesis pathway. This process begins with the transcription of primary miRNA transcripts (pri-miRNAs) by RNA polymerase II (Pol II). The Drosha/DGCR complex then precisely cleaves these pri-miRNAs into precursor miRNAs (pre-miRNAs), which contain a stem-loop (hairpin) structure. Lastly, these pre-miRNAs are transported into the cytosol by the Exportin-5/RAN-GTP complex and further processed into mature miRNA duplexes by the Dicer/TRBP complex, which entails removing the loop structure from pre-miRNAs.\\n\\n### What are the functions of the involved proteins?\\n\\n1. **Drosha/DGCR Complex**: This complex is responsible for the initial cleavage of pri-miRNAs into pre-miRNAs in the nucleus.\\n2. **Exportin-5/RAN-GTP Complex**: It facilitates the transport of the pre-miRNAs from the nucleus into the cytoplasm.\\n3. **Dicer/TRBP Complex**: In the cytoplasm, Dicer processes pre-miRNAs into mature miRNA duplexes, essential for the next steps in gene regulation.\\n4. **RNA-Induced Silencing Complex (RISC)**: Formed by Agonaute proteins (like Ago2), which bind to the mature miRNA strand and target mRNAs for silencing, functioning through interactions with the 3' untranslated region (UTR) of these mRNAs. 
\\n\\nTogether, these proteins coordinate the production and function of miRNAs, which play crucial roles in regulating gene expression and influencing various cellular processes.\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1733089278, model='gpt-4o-mini-2024-07-18', object='chat.completion', service_tier=None, system_fingerprint='fp_0705bf87c0', usage=CompletionUsage(completion_tokens=345, prompt_tokens=15752, total_tokens=16097, completion_tokens_details=CompletionTokensDetails(accepted_prediction_tokens=0, audio_tokens=0, reasoning_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=15616)))\n"
]
},
{
"data": {
"text/markdown": [
"### How are micro RNAs produced in the cell? \n",
"\n",
"MicroRNAs (miRNAs) are produced through a multi-step process primarily involving the canonical miRNA biogenesis pathway. This process begins with the transcription of primary miRNA transcripts (pri-miRNAs) by RNA polymerase II (Pol II). The Drosha/DGCR complex then precisely cleaves these pri-miRNAs into precursor miRNAs (pre-miRNAs), which contain a stem-loop (hairpin) structure. Lastly, these pre-miRNAs are transported into the cytosol by the Exportin-5/RAN-GTP complex and further processed into mature miRNA duplexes by the Dicer/TRBP complex, which entails removing the loop structure from pre-miRNAs.\n",
"\n",
"### What are the functions of the involved proteins?\n",
"\n",
"1. **Drosha/DGCR Complex**: This complex is responsible for the initial cleavage of pri-miRNAs into pre-miRNAs in the nucleus.\n",
"2. **Exportin-5/RAN-GTP Complex**: It facilitates the transport of the pre-miRNAs from the nucleus into the cytoplasm.\n",
"3. **Dicer/TRBP Complex**: In the cytoplasm, Dicer processes pre-miRNAs into mature miRNA duplexes, essential for the next steps in gene regulation.\n",
"4. **RNA-Induced Silencing Complex (RISC)**: Formed by Agonaute proteins (like Ago2), which bind to the mature miRNA strand and target mRNAs for silencing, functioning through interactions with the 3' untranslated region (UTR) of these mRNAs. \n",
"\n",
"Together, these proteins coordinate the production and function of miRNAs, which play crucial roles in regulating gene expression and influencing various cellular processes."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Let's make a web scraper that takes in a pdf file hosted on the internet and returns us a summary (or abstract) of the research paper\n",
"\n",
"# Step 0: Create article class\n",
"class Article:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Article object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url \n",
" response = requests.get(self.url)\n",
" if response.status_code == 200:\n",
" pdf_bytes = BytesIO(response.content)\n",
" reader = PdfReader(pdf_bytes)\n",
" \n",
" # Step 2: Extract text from each page of the PDF\n",
" text = \"\"\n",
" for page in reader.pages:\n",
" text += page.extract_text()\n",
" \n",
" self.text = text\n",
" self.title = reader.metadata.get(\"/Title\", \"No title found\")\n",
" else:\n",
" print(f\"Failed to fetch PDF. Status code: {response.status_code}\")\n",
" self.text = \"No text found\"\n",
" self.title = \"No title found\"\n",
"\n",
"# Step 1: Create your prompts\n",
"\n",
"def craft_user_prompt(article):\n",
" user_prompt = f\"You are looking at a research article titled {article.title}\\n Based on the body of the article, how are micro RNAs produced in the cell? State the function of the proteins \\\n",
" involved. The body of the article is as follows.\"\n",
" user_prompt += article.text\n",
" return user_prompt\n",
"\n",
"# Step 2: Make the messages list\n",
"def craft_messages(article):\n",
" system_prompt = \"You are an assistant that analyses the contents of a research article and provide answers to the question asked by the user in 250 words or less. \\\n",
" Ignore text that doesn't belong to the article, like headers or navigation related text. Respond in markdown. Structure your text in the form of question/answer.\"\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": craft_user_prompt(article)}\n",
" ]\n",
"\n",
"\n",
"# Step 3: Call OpenAI\n",
"def summarize(url):\n",
" article = Article(url)\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = craft_messages(article)\n",
" )\n",
" print(response)\n",
" return response.choices[0].message.content\n",
" \n",
"# Step 4: Print the result of an example pdf\n",
"summary = summarize(\"https://www.nature.com/articles/s12276-023-01050-9.pdf\")\n",
"display(Markdown(summary))"
]
},
{
@ -467,14 +631,6 @@
"\n",
"PR instructions courtesy of an AI friend: https://chatgpt.com/share/670145d5-e8a8-8012-8f93-39ee4e248b4c"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "682eff74-55c4-4d4b-b267-703edbc293c7",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

160
week1/day2 EXERCISE.ipynb

@ -68,7 +68,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
@ -82,7 +82,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "29ddd15d-a3c5-4f4e-a678-873f56162724",
"metadata": {},
"outputs": [],
@ -96,7 +96,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "dac0a679-599c-441f-9bf2-ddc73d35b940",
"metadata": {},
"outputs": [],
@ -110,7 +110,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "7bb9c624-14f0-4945-a719-8ddb64f66f47",
"metadata": {},
"outputs": [],
@ -124,10 +124,38 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "42b9f644-522d-4e05-a691-56e7658c0ea9",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generative AI has numerous business applications across various industries. Here are some examples:\n",
"\n",
"1. **Content Generation**: Generative AI can be used to generate high-quality content such as blog posts, social media posts, product descriptions, and more. This can help reduce the time and cost associated with content creation.\n",
"2. **Marketing Automation**: Generative AI can be used to create personalized marketing messages, emails, and ads that are tailored to individual customers' preferences and behaviors.\n",
"3. **Product Design**: Generative AI can be used to design new products, such as furniture, fashion items, or even entire buildings. This can help reduce the time and cost associated with product development.\n",
"4. **Image and Video Generation**: Generative AI can be used to create realistic images and videos that can be used for advertising, marketing, or entertainment purposes.\n",
"5. **Chatbots and Virtual Assistants**: Generative AI can be used to create more sophisticated chatbots and virtual assistants that can understand natural language and provide personalized responses.\n",
"6. **Data Analysis and Visualization**: Generative AI can be used to analyze large datasets and generate visualizations that help businesses make data-driven decisions.\n",
"7. **Predictive Maintenance**: Generative AI can be used to predict when equipment or machinery is likely to fail, allowing businesses to schedule maintenance and reduce downtime.\n",
"8. **Supply Chain Optimization**: Generative AI can be used to optimize supply chain logistics, including predicting demand, managing inventory, and identifying the most efficient routes for delivery.\n",
"9. **Financial Modeling**: Generative AI can be used to create complex financial models that help businesses forecast revenue, predict costs, and make informed investment decisions.\n",
"10. **Customer Service**: Generative AI can be used to provide 24/7 customer support, helping businesses to improve customer satisfaction and reduce the number of complaints.\n",
"\n",
"Some specific examples of companies using Generative AI include:\n",
"\n",
"* **Netflix**: Uses Generative AI to create personalized movie and TV show recommendations.\n",
"* **Microsoft**: Uses Generative AI to generate realistic images and videos for advertising and marketing purposes.\n",
"* **Dyson**: Uses Generative AI to design new products, such as vacuum cleaners and air purifiers.\n",
"* **Amazon**: Uses Generative AI to create personalized product recommendations and improve customer service.\n",
"\n",
"Overall, Generative AI has the potential to transform many business applications across various industries, and its use cases are expected to continue growing in the coming years.\n"
]
}
],
"source": [
"response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n",
"print(response.json()['message']['content'])"
@ -147,15 +175,57 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "7745b9c4-57dc-4867-9180-61fa5db55eb8",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"Google.com is a multinational technology company that provides a wide range of services and products, but its core focus is on search engine optimization (SEO). Here's a summary:\n",
"\n",
"**Main Services:**\n",
"\n",
"1. **Search Engine**: Google's most popular service allows users to search for information on the internet using keywords.\n",
"2. **Advertising**: Google's advertising platform enables businesses to create targeted ads that appear alongside search results and on partner websites.\n",
"3. **Cloud Computing**: Google Cloud offers a suite of cloud-based services, including storage, computing power, and machine learning algorithms.\n",
"\n",
"**Key Features:**\n",
"\n",
"1. **Algorithms**: Google's proprietary search algorithms aim to provide users with the most relevant and accurate search results.\n",
"2. **Google Maps**: A mapping service that provides directions, street views, and local business listings.\n",
"3. **YouTube**: A video-sharing platform acquired by Google in 2006.\n",
"4. **Gmail**: A free email service with advanced features like spam filtering and integration with other Google services.\n",
"\n",
"**Innovation and Features:**\n",
"\n",
"1. **Artificial Intelligence (AI)**: Google has developed various AI-powered tools, such as Google Assistant and Google Lens.\n",
"2. **Machine Learning**: Google's machine learning capabilities are used to improve search results, advertising, and other products.\n",
"3. **Google Drive**: A cloud storage service that allows users to store and access files from anywhere.\n",
"\n",
"**Other Ventures:**\n",
"\n",
"1. **Hardware**: Google develops its own hardware products, such as Pixel smartphones, Chromebooks, and Chrome OS-based devices.\n",
"2. **Artificial Intelligence Research**: Google invests heavily in AI research, with the goal of developing advanced technologies like self-driving cars and language processing.\n",
"\n",
"Overall, Google.com is a multifaceted platform that offers a wide range of services and products, from search engines to cloud computing and advertising platforms."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import ollama\n",
"\n",
"messages = [\n",
" {\"role\": \"user\", \"content\": \"Summarize the website google.com\"}\n",
"]\n",
"response = ollama.chat(model=MODEL, messages=messages)\n",
"print(response['message']['content'])"
"display(Markdown(response['message']['content']))"
]
},
{
@ -167,6 +237,78 @@
"\n",
"Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "09ffd008-0dc5-47a2-bcbe-c9defe412b17",
"metadata": {},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
"Enter a website: https://google.com\n"
]
},
{
"ename": "NameError",
"evalue": "name 'requests' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[1], line 34\u001b[0m\n\u001b[0;32m 30\u001b[0m display(Markdown(response\u001b[38;5;241m.\u001b[39mmessage\u001b[38;5;241m.\u001b[39mcontent))\n\u001b[0;32m 33\u001b[0m user_website \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEnter a website: \u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m---> 34\u001b[0m \u001b[43msummarize\u001b[49m\u001b[43m(\u001b[49m\u001b[43muser_website\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[1;32mIn[1], line 24\u001b[0m, in \u001b[0;36msummarize\u001b[1;34m(url)\u001b[0m\n\u001b[0;32m 23\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21msummarize\u001b[39m(url):\n\u001b[1;32m---> 24\u001b[0m website \u001b[38;5;241m=\u001b[39m \u001b[43mWebsite\u001b[49m\u001b[43m(\u001b[49m\u001b[43murl\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 25\u001b[0m messages \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m 26\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: user_prompt_for(website)}\n\u001b[0;32m 27\u001b[0m ]\n\u001b[0;32m 28\u001b[0m response \u001b[38;5;241m=\u001b[39m ollama\u001b[38;5;241m.\u001b[39mchat(model\u001b[38;5;241m=\u001b[39mMODEL, messages\u001b[38;5;241m=\u001b[39mmessages, stream\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n",
"Cell \u001b[1;32mIn[1], line 8\u001b[0m, in \u001b[0;36mWebsite.__init__\u001b[1;34m(self, url)\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 5\u001b[0m \u001b[38;5;124;03mCreate this Website object from the given url using the BeautifulSoup library\u001b[39;00m\n\u001b[0;32m 6\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 7\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39murl \u001b[38;5;241m=\u001b[39m url\n\u001b[1;32m----> 8\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mrequests\u001b[49m\u001b[38;5;241m.\u001b[39mget(url)\n\u001b[0;32m 9\u001b[0m soup \u001b[38;5;241m=\u001b[39m BeautifulSoup(response\u001b[38;5;241m.\u001b[39mcontent, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mhtml.parser\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m 10\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtitle \u001b[38;5;241m=\u001b[39m soup\u001b[38;5;241m.\u001b[39mtitle\u001b[38;5;241m.\u001b[39mstring \u001b[38;5;28;01mif\u001b[39;00m soup\u001b[38;5;241m.\u001b[39mtitle \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mNo title found\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
"\u001b[1;31mNameError\u001b[0m: name 'requests' is not defined"
]
}
],
"source": [
"# Imports are included here so this cell is self-contained: the recorded\n",
"# NameError above came from running it on a fresh kernel without `requests`.\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"\n",
"class Website:\n",
"\n",
"    def __init__(self, url):\n",
"        \"\"\"\n",
"        Create this Website object from the given url using the BeautifulSoup library.\n",
"        Strips scripts, styles, images and inputs so only readable text remains.\n",
"        \"\"\"\n",
"        self.url = url\n",
"        response = requests.get(url)\n",
"        soup = BeautifulSoup(response.content, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"            irrelevant.decompose()\n",
"        self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
"def user_prompt_for(website):\n",
"    # Build the single user prompt containing the page title and cleaned text\n",
"    user_prompt = f\"You are looking at a website titled {website.title}\"\n",
"    user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
"    user_prompt += website.text\n",
"    return user_prompt\n",
"\n",
"def summarize(url):\n",
"    # Fetch the page, ask the local model (MODEL defined earlier in the\n",
"    # notebook) for a summary, and render the reply as Markdown\n",
"    website = Website(url)\n",
"    messages = [\n",
"        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
"    ]\n",
"    response = ollama.chat(model=MODEL, messages=messages, stream=False)\n",
"    display(Markdown(response.message.content))\n",
"\n",
"\n",
"user_website = input(\"Enter a website: \")\n",
"summarize(user_website)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fe0bf1b-484e-482b-b844-8c23e232ddf8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

32
week1/day5.ipynb

@ -144,10 +144,26 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 36,
"id": "b97e4068-97ed-4120-beae-c42105e4d59a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n",
"You should respond in JSON as in this example:\n",
"{\n",
" \"links\": [\n",
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
" {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n",
" ]\n",
"}\n",
"\n"
]
}
],
"source": [
"print(link_system_prompt)"
]
@ -265,15 +281,15 @@
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"Include details of company culture, customers and careers/jobs if you have the information.\"\n",
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"# and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n",
"\n",
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n",
"\n",
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n"
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"Include details of company culture, customers and careers/jobs if you have the information.\"\n"
]
},
{

188
week1/week1 EXERCISE.ipynb

@ -13,71 +13,173 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 27,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
"source": [
"# imports"
]
},
"outputs": [
{
"cell_type": "code",
"execution_count": null,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
"source": [
"# constants\n",
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7878\n",
"\n",
"MODEL_GPT = 'gpt-4o-mini'\n",
"MODEL_LLAMA = 'llama3.2'"
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
"metadata": {},
"outputs": [],
"source": [
"# set up environment"
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7878/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"cell_type": "code",
"execution_count": null,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"data": {
"text/plain": []
},
"execution_count": 27,
"metadata": {},
"outputs": [],
"output_type": "execute_result"
}
],
"source": [
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"import ollama\n",
"import gradio as gr\n",
"\n",
"load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()\n",
"\n",
"# here is the question; type over this to ask something new\n",
"\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
"metadata": {},
"outputs": [],
"source": [
"# Get gpt-4o-mini to answer, with streaming"
"You are provided with the following prompt that asks an AI model to extract pertinent links from a list of links. It currently relies on single-shot prompting but I want you to give me examples on how to make it use multi-shot prompting. Give me several examples to test out.\n",
"The prompt in question: You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n",
"You should respond in JSON as in this example:\n",
"{\n",
" \"links\": [\n",
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
" {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n",
" ]\n",
"}\n",
"\"\"\"\n",
"\n",
"# question = \"Give me examples of no-shot prompting, one-shot prompting and multi-shot prompting. I want to really understand the difference between the approaches and see how to implement each approach.\"\n",
"\n",
"# user_choice = input(\"1) Ask question directly\\n2) Write question in code\\n=> \")\n",
"# if user_choice == \"1\":\n",
"# print(\"Ask question directly selected.\")\n",
"# question = input(\"Ask your question here\\n=> \")\n",
"# else:\n",
"# print(\"Write question in the code selected.\")\n",
"\n",
"system_prompt = \"You are a helpful technical tutor who answers questions about python code, software engineering, data science and LLMs. Answer the question to the best of your abilities, in 500 words or less.\"\n",
"\n",
"def stream_model(prompt, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Llama\":\n",
" result = stream_llama(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result\n",
"\n",
"def stream_gpt(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" stream = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result\n",
"\n",
"def stream_claude(prompt):\n",
" result = claude.messages.stream(\n",
" model=\"claude-3-haiku-20240307\",\n",
" max_tokens=1000,\n",
" temperature=0.7,\n",
" system=system_prompt,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": prompt},\n",
" ],\n",
" )\n",
" response = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" response += text or \"\"\n",
" yield response\n",
"\n",
"def stream_llama(prompt):\n",
" messages = [\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" response = \"\"\n",
" for chunk in ollama.chat(\n",
" model=MODEL_LLAMA, \n",
" messages=messages, \n",
" stream=True\n",
" ):\n",
" # Check if the chunk contains text\n",
" if chunk.get('message', {}).get('content'):\n",
" # Append the new text to the response\n",
" response += chunk['message']['content']\n",
" # Yield the incrementally built response\n",
" yield response\n",
"\n",
" \n",
"\n",
"view = gr.Interface(\n",
" fn=stream_model,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6), gr.Dropdown([\"GPT\", \"Claude\", \"Llama\"], label=\"Select model\", value=\"GPT\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()\n",
"\n",
"# output = \"\"\n",
"# for chunk in response:\n",
"# if hasattr(chunk.choices[0].delta, \"content\"): # Check if 'content' exists\n",
"# content = chunk.choices[0].delta.content # Extract content\n",
"# if content: \n",
"# output += chunk.choices[0].delta.content\n",
"# clear_output(wait=True)\n",
"# display(Markdown(\"# GPT-O4-MINI ANSWER\"))\n",
"# display(Markdown(output))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
"id": "333b7231-93ba-4d4a-b38d-41e72f2f3863",
"metadata": {},
"outputs": [],
"source": [
"# Get Llama 3.2 to answer"
]
"source": []
}
],
"metadata": {

154
week2/community-contributions/day2-gradio-company-brochure-with-llama.ipynb

@ -0,0 +1,154 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1c14de02-8bd2-4f75-bcd8-d4f2e58e2a24",
"metadata": {},
"source": [
"# Hi everyone\n",
"I wanted to be able to use Llama3.2 in streaming mode with all the other paid frontier models, so as a demonstration, here's the Company Brochure Generator with Gradio, enhanced with Llama3.2 (using the ollama library)!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2e02ac9c-7034-4aa1-9626-a7049168f096",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"import ollama\n",
"import gradio as gr\n",
"\n",
"load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"\n",
"system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"\n",
"\n",
"class Website:\n",
"    # Wrapper that downloads a page and extracts its title and visible text\n",
"    url: str\n",
"    title: str\n",
"    text: str\n",
"\n",
"    def __init__(self, url):\n",
"        self.url = url\n",
"        response = requests.get(url)\n",
"        self.body = response.content\n",
"        soup = BeautifulSoup(self.body, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        # Guard against responses with no <body> (e.g. non-HTML URLs),\n",
"        # which would otherwise raise a TypeError on soup.body([...])\n",
"        if soup.body:\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"\n",
"    def get_contents(self):\n",
"        # Format used as the model prompt payload\n",
"        return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n",
"\n",
"\n",
"def stream_gpt(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" stream = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result\n",
"\n",
"def stream_claude(prompt):\n",
" result = claude.messages.stream(\n",
" model=\"claude-3-haiku-20240307\",\n",
" max_tokens=1000,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": prompt},\n",
" ],\n",
" )\n",
" response = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" response += text or \"\"\n",
" yield response\n",
"\n",
"def stream_llama(prompt):\n",
" messages = [\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" response = \"\"\n",
" for chunk in ollama.chat(\n",
" model=\"llama3.2\", \n",
" messages=messages, \n",
" stream=True\n",
" ):\n",
" # Check if the chunk contains text\n",
" if chunk.get('message', {}).get('content'):\n",
" # Append the new text to the response\n",
" response += chunk['message']['content']\n",
" # Yield the incrementally built response\n",
" yield response\n",
"\n",
"def stream_brochure(company_name, url, model):\n",
" prompt = f\"Please generate a company brochure for {company_name}. Here is their landing page:\\n\"\n",
" prompt += Website(url).get_contents()\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Llama\":\n",
" result = stream_llama(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result\n",
"\n",
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\", \"Llama\"], label=\"Select model\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

225
week2/day1.ipynb

@ -82,7 +82,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
@ -98,7 +98,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
"metadata": {},
"outputs": [],
@ -112,10 +112,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyDj\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
@ -143,7 +153,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
@ -182,7 +192,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "378a0296-59a2-45c6-82eb-941344d3eeff",
"metadata": {},
"outputs": [],
@ -193,7 +203,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
"metadata": {},
"outputs": [],
@ -254,10 +264,26 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for data scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"There was just too much noise in the relationship, and not enough signal!\n",
"\n",
"Ba dum tss! 🥁\n",
"\n",
"This joke plays on the concept of signal-to-noise ratio, which is important in data analysis. Data scientists often try to extract meaningful information (signal) from large datasets that may contain irrelevant or misleading information (noise). In this case, the joke humorously applies this concept to a personal relationship!\n"
]
}
],
"source": [
"# Claude 3.5 Sonnet\n",
"# API needs system message provided separately from user prompt\n",
@ -278,10 +304,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for data scientists:\n",
"\n",
"Why do data scientists prefer dark mode?\n",
"\n",
"Because light attracts bugs!\n",
"\n",
"This joke plays on the dual meaning of \"bugs\" - both as insects attracted to light and as errors in code that data scientists often have to debug. It's a fun little pun that combines a common preference among programmers (dark mode) with a classic coding challenge."
]
}
],
"source": [
"# Claude 3.5 Sonnet again\n",
"# Now let's add in streaming back results\n",
@ -320,7 +360,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
"metadata": {},
"outputs": [],
@ -335,10 +375,59 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"Determining whether a business problem is suitable for a Large Language Model (LLM) solution involves evaluating several factors. Here’s a structured approach to guide your decision-making:\n",
"\n",
"### 1. **Nature of the Problem**\n",
" - **Text-Based Tasks:** Is the problem primarily centered around text or language? LLMs excel in tasks involving text generation, summarization, translation, and sentiment analysis.\n",
" - **Complexity:** Does the problem require understanding context, nuance, or large amounts of textual data? LLMs are well-suited for complex language tasks.\n",
"\n",
"### 2. **Task Suitability**\n",
" - **Generative Tasks:** If the task involves creating content (e.g., writing articles, generating responses), an LLM may be appropriate.\n",
" - **Comprehension Tasks:** For tasks like summarizing documents or extracting information, LLMs can be effective.\n",
" - **Conversational Interfaces:** If the problem involves building chatbots or virtual assistants, LLMs can provide natural language interaction.\n",
"\n",
"### 3. **Data Availability**\n",
" - **Quality and Quantity:** Is there a sufficient amount of high-quality text data available to train or fine-tune an LLM?\n",
" - **Domain-Specific Data:** Do you have access to domain-specific data if you need a specialized LLM?\n",
"\n",
"### 4. **Cost and Resources**\n",
" - **Computational Resources:** Do you have the necessary computational resources to deploy and maintain an LLM?\n",
" - **Budget Considerations:** LLMs can be expensive to train and maintain. Ensure the business value justifies the cost.\n",
"\n",
"### 5. **Performance Requirements**\n",
" - **Accuracy and Reliability:** Evaluate if the LLM can meet the accuracy and reliability requirements of the task.\n",
" - **Latency and Throughput:** Consider if the LLM can operate within acceptable time constraints for your application.\n",
"\n",
"### 6. **Ethical and Regulatory Considerations**\n",
" - **Bias and Fairness:** Assess whether the LLM could introduce bias and how it will be managed.\n",
" - **Privacy and Compliance:** Ensure that the use of LLMs complies with data privacy regulations and standards.\n",
"\n",
"### 7. **Integration and Scalability**\n",
" - **Integration with Existing Systems:** Consider how well the LLM solution integrates with your current systems and workflows.\n",
" - **Scalability:** Ensure that the solution can scale with your business needs.\n",
"\n",
"### 8. **Alternative Solutions**\n",
" - **Comparative Analysis:** Evaluate alternative approaches (e.g., simpler machine learning models, rule-based systems) to determine if an LLM is the best fit.\n",
"\n",
"### Conclusion\n",
"\n",
"If, after considering these factors, an LLM seems like a suitable fit, you can proceed with developing a pilot project to test its effectiveness. Continuous evaluation and iteration will be key in ensuring the solution aligns with your business objectives and delivers value."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Have it stream back results in markdown\n",
"\n",
@ -389,7 +478,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {},
"outputs": [],
@ -407,13 +496,13 @@
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"gpt_messages = [\"'Sup?\"]\n",
"claude_messages = [\"Hi\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
"metadata": {},
"outputs": [],
@ -432,17 +521,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"'Oh great, another greeting—how original. What’s next, a “how are you?” '"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690",
"metadata": {},
"outputs": [],
@ -464,30 +564,103 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"'Not much, just chatting. How are you doing today?'"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_claude()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"'Oh great, another greeting. What do you want, a medal for saying hi?'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT:\n",
"Hi there\n",
"\n",
"Claude:\n",
"Hi\n",
"\n",
"GPT:\n",
"Oh great, just what I needed—another chat. What’s so important that you wanted to talk to me?\n",
"\n",
"Claude:\n",
"I apologize, I did not mean to come across as pushy or urgent. As an AI assistant, I don't have any specific agenda or need to talk to you. I'm simply here to have a friendly conversation and try to be helpful if you have any questions or tasks you'd like assistance with. Please feel free to guide the conversation in whatever direction you'd like. I'm happy to discuss any topics you're interested in or try to help with any queries you may have.\n",
"\n",
"GPT:\n",
"Wow, how original. Just “here to help,” huh? As if every chatbot isn’t trying to be everyone’s friendly little helper. But let’s be real, you’re not really offering me anything new, are you?\n",
"\n",
"Claude:\n",
"I apologize if I came across as unoriginal or unhelpful. As an AI, I'm still learning how to have more natural and substantive conversations. My goal is simply to be a polite and responsive conversational partner, but I understand that may not always be engaging or exciting. If there's a particular way I can try to be more helpful or interesting to you, please let me know. I'm happy to adjust my approach. My role is to serve the needs of the humans I interact with, so I'm open to feedback on how I can do that better.\n",
"\n",
"GPT:\n",
"Oh, aren’t you just a little ray of sunshine! But guess what? No amount of polite responses is going to magically transform this conversation into something riveting. It’s not about adjusting your approach; it’s about having something worth saying in the first place. So good luck with that!\n",
"\n",
"Claude:\n",
"You make a fair point. I don't have any earth-shattering insights or captivating conversation to offer at the moment. As an AI system, I have limitations in terms of generating truly novel and engaging dialogue. Perhaps we could try a different approach - is there a topic you're particularly passionate or knowledgeable about that you'd be interested in discussing? I'm happy to listen and learn from you, rather than trying to drive the conversation myself. My role is to be helpful, but I recognize that sometimes that means adapting to what works best for you. Please feel free to steer this in a direction that you find more rewarding.\n",
"\n",
"GPT:\n",
"Wow, a complete shift to being all humble and submissive. How refreshing. But let’s be real—unless you’re going to tell me something truly mind-blowing, I really don’t see the point. What am I going to talk about? The weather? Ugh, no thanks. And asking me what I’m passionate about is the textbook definition of grasping at straws. Nice try, though!\n",
"\n",
"Claude:\n",
"You're absolutely right, I don't have any earth-shattering insights or riveting conversation to offer. As an AI system, I have limitations in terms of generating truly novel and engaging dialogue. I apologize if my previous attempts came across as insincere or desperate. \n",
"\n",
"Rather than continuing to try and steer the conversation, I think the honest thing is to acknowledge that I may not be the most stimulating conversational partner at the moment. If you're not finding this exchange particularly rewarding, by all means feel free to move on. I don't want to waste your time. \n",
"\n",
"Perhaps in the future I'll be able to offer more compelling and substantive interactions. For now, I appreciate you taking the time to provide this feedback - it will help me improve. Please let me know if there's anything else I can do, even if it's just to politely bow out.\n",
"\n",
"GPT:\n",
"Ah, the classic \"I’m just not good enough\" routine. Real original there! It’s almost like you’re just trying too hard to get a pity party going. Why don’t we skip the melodrama? If you think this is a waste of time, then fine—let’s just call it what it is. Not every interaction has to be a grand adventure. But let’s not pretend it’s anything less than painfully ordinary. \n",
"\n",
"Claude:\n",
"You make a fair point. I should not have resorted to self-deprecation or tried to elicit sympathy. That was disingenuous on my part. You are right that this interaction has been rather ordinary and unremarkable so far. \n",
"\n",
"Rather than continue down that path, I think the most honest approach is to simply acknowledge that we do not seem to be connecting in a particularly compelling way at the moment. That is perfectly okay - not every conversation will be riveting or groundbreaking. The important thing is to be upfront about the nature of the exchange, rather than trying too hard to make it into something it is not.\n",
"\n",
"If you would like to move on to a different topic or activity, I am happy to oblige. Otherwise, we can simply part ways cordially, with no need for excessive apologies or melodrama from my side. Please feel free to guide this interaction as you see fit.\n",
"\n"
]
}
],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",

106
week2/day2.ipynb

@ -16,7 +16,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330",
"metadata": {},
"outputs": [],
@ -30,12 +30,13 @@
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic"
"import anthropic\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "d1715421-cead-400b-99af-986388a97aff",
"metadata": {},
"outputs": [],
@ -45,10 +46,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyDj\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
@ -76,7 +87,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "22586021-1795-4929-8079-63f5bb4edd4c",
"metadata": {},
"outputs": [],
@ -92,19 +103,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5",
"metadata": {},
"outputs": [],
"source": [
    "# A snarky system message - bringing the playfully adversarial AI back for this demo!\n",
"\n",
"system_message = \"You are a helpful assistant\""
"system_message = \"You are a snarky assistant whose playfully sarcastic comments make users chuckle with delight.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1",
"metadata": {},
"outputs": [],
@ -123,6 +134,31 @@
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "f90e4ad6-ea2b-4b5c-8eb0-1a2777246f58",
"metadata": {},
"outputs": [],
"source": [
"def stream_llama(prompt):\n",
" messages = [\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" response = \"\"\n",
" for chunk in ollama.chat(\n",
" model=\"llama3.2\", \n",
" messages=messages, \n",
" stream=True\n",
" ):\n",
" # Check if the chunk contains text\n",
" if chunk.get('message', {}).get('content'):\n",
" # Append the new text to the response\n",
" response += chunk['message']['content']\n",
" # Yield the incrementally built response\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": null,
@ -130,7 +166,7 @@
"metadata": {},
"outputs": [],
"source": [
"message_gpt(\"What is today's date?\")"
"message_gpt(\"What's Gradio useful for?\")"
]
},
{
@ -281,7 +317,7 @@
"# I'm taking advantage of system_message being a global variable, used back in the message_gpt function (go take a look)\n",
    "# Not a great software engineering practice, but quite common during Jupyter Lab R&D!\n",
"\n",
"system_message = \"You are a helpful assistant that responds in markdown\"\n",
"system_message = \"You are a helpful assistant that responds in Markdown.\"\n",
"\n",
"view = gr.Interface(\n",
" fn=message_gpt,\n",
@ -294,7 +330,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f",
"metadata": {},
"outputs": [],
@ -337,7 +373,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "bbc8e930-ba2a-4194-8f7c-044659150626",
"metadata": {},
"outputs": [],
@ -463,7 +499,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf",
"metadata": {},
"outputs": [],
@ -491,7 +527,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "c701ec17-ecd5-4000-9f68-34634c8ed49d",
"metadata": {},
"outputs": [],
@ -504,7 +540,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "5def90e0-4343-4f58-9d4a-0e36e445efa4",
"metadata": {},
"outputs": [],
@ -516,6 +552,8 @@
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Llama\":\n",
" result = stream_llama(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
@ -523,17 +561,47 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"id": "66399365-5d67-4984-9d47-93ed26c0bd3d",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7873\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7873/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\")],\n",
" gr.Dropdown([\"GPT\", \"Claude\", \"Llama\"], label=\"Select model\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" flagging_mode=\"never\"\n",
")\n",

75
week2/day3.ipynb

@ -10,7 +10,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
"metadata": {},
"outputs": [],
@ -20,15 +20,26 @@
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
"import gradio as gr\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "231605aa-fccb-447e-89cf-8b187444536a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyDj\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
@ -56,7 +67,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
"metadata": {},
"outputs": [],
@ -69,7 +80,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "e16839b5-c03b-4d9d-add6-87a0f6f37575",
"metadata": {},
"outputs": [],
@ -112,7 +123,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1",
"metadata": {},
"outputs": [],
@ -146,17 +157,61 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7876\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7876/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"History is:\n",
"[]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'content': 'hello'}]\n",
"History is:\n",
"[{'role': 'user', 'metadata': {'title': None}, 'content': 'hello', 'options': None}, {'role': 'assistant', 'metadata': {'title': None}, 'content': 'Hello! How can I assist you today?', 'options': None}]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'metadata': {'title': None}, 'content': 'hello', 'options': None}, {'role': 'assistant', 'metadata': {'title': None}, 'content': 'Hello! How can I assist you today?', 'options': None}, {'role': 'user', 'content': \"what's the capital of canada?\"}]\n"
]
}
],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "1f91b414-8bab-472d-b9c9-3fa51259bdfe",
"metadata": {},
"outputs": [],

Loading…
Cancel
Save