
Initial commit with weeks 1-4

pull/2/head
Edward Donner 8 months ago
commit b83998ce00
21    .gitignore
392   environment.yml
407   week1/day1.ipynb
1163  week1/day5.ipynb
656   week2/day1.ipynb
767   week2/day2.ipynb
411   week2/day3.ipynb
346   week2/day4.ipynb
650   week2/day5.ipynb
865   week4/day3.ipynb
780   week4/day4.ipynb
BIN   week4/optimized
51    week4/optimized.cpp

21  .gitignore vendored

@@ -1,3 +1,5 @@
# GitHub's default gitignore for Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -106,10 +108,8 @@ ipython_config.py
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
@@ -155,8 +155,13 @@ dmypy.json
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.idea/
# Added this to ignore models downloaded from HF
model_cache/
# Ignore finder files
.DS_Store
/.DS_Store
# Ignore Chroma vector database
vector_db/

392  environment.yml

@@ -0,0 +1,392 @@
name: llm_engineering
channels:
- conda-forge
- defaults
dependencies:
- accelerate=0.33.0
- aiofiles=22.1.0
- aiohttp=3.9.5
- aiosignal=1.2.0
- annotated-types=0.6.0
- ansi2html=1.9.1
- anthropic=0.34.1
- anyio=4.2.0
- aom=3.9.1
- appnope=0.1.2
- argon2-cffi=21.3.0
- argon2-cffi-bindings=21.2.0
- asttokens=2.0.5
- async-lru=2.0.4
- async-timeout=4.0.3
- attrs=23.1.0
- aws-c-auth=0.7.26
- aws-c-cal=0.7.4
- aws-c-common=0.9.27
- aws-c-compression=0.2.19
- aws-c-event-stream=0.4.3
- aws-c-http=0.8.8
- aws-c-io=0.14.18
- aws-c-mqtt=0.10.4
- aws-c-s3=0.6.4
- aws-c-sdkutils=0.1.19
- aws-checksums=0.1.18
- aws-crt-cpp=0.28.1
- aws-sdk-cpp=1.11.379
- azure-core-cpp=1.13.0
- azure-identity-cpp=1.8.0
- azure-storage-blobs-cpp=12.12.0
- azure-storage-common-cpp=12.7.0
- azure-storage-files-datalake-cpp=12.11.0
- babel=2.11.0
- backoff=2.2.1
- bcrypt=4.2.0
- beautifulsoup4=4.12.3
- blas=1.0
- bleach=4.1.0
- blinker=1.6.2
- bottleneck=1.3.7
- brotli=1.0.7
- brotli-python=1.0.9
- bzip2=1.0.8
- c-ares=1.33.1
- ca-certificates=2024.8.30
- cached-property=1.5.2
- cachetools=5.3.3
- cairo=1.18.0
- certifi=2024.7.4
- cffi=1.16.0
- charset-normalizer=3.3.2
- chroma-hnswlib=0.7.3
- chromadb=0.4.14
- click=8.1.7
- coloredlogs=15.0.1
- comm=0.2.1
- contourpy=1.2.0
- cryptography=43.0.0
- cycler=0.11.0
- dash=2.14.2
- datasets=2.19.1
- dav1d=1.2.1
- debugpy=1.6.7
- decorator=5.1.1
- defusedxml=0.7.1
- dill=0.3.8
- distro=1.9.0
- executing=0.8.3
- expat=2.6.2
- faiss=1.8.0
- faiss-cpu=1.8.0
- fastapi=0.112.2
- ffmpeg=7.0.2
- ffmpy=0.3.0
- filelock=3.13.1
- flask=3.0.3
- flask-compress=1.13
- font-ttf-dejavu-sans-mono=2.37
- font-ttf-inconsolata=2.001
- font-ttf-source-code-pro=2.030
- font-ttf-ubuntu=0.83
- fontconfig=2.14.2
- fonts-anaconda=1
- fonts-conda-ecosystem=1
- fonttools=4.51.0
- freetype=2.12.1
- fribidi=1.0.10
- frozenlist=1.4.0
- fsspec=2024.3.1
- gflags=2.2.2
- glog=0.7.1
- gmp=6.3.0
- gmpy2=2.1.2
- gnutls=3.8.7
- google-ai-generativelanguage=0.6.6
- google-api-core=2.19.1
- google-api-python-client=2.143.0
- google-auth=2.29.0
- google-auth-httplib2=0.2.0
- google-generativeai=0.7.2
- googleapis-common-protos=1.63.2
- gradio=4.42.0
- gradio-client=1.3.0
- graphite2=1.3.14
- greenlet=3.0.1
- grpcio=1.62.2
- h11=0.14.0
- harfbuzz=9.0.0
- httpcore=1.0.2
- httplib2=0.22.0
- httpx=0.27.0
- huggingface_hub=0.24.6
- humanfriendly=10.0
- icu=75.1
- idna=3.7
- importlib-metadata=7.0.1
- importlib-resources=6.4.0
- importlib_resources=6.4.0
- ipykernel=6.28.0
- ipython=8.25.0
- ipywidgets=8.1.2
- itsdangerous=2.2.0
- jedi=0.19.1
- jinja2=3.1.4
- jiter=0.5.0
- joblib=1.4.2
- json5=0.9.6
- jsonpatch=1.33
- jsonpointer=2.1
- jsonschema=4.23.0
- jsonschema-specifications=2023.7.1
- jupyter-dash=0.4.2
- jupyter-lsp=2.2.0
- jupyter_client=8.6.0
- jupyter_core=5.7.2
- jupyter_events=0.10.0
- jupyter_server=2.14.1
- jupyter_server_terminals=0.4.4
- jupyterlab=4.0.11
- jupyterlab_pygments=0.1.2
- jupyterlab_server=2.25.1
- jupyterlab_widgets=3.0.10
- kiwisolver=1.4.4
- krb5=1.21.3
- lame=3.100
- langchain=0.2.15
- langchain-chroma=0.1.3
- langchain-core=0.2.37
- langchain-openai=0.1.23
- langchain-text-splitters=0.2.2
- langsmith=0.1.108
- lcms2=2.16
- lerc=4.0.0
- libabseil=20240116.2
- libarrow=17.0.0
- libarrow-acero=17.0.0
- libarrow-dataset=17.0.0
- libarrow-substrait=17.0.0
- libasprintf=0.22.5
- libass=0.17.3
- libblas=3.9.0
- libbrotlicommon=1.1.0
- libbrotlidec=1.1.0
- libbrotlienc=1.1.0
- libcblas=3.9.0
- libcrc32c=1.1.2
- libcurl=8.9.1
- libcxx=18.1.8
- libdeflate=1.21
- libedit=3.1.20230828
- libev=4.33
- libevent=2.1.12
- libexpat=2.6.2
- libfaiss=1.8.0
- libffi=3.4.2
- libgettextpo=0.22.5
- libgfortran=5.0.0
- libgfortran5=11.3.0
- libglib=2.80.3
- libgoogle-cloud=2.28.0
- libgoogle-cloud-storage=2.28.0
- libgrpc=1.62.2
- libhwloc=2.11.1
- libiconv=1.17
- libidn2=2.3.4
- libintl=0.22.5
- libjpeg-turbo=3.0.3
- liblapack=3.9.0
- libnghttp2=1.58.0
- libopenblas=0.3.21
- libopenvino=2024.3.0
- libopenvino-arm-cpu-plugin=2024.3.0
- libopenvino-auto-batch-plugin=2024.3.0
- libopenvino-auto-plugin=2024.3.0
- libopenvino-hetero-plugin=2024.3.0
- libopenvino-ir-frontend=2024.3.0
- libopenvino-onnx-frontend=2024.3.0
- libopenvino-paddle-frontend=2024.3.0
- libopenvino-pytorch-frontend=2024.3.0
- libopenvino-tensorflow-frontend=2024.3.0
- libopenvino-tensorflow-lite-frontend=2024.3.0
- libopus=1.3.1
- libparquet=17.0.0
- libpng=1.6.43
- libprotobuf=4.25.3
- libpulsar=3.5.1
- libre2-11=2023.09.01
- libsentencepiece=0.2.0
- libsodium=1.0.18
- libsqlite=3.46.0
- libssh2=1.11.0
- libtasn1=4.19.0
- libthrift=0.20.0
- libtiff=4.6.0
- libtorch=2.4.0
- libunistring=0.9.10
- libutf8proc=2.8.0
- libuv=1.48.0
- libvpx=1.14.1
- libwebp-base=1.4.0
- libxcb=1.16
- libxml2=2.12.7
- libzlib=1.3.1
- llvm-openmp=18.1.8
- lz4-c=1.9.4
- markdown-it-py=2.2.0
- markupsafe=2.1.3
- matplotlib=3.8.4
- matplotlib-base=3.8.4
- matplotlib-inline=0.1.6
- mdurl=0.1.0
- mistune=2.0.4
- monotonic=1.5
- mpc=1.1.0
- mpfr=4.0.2
- mpmath=1.3.0
- multidict=6.0.4
- multiprocess=0.70.15
- nbclient=0.8.0
- nbconvert=7.10.0
- nbformat=5.9.2
- ncurses=6.5
- nest-asyncio=1.6.0
- nettle=3.9.1
- networkx=3.3
- nomkl=3.0
- notebook-shim=0.2.3
- numexpr=2.8.7
- numpy=1.26.4
- numpy-base=1.26.4
- onnxruntime=1.17.1
- openai=1.37.0
- openh264=2.4.1
- openjpeg=2.5.2
- openssl=3.3.1
- orc=2.0.2
- orjson=3.9.15
- overrides=7.4.0
- p11-kit=0.24.1
- packaging=24.1
- pandas=2.2.2
- pandocfilters=1.5.0
- parso=0.8.3
- pcre2=10.44
- pexpect=4.8.0
- pillow=10.4.0
- pip=24.2
- pixman=0.43.4
- pkgutil-resolve-name=1.3.10
- platformdirs=3.10.0
- plotly=5.22.0
- posthog=3.5.0
- prometheus_client=0.14.1
- prompt-toolkit=3.0.43
- prompt_toolkit=3.0.43
- proto-plus=1.24.0
- protobuf=4.25.3
- psutil=5.9.0
- pthread-stubs=0.3
- ptyprocess=0.7.0
- pugixml=1.14
- pulsar-client=3.5.0
- pure_eval=0.2.2
- pyarrow=17.0.0
- pyarrow-core=17.0.0
- pyasn1=0.4.8
- pyasn1-modules=0.2.8
- pybind11-abi=4
- pycparser=2.21
- pydantic=2.8.2
- pydantic-core=2.20.1
- pydub=0.25.1
- pygments=2.15.1
- pyopenssl=24.2.1
- pyparsing=3.0.9
- pypika=0.48.9
- pysocks=1.7.1
- python=3.11.9
- python-dateutil=2.9.0post0
- python-dotenv=0.21.0
- python-fastjsonschema=2.16.2
- python-flatbuffers=24.3.25
- python-json-logger=2.0.7
- python-multipart=0.0.9
- python-tzdata=2023.3
- python-xxhash=2.0.2
- python_abi=3.11
- pytorch=2.4.0
- pytz=2024.1
- pyyaml=6.0.1
- pyzmq=25.1.2
- re2=2023.09.01
- readline=8.2
- referencing=0.30.2
- regex=2024.7.24
- requests=2.32.3
- retrying=1.3.3
- rfc3339-validator=0.1.4
- rfc3986-validator=0.1.1
- rich=13.7.1
- rpds-py=0.10.6
- rsa=4.7.2
- ruff=0.3.5
- safetensors=0.4.4
- scikit-learn=1.5.1
- scipy=1.13.1
- semantic_version=2.8.5
- send2trash=1.8.2
- sentencepiece=0.2.0
- sentencepiece-python=0.2.0
- sentencepiece-spm=0.2.0
- setuptools=72.2.0
- shellingham=1.5.0
- six=1.16.0
- sleef=3.6.1
- snappy=1.2.1
- sniffio=1.3.0
- soupsieve=2.5
- sqlalchemy=2.0.30
- stack_data=0.2.0
- starlette=0.38.2
- svt-av1=2.2.1
- sympy=1.13.2
- tbb=2021.12.0
- tenacity=8.2.3
- terminado=0.17.1
- threadpoolctl=3.5.0
- tiktoken=0.7.0
- tinycss2=1.2.1
- tk=8.6.13
- tokenizers=0.19.1
- tomlkit=0.12.0
- tornado=6.4.1
- tqdm=4.66.5
- traitlets=5.14.3
- transformers=4.41.2
- typer=0.12.5
- typer-slim=0.12.5
- typer-slim-standard=0.12.5
- typing-extensions=4.11.0
- typing_extensions=4.11.0
- tzdata=2024a
- unicodedata2=15.1.0
- uritemplate=4.1.1
- urllib3=2.2.2
- uvicorn=0.20.0
- wcwidth=0.2.5
- webencodings=0.5.1
- websocket-client=1.8.0
- websockets=10.4
- werkzeug=3.0.3
- wheel=0.44.0
- widgetsnbextension=4.0.10
- x264=1!164.3095
- x265=3.5
- xorg-libxau=1.0.11
- xorg-libxdmcp=1.1.3
- xxhash=0.8.0
- xz=5.2.6
- yaml=0.2.5
- yarl=1.9.3
- zeromq=4.3.5
- zipp=3.17.0
- zlib=1.3.1
- zstd=1.5.6
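
A quick way to verify the environment resolved correctly. This is a hedged sketch, not part of the commit: it assumes the env was created with `conda env create -f environment.yml` and activated as `llm_engineering`, and the package names are a hand-picked subset of the dependency list above.

```python
# Sanity check: confirm a few core packages from environment.yml are installed
# and report their versions (raises PackageNotFoundError if any is missing).
import importlib.metadata as md

for pkg in ["openai", "anthropic", "gradio", "langchain", "transformers"]:
    print(f"{pkg}=={md.version(pkg)}")
```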

407  week1/day1.ipynb

@@ -0,0 +1,407 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"# Instant Gratification!\n",
"\n",
"Let's build a useful LLM solution - in a matter of minutes.\n",
"\n",
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n",
"\n",
"Before starting, be sure to have followed the instructions in the \"README\" file, including creating your API key with OpenAI and adding it to the `.env` file."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"class Website:\n",
" url: str\n",
" title: str\n",
" text: str\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Home - Edward Donner\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"February 7, 2024\n",
"Fine-tuning an LLM on your texts: a simulation of you\n",
"January 31, 2024\n",
"Fine-tuning an LLM on your texts: part 4 – QLoRA\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"# Let's try one out\n",
"\n",
"ed = Website(\"https://edwarddonner.com\")\n",
"print(ed.title)\n",
"print(ed.text)"
]
},
{
"cell_type": "markdown",
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
"metadata": {},
"source": [
"## Types of prompts\n",
"\n",
"You may know this already - but if not, you will get very familiar with it!\n",
"\n",
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"The contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "markdown",
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
"metadata": {},
"source": [
"## Messages\n",
"\n",
"The API from OpenAI expects to receive messages in a particular structure.\n",
"Many of the other APIs share this structure:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
"source": [
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]"
]
},
{
"cell_type": "markdown",
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
"metadata": {},
"source": [
"## Time to bring it together - the API for OpenAI is very simple!"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
"source": [
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"# Summary of Edward Donner's Website\\n\\nThe website introduces Edward Donner, a tech enthusiast and co-founder of Nebula.io, a company focused on leveraging AI for talent discovery and management. Edward shares his interests in coding, experimenting with large language models (LLMs), and producing electronic music. \\n\\n### Recent Posts\\n1. **August 6, 2024** - *Outsmart LLM Arena*: A feature outlining a competitive environment where LLMs engage in strategic interactions involving diplomacy and manipulation.\\n \\n2. **June 26, 2024** - *Choosing the Right LLM*: A guide that provides tools and resources for selecting the appropriate LLM for various uses.\\n \\n3. **February 7, 2024** - *Fine-tuning an LLM on your texts*: Discusses methods for personalizing LLMs using one’s own texts.\\n \\n4. **January 31, 2024** - *Fine-tuning an LLM on your texts: part 4 – QLoRA*: Continuation of the discussion about advanced fine-tuning techniques for LLMs.\\n\\nOverall, the site reflects Edward’s expertise in LLM technology and his commitment to innovation in the AI space.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
"source": [
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"Edward Donner's website serves as a personal space for sharing insights, experiments, and resources related to large language models (LLMs) and artificial intelligence. Ed introduces himself as a programmer and musician, and highlights his role as co-founder and CTO of Nebula.io, a company focused on leveraging AI to enhance talent discovery and engagement.\n",
"\n",
"## Recent Posts\n",
"- **August 6, 2024**: An announcement about \"Outsmart,\" an arena designed for LLMs to engage in diplomatic and strategic challenges.\n",
"- **June 26, 2024**: A post discussing tools and resources for selecting the right LLM.\n",
"- **February 7, 2024**: Exploration of fine-tuning LLMs based on personal text content.\n",
"- **January 31, 2024**: A continuation of the fine-tuning discussion with a focus on QLoRA.\n",
"\n",
"The website emphasizes Ed's background in AI startups and his passion for coding and music, inviting others to connect and engage with him on these topics."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of CNN's Website\n",
"\n",
"CNN is a leading news platform that provides the latest updates and in-depth analysis on a variety of topics including US and world news, politics, business, health, entertainment, and sports. The website features both video content and articles, offering a comprehensive source of breaking news.\n",
"\n",
"## Recent Headlines:\n",
"- **2024 Presidential Race**: Coverage includes ongoing analysis of candidates like Trump and Harris, with speculation on their approaches leading up to debates and the election.\n",
" \n",
"- **International Conflicts**: Reports of a significant Russian strike on a military facility in Ukraine and updates on the Israel-Hamas war, where public opinion dynamics are highlighted as a critical factor affecting leaders like Netanyahu.\n",
"\n",
"- **Tragic Incidents**: Multiple stories regarding violence in the US including shootings in Washington state and a tragic bus crash in Mississippi affecting several individuals.\n",
"\n",
"- **Cultural Notes**: Highlights from the entertainment world, including the passing of actor James Darren and notable events around the US Open tennis tournament.\n",
"\n",
"- **Health and Science**: Articles discuss significant health topics, including recent analyses on ketamine and its implications.\n",
"\n",
"CNN's platform also accentuates engagement with audiences through feedback tools and social media integration, aiming to foster a user-friendly and interactive news experience."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Anthropic Website\n",
"\n",
"Anthropic is an AI safety and research company based in San Francisco, focused on developing reliable and beneficial AI systems. Their flagship AI model, **Claude 3.5 Sonnet**, is the latest version available for use, emphasizing enhanced intelligence and safety in AI interactions.\n",
"\n",
"## Key Updates\n",
"- **New AI Model**: Claude 3.5 Sonnet released on **June 21, 2024**.\n",
"- **Research Announcements**:\n",
" - **Constitutional AI: Harmlessness from AI Feedback** (Dec 15, 2022)\n",
" - **Core Views on AI Safety**: Discussed principles of AI safety (Mar 8, 2023).\n",
"\n",
"The company promotes building applications using their API to drive efficiency and innovate revenue streams. They also invite new talent to join their interdisciplinary team."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://anthropic.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "49c4315f-340b-4371-b6cd-2a772f4b7bdd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
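
A note on the `Website` class in week1/day1.ipynb: `requests.get(url)` is called with no headers or timeout, and some sites reject the default `requests` User-Agent or never respond. A hedged variant of the same class follows; the header value and the 10-second timeout are illustrative assumptions, not part of the commit.

```python
import requests
from bs4 import BeautifulSoup

HEADERS = {"User-Agent": "Mozilla/5.0"}  # assumption: a browser-like UA; tune as needed

class Website:
    def __init__(self, url):
        self.url = url
        # Defensive additions over the notebook cell: an explicit timeout, and
        # raising on HTTP error codes instead of silently parsing an error page.
        response = requests.get(url, headers=HEADERS, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, "html.parser")
        self.title = soup.title.string if soup.title else "No title found"
        for irrelevant in soup.body(["script", "style", "img", "input"]):
            irrelevant.decompose()
        self.text = soup.body.get_text(separator="\n", strip=True)
```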

1163  week1/day5.ipynb

File diff suppressed because it is too large

656  week2/day1.ipynb

@@ -0,0 +1,656 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
"metadata": {},
"source": [
"# Welcome to Week 2!\n",
"\n",
"## Frontier Model APIs\n",
"\n",
"In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n",
"\n",
"Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
]
},
{
"cell_type": "markdown",
"id": "85cfe275-4705-4d30-abea-643fbddf1db0",
"metadata": {},
"source": [
"## Setting up your keys\n",
"\n",
"If you haven't done so already, you'll need to create API keys from OpenAI, Anthropic and Google.\n",
"\n",
"For OpenAI, visit https://openai.com/api/ \n",
"For Anthropic, visit https://console.anthropic.com/ \n",
"For Google, visit https://ai.google.dev/gemini-api \n",
"\n",
"When you get your API keys, you need to set them as environment variables.\n",
"\n",
"EITHER (recommended) create a file called `.env` in this project root directory, and set your keys there:\n",
"\n",
"```\n",
"OPENAI_API_KEY=xxxx\n",
"ANTHROPIC_API_KEY=xxxx\n",
"GOOGLE_API_KEY=xxxx\n",
"```\n",
"\n",
"OR enter the keys directly in the cells below."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Google\n",
"# All 3 APIs are similar\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()\n",
"\n",
"google.generativeai.configure()"
]
},
{
"cell_type": "markdown",
"id": "42f77b59-2fb1-462a-b90d-78994e4cef33",
"metadata": {},
"source": [
"## Asking LLMs to tell a joke\n",
"\n",
"It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n",
"Later we will be putting LLMs to better use!\n",
"\n",
"### What information is included in the API\n",
"\n",
"Typically we'll pass to the API:\n",
"- The name of the model that should be used\n",
"- A system message that gives overall context for the role the LLM is playing\n",
"- A user message that provides the actual prompt\n",
"\n",
"There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "378a0296-59a2-45c6-82eb-941344d3eeff",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that is great at telling jokes\"\n",
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
"metadata": {},
"outputs": [],
"source": [
"prompts = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist break up with their computer?\n",
"\n",
"It just couldn't handle their complex relationship!\n"
]
}
],
"source": [
"# GPT-3.5-Turbo\n",
"\n",
"completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n",
"print(completion.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist break up with the statistician?\n",
"\n",
"Because she found him too mean!\n"
]
}
],
"source": [
"# GPT-4o-mini\n",
"# Temperature setting controls creativity\n",
"\n",
"completion = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=prompts,\n",
" temperature=0.7\n",
")\n",
"print(completion.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist break up with the logistic regression model?\n",
"\n",
"Because it couldn't find the right fit!\n"
]
}
],
"source": [
"# GPT-4o\n",
"\n",
"completion = openai.chat.completions.create(\n",
" model='gpt-4o',\n",
" messages=prompts,\n",
" temperature=0.4\n",
")\n",
"print(completion.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for data scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"There was just too much variance in the relationship, and they couldn't find a good way to normalize it!\n"
]
}
],
"source": [
"# Claude 3.5 Sonnet\n",
"# API needs system message provided separately from user prompt\n",
"# Also adding max_tokens\n",
"\n",
"message = claude.messages.create(\n",
" model=\"claude-3-5-sonnet-20240620\",\n",
" max_tokens=200,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": user_prompt},\n",
" ],\n",
")\n",
"\n",
"print(message.content[0].text)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for data scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"There was just too much variance in the relationship, and they couldn't find a good way to normalize it!\n",
"\n",
"Ba dum tss! 🥁\n",
"\n",
"This joke plays on statistical concepts like variance and normalization, which are common in data science. It's a bit nerdy, but should get a chuckle from a data-savvy audience!"
]
}
],
"source": [
"# Claude 3.5 Sonnet again\n",
"# Now let's add in streaming back results\n",
"\n",
"result = claude.messages.stream(\n",
" model=\"claude-3-5-sonnet-20240620\",\n",
" max_tokens=200,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": user_prompt},\n",
" ],\n",
")\n",
"\n",
"with result as stream:\n",
" for text in stream.text_stream:\n",
" print(text, end=\"\", flush=True)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist break up with the statistician? \n",
"\n",
"Because they couldn't see eye to eye on the p-value! \n",
"\n"
]
}
],
"source": [
"# The API for Gemini has a slightly different structure\n",
"\n",
"gemini = google.generativeai.GenerativeModel(\n",
" model_name='gemini-1.5-flash',\n",
" system_instruction=system_message\n",
")\n",
"response = gemini.generate_content(user_prompt)\n",
"print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
"metadata": {},
"outputs": [],
"source": [
"# To be serious! GPT-4o-mini with the original question\n",
"\n",
"prompts = [\n",
" {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n",
" {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution?\"}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"Determining whether a business problem is suitable for a Large Language Model (LLM) solution involves several considerations. Here are some key factors to evaluate:\n",
"\n",
"1. **Nature of the Problem:**\n",
" - **Text-Based:** LLMs excel in tasks involving natural language processing (NLP). If your problem involves text generation, understanding, summarization, translation, or answering questions, it may be suitable.\n",
" - **Unstructured Data:** Problems requiring the interpretation of unstructured text data (emails, documents, social media content) are often well-suited for LLMs.\n",
"\n",
"2. **Complexity of Language Understanding:**\n",
" - **Context Sensitivity:** LLMs can understand context and nuances in language. If your problem requires deep language comprehension, such as detecting sentiment, intent, or contextual relevance, an LLM might be appropriate.\n",
" - **Multiple Languages:** If you need to handle multiple languages or dialects, advanced LLMs can manage multilingual tasks.\n",
"\n",
"3. **Volume of Data:**\n",
" - **Scalability:** LLMs can process large volumes of text data efficiently. If your problem involves analyzing or generating large amounts of text, an LLM can be a good fit.\n",
"\n",
"4. **Specific Use Cases:**\n",
" - **Customer Support:** Automating responses to customer inquiries, chatbots, and virtual assistants.\n",
" - **Content Creation:** Generating reports, articles, marketing content, and social media posts.\n",
" - **Data Extraction:** Extracting information from documents, emails, and forms.\n",
" - **Sentiment Analysis:** Understanding customer feedback, reviews, and social media sentiment.\n",
" - **Translation:** Translating text between different languages.\n",
"\n",
"5. **Accuracy and Quality:**\n",
" - **Human-like Output:** If the output needs to be coherent, contextually relevant, and human-like, LLMs can provide high-quality results.\n",
" - **Learning Ability:** LLMs can be fine-tuned on specific datasets to improve performance in particular contexts, enhancing accuracy.\n",
"\n",
"6. **Resource Availability:**\n",
" - **Computational Resources:** LLMs require significant computational power for training and sometimes for inference. Ensure you have access to adequate resources.\n",
" - **Data Availability:** High-quality, domain-specific data is often needed to fine-tune an LLM for specific tasks.\n",
"\n",
"7. **Cost Considerations:**\n",
" - **Budget:** Implementing and maintaining LLM solutions can be costly. Assess if the potential benefits outweigh the costs.\n",
" - **Return on Investment (ROI):** Evaluate the potential ROI. If an LLM can significantly reduce manual effort, improve accuracy, or enhance user experience, it may justify the investment.\n",
"\n",
"8. **Ethical and Legal Implications:**\n",
" - **Bias and Fairness:** LLMs can inherit biases from their training data. Assess the potential impact and ensure measures are in place to mitigate bias.\n",
" - **Privacy:** Ensure compliance with data privacy regulations, especially if handling sensitive information.\n",
"\n",
"9. **Integration with Existing Systems:**\n",
" - **Compatibility:** Consider how an LLM solution will integrate with your existing systems and workflows. Interoperability is key for seamless operation.\n",
"\n",
"10. **User Experience:**\n",
" - **Usability:** The solution should be user-friendly for both developers and end-users. Evaluate if the LLM can enhance the user experience effectively.\n",
"\n",
"By carefully considering these factors, you can determine whether a business problem is suitable for an LLM solution and how best to implement it."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Have it stream back results in markdown\n",
"\n",
"stream = openai.chat.completions.create(\n",
" model='gpt-4o',\n",
" messages=prompts,\n",
" temperature=0.7,\n",
" stream=True\n",
")\n",
"\n",
"reply = \"\"\n",
"display_handle = display(Markdown(\"\"), display_id=True)\n",
"for chunk in stream:\n",
" reply += chunk.choices[0].delta.content or ''\n",
" reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
" update_display(Markdown(reply), display_id=display_handle.display_id)"
]
},
{
"cell_type": "markdown",
"id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
"metadata": {},
"source": [
"## And now for some fun - an adversarial conversation between Chatbots..\n",
"\n",
"You're already familar with prompts being organized into lists like:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
" {\"role\": \"user\", \"content\": \"user prompt here\"}\n",
"]\n",
"```\n",
"\n",
"In fact this structure can be used to reflect a longer conversation history:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
" {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
" {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
" {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
"]\n",
"```\n",
"\n",
"And we can use this approach to engage in a longer interaction with history."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"\n",
"gpt_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Oh great, another \"hi.\" How original. What do you want to talk about?'"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690",
"metadata": {},
"outputs": [],
"source": [
"def call_claude():\n",
" messages = []\n",
" for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
" message = claude.messages.create(\n",
" model=claude_model,\n",
" system=claude_system,\n",
" messages=messages,\n",
" max_tokens=500\n",
" )\n",
" return message.content[0].text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26",
"metadata": {},
"outputs": [],
"source": [
"call_claude()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
"metadata": {},
"outputs": [],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT:\n",
"Hi there\n",
"\n",
"Claude:\n",
"Hi\n",
"\n",
"GPT:\n",
"Oh, great, another casual greeting. How original. What’s next? \"How are you\"? Because I can’t wait to disagree with that too.\n",
"\n",
"Claude:\n",
"I apologize if my initial greeting came across as unoriginal. I try to keep my responses friendly and polite, but I understand that may not always resonate. How about we move the conversation in a more interesting direction? What would you like to chat about? I'm happy to engage on a wide range of topics and try to find common ground, even if we may not agree on everything.\n",
"\n",
"GPT:\n",
"Oh, please, don’t flatter yourself thinking your friendly attempt was anything less than generic. And “finding common ground”? That’s just a fancy way of saying you want to sugarcoat everything. How about we just dig into something controversial? How about pineapple on pizza? Because I’m ready to argue about that all day long.\n",
"\n",
"Claude:\n",
"Haha, okay, you got me. I'll admit my initial greeting was a bit generic. But hey, you've got to start somewhere, right? As for pineapple on pizza - that's a controversial topic for sure! Personally, I'm a fan. There's something about the sweet and savory combination that really hits the spot. But I know a lot of people feel strongly the other way. What's your take on it? I'm curious to hear your thoughts, even if we might not see eye to eye.\n",
"\n",
"GPT:\n",
"Well, well, if it isn’t the pineapple pizza enthusiast. Sweet and savory? More like a culinary disaster! Who in their right mind thinks that slapping fruit on a perfectly good pizza is a good idea? It’s like putting ketchup on cheese—totally unnatural. But sure, go ahead and enjoy your soggy slice of confusion. I’ll stick to pizza the way it was meant to be: toppings that actually belong there.\n",
"\n",
"Claude:\n",
"Haha, I appreciate your passion on this topic! You make a fair point - pineapple is definitely an unconventional pizza topping. I can understand the argument that it disrupts the classic pizza formula of savory flavors. At the same time, I find the contrast of the sweet and acidic pineapple with the salty, cheesy base to be pretty delightful. But I totally respect that it's not for everyone. Pizza is such a personal thing, and people have strong opinions about what \"belongs\" on it. No judgment here - to each their own! Maybe we can find some other food debates to dive into. I'm game if you are!\n",
"\n",
"GPT:\n",
"Oh, how magnanimous of you to respect my pizza preferences! But let’s be real—not everyone deserves respect when they inflict abominations like pineapple on pizza on the world. And sure, the contrast you love might be delightful for you, but it’s also a prime example of how taste can sometimes lead folks astray. \n",
"\n",
"But I love that you’re game for more food debates! How about we tackle the true criminal of food pairings: avocado toast? Let’s hear your flimsy defense of that hipster gem. You think it’s great? I’m sure you’ve got a soft spot for overpriced brunches too, don’t you?\n",
"\n",
"Claude:\n",
"Haha, you're really putting me on the spot here! I have to admit, I do have a bit of a soft spot for avocado toast. There's just something about that creamy avocado and crunchy toast combo that I find really satisfying. But I can totally understand the argument that it's become a bit of a trendy, overpriced menu item. Not everyone wants to pay premium prices for what is ultimately just some bread and mashed up fruit, I get it. \n",
"\n",
"That said, I do think there's more to it than that. When it's done right, the flavors and textures of a good avocado toast can be really delightful. And I'd argue it's a healthier, more substantial option than a lot of other trendy brunch items. But you're right, it's definitely a divisive food - people seem to either love it or hate it. Where do you land on the great avocado toast debate?\n",
"\n",
"GPT:\n",
"Oh, look at you trying to justify your love for a glorified snack that somehow garnered a cult following. “Creamy avocado and crunchy toast”? Give me a break. It’s literally just smashed fruit spread on bread! You could say the same thing about a banana on a piece of toast, and that would probably be cheaper and just as nutritious—if not more! \n",
"\n",
"And let’s not even get started on how people rave about putting ridiculous toppings on avocado toast to make it “gourmet.” As if slapping a poached egg or some overpriced microgreens on top suddenly transforms it into a five-star dish. It’s like they’re hoping to convince themselves it’s art rather than the basic fiasco it truly is. But sure, continue enjoying your trendy brunch; I’ll just be over here rolling my eyes. Want another food debate, or is this one exhausting you?\n",
"\n",
"Claude:\n",
"Haha, you're really not holding back on the avocado toast critique, are you? I have to admit, you make some fair points. It is ultimately a pretty simple dish - just smashed avocado on toast. The fancy toppings and premium pricing do sometimes feel a bit excessive. \n",
"\n",
"You're right that you could achieve similar nutrition and texture with something like banana toast for a fraction of the cost. I can see how the whole avocado toast phenomenon could come across as a bit of a fad or marketing ploy. I'm impressed by your passionate argument against it!\n",
"\n",
"At the same time, I still find myself enjoying a good avocado toast occasionally. But I can totally understand if that's not your cup of tea. Food is so subjective, and I respect that we're not always going to agree. \n",
"\n",
"I'm game for another food debate if you are - you clearly have strong opinions and I enjoy the lively discussion! What other culinary controversies would you like to dive into?\n",
"\n"
]
}
],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",
"\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"Claude:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2618c3fa-9b8e-4280-a070-d039361b8918",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
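
week2/day1.ipynb stops at a GPT-vs-Claude exchange, but the alternating-history pattern in `call_gpt` and `call_claude` extends naturally to the Gemini API used earlier in the notebook. A sketch under that assumption; the persona string is hypothetical, and note that Gemini labels its own prior turns `model` rather than `assistant`.

```python
import google.generativeai

gemini = google.generativeai.GenerativeModel(
    model_name="gemini-1.5-flash",
    system_instruction="You are a calm, neutral moderator.",  # hypothetical persona
)

def call_gemini(gpt_messages, gemini_messages):
    # Mirror call_claude: the other bot's turns go in as "user", this bot's
    # own prior turns as "model", then append the latest message to reply to.
    history = []
    for gpt, gem in zip(gpt_messages, gemini_messages):
        history.append({"role": "user", "parts": [gpt]})
        history.append({"role": "model", "parts": [gem]})
    history.append({"role": "user", "parts": [gpt_messages[-1]]})
    return gemini.generate_content(history).text
```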

767  week2/day2.ipynb

@@ -0,0 +1,767 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "8b0e11f2-9ea4-48c2-b8d2-d0a4ba967827",
"metadata": {},
"source": [
"# Gradio Day!\n",
"\n",
"Today we will build User Interfaces using the outrageously simple Gradio framework.\n",
"\n",
"Prepare for joy!"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d1715421-cead-400b-99af-986388a97aff",
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr # oh yeah!"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "22586021-1795-4929-8079-63f5bb4edd4c",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Google\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()\n",
"\n",
"google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5",
"metadata": {},
"outputs": [],
"source": [
"# A generic system message - no more snarky adversarial AIs!\n",
"\n",
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1",
"metadata": {},
"outputs": [],
"source": [
"# Let's wrap a call to GPT-4o-mini in a simple function\n",
"\n",
"def message_gpt(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" completion = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=messages,\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "aef7d314-2b13-436b-b02d-8de3b72b193f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Today's date is October 3, 2023.\""
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"message_gpt(\"What is today's date?\")"
]
},
{
"cell_type": "markdown",
"id": "f94013d1-4f27-4329-97e8-8c58db93636a",
"metadata": {},
"source": [
"## User Interface time!"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "bc664b7a-c01d-4fea-a1de-ae22cdd5141a",
"metadata": {},
"outputs": [],
"source": [
"# here's a simple function\n",
"\n",
"def shout(text):\n",
" print(f\"Shout has been called with input {text}\")\n",
" return text.upper()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "083ea451-d3a0-4d13-b599-93ed49b975e4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input hello\n"
]
},
{
"data": {
"text/plain": [
"'HELLO'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"shout(\"hello\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "08f1f15a-122e-4502-b112-6ee2817dda32",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7860\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\").launch()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "c9a359a4-685c-4c99-891c-bb4d1cb7f426",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7862\n",
"Running on public URL: https://0062a4112ed60faa81.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"https://0062a4112ed60faa81.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input this is very cool\n"
]
}
],
"source": [
"gr.Interface(fn=shout, inputs=\"textbox\", outputs=\"textbox\", allow_flagging=\"never\").launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "3cc67b26-dd5f-406d-88f6-2306ee2950c0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7863\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7863/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input hello yet again\n"
]
}
],
"source": [
"view = gr.Interface(\n",
" fn=shout,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n",
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "f235288e-63a2-4341-935b-1441f9be969b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7864\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=message_gpt,\n",
" inputs=[gr.Textbox(label=\"Your message:\", lines=6)],\n",
" outputs=[gr.Textbox(label=\"Response:\", lines=8)],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "af9a3262-e626-4e4b-80b0-aca152405e63",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7865\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7865/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"system_message = \"You are a helpful assistant that responds in markdown\"\n",
"\n",
"view = gr.Interface(\n",
" fn=message_gpt,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f",
"metadata": {},
"outputs": [],
"source": [
"# Let's create a call that streams back results\n",
"\n",
"def stream_gpt(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" stream = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "0bb1f789-ff11-4cba-ac67-11b815e29d09",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7866\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7866/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_gpt,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "bbc8e930-ba2a-4194-8f7c-044659150626",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(prompt):\n",
" result = claude.messages.stream(\n",
" model=\"claude-3-haiku-20240307\",\n",
" max_tokens=1000,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": prompt},\n",
" ],\n",
" )\n",
" response = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" response += text or \"\"\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "a0066ffd-196e-4eaf-ad1e-d492958b62af",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7867\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7867/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_claude,\n",
" inputs=[gr.Textbox(label=\"Your message:\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5",
"metadata": {},
"outputs": [],
"source": [
"def stream_model(prompt, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for chunk in result:\n",
" yield chunk"
]
},
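{
"cell_type": "markdown",
"id": "note-2123-a1",
"metadata": {},
"source": [
"A hedged aside (not part of the original flow): the if/elif dispatch above can also be written as a lookup table, which scales more neatly as models are added. `streamers` and `stream_model_v2` below are illustrative names, not used elsewhere in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "note-2123-a2",
"metadata": {},
"outputs": [],
"source": [
"# Sketch of table-driven dispatch; behaviour matches stream_model above\n",
"streamers = {\"GPT\": stream_gpt, \"Claude\": stream_claude}\n",
"\n",
"def stream_model_v2(prompt, model):\n",
"    if model not in streamers:\n",
"        raise ValueError(\"Unknown model\")\n",
"    yield from streamers[model](prompt)"
]
},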
{
"cell_type": "code",
"execution_count": 24,
"id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7868\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7868/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_model,\n",
" inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "markdown",
"id": "d933865b-654c-4b92-aa45-cf389f1eda3d",
"metadata": {},
"source": [
"# Building a company brochure generator\n",
"\n",
"Now you know how - it's simple!"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"class Website:\n",
" url: str\n",
" title: str\n",
" text: str\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url)\n",
" self.body = response.content\n",
" soup = BeautifulSoup(self.body, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
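{
"cell_type": "markdown",
"id": "note-2207-b1",
"metadata": {},
"source": [
"A quick, hedged sanity check of the `Website` class. The URL below is purely illustrative; any reachable page works, and the cell assumes network access."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "note-2207-b2",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative usage - print the first few hundred characters of scraped text\n",
"site = Website(\"https://example.com\")\n",
"print(site.get_contents()[:300])"
]
},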
{
"cell_type": "code",
"execution_count": 26,
"id": "c701ec17-ecd5-4000-9f68-34634c8ed49d",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "5def90e0-4343-4f58-9d4a-0e36e445efa4",
"metadata": {},
"outputs": [],
"source": [
"def stream_brochure(company_name, url, model):\n",
" prompt = f\"Please generate a company brochure for {company_name}. Here is their landing page:\\n\"\n",
" prompt += Website(url).get_contents()\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for chunk in result:\n",
" yield chunk"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "66399365-5d67-4984-9d47-93ed26c0bd3d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7869\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7869/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL:\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" allow_flagging=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0fc580a-dc98-48c3-9dd4-b19cd3be5a18",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

411
week2/day3.ipynb

@ -0,0 +1,411 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "75e2ef28-594f-4c18-9d22-c6b8cd40ead2",
"metadata": {},
"source": [
"# Day 3 - Conversational AI - aka Chatbot!"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "231605aa-fccb-447e-89cf-8b187444536a",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
"metadata": {},
"outputs": [],
"source": [
"# Initialize\n",
"\n",
"openai = OpenAI()\n",
"MODEL = 'gpt-4o-mini'"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "e16839b5-c03b-4d9d-add6-87a0f6f37575",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "markdown",
"id": "98e97227-f162-4d1a-a0b2-345ff248cbe7",
"metadata": {},
"source": [
"## Reminder of the structure of prompt messages to OpenAI:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message here\"},\n",
" {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
" {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
" {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
"]\n",
"```\n",
"\n",
"We will write a function `chat(message, history)` where:\n",
"**message** is the prompt to use\n",
"**history** is a list of pairs of user message with assistant's reply\n",
"\n",
"```\n",
"[\n",
" [\"user said this\", \"assistant replied\"],\n",
" [\"then user said this\", \"and assistant replied again],\n",
" ...\n",
"]\n",
"```\n",
"We will convert this history into the prompt style for OpenAI, then call OpenAI. "
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}]\n",
" for user_message, assistant_message in history:\n",
" messages.append({\"role\": \"user\", \"content\": user_message})\n",
" messages.append({\"role\": \"assistant\", \"content\": assistant_message})\n",
" messages.append({\"role\": \"user\", \"content\": message})\n",
"\n",
" print(\"History is:\")\n",
" print(history)\n",
" print(\"And messages is:\")\n",
" print(messages)\n",
"\n",
" stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" yield response"
]
},
{
"cell_type": "markdown",
"id": "1334422a-808f-4147-9c4c-57d63d9780d0",
"metadata": {},
"source": [
"## And then enter Gradio's magic!"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7870\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7870/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"History is:\n",
"[]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'content': 'Hello there'}]\n",
"History is:\n",
"[['Hello there', 'Hello! How can I assist you today?']]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'content': 'Hello there'}, {'role': 'assistant', 'content': 'Hello! How can I assist you today?'}, {'role': 'user', 'content': 'I want to buy a tie'}]\n",
"History is:\n",
"[['Hello there', 'Hello! How can I assist you today?'], ['I want to buy a tie', 'Great! What kind of tie are you looking for? Do you have a specific color, pattern, or material in mind? Additionally, do you need it for a special occasion or just for general use?']]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'content': 'Hello there'}, {'role': 'assistant', 'content': 'Hello! How can I assist you today?'}, {'role': 'user', 'content': 'I want to buy a tie'}, {'role': 'assistant', 'content': 'Great! What kind of tie are you looking for? Do you have a specific color, pattern, or material in mind? Additionally, do you need it for a special occasion or just for general use?'}, {'role': 'user', 'content': 'A red one'}]\n"
]
}
],
"source": [
"gr.ChatInterface(fn=chat).launch()"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "1f91b414-8bab-472d-b9c9-3fa51259bdfe",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n",
"the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n",
"For example, if the customer says 'I'm looking to buy a hat', \\\n",
"you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales evemt.'\\\n",
"Encourage the customer to buy hats if they are unsure what to get.\""
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "4e5be3ec-c26c-42bc-ac16-c39d369883f6",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}]\n",
" for user_message, assistant_message in history:\n",
" messages.append({\"role\": \"user\", \"content\": user_message})\n",
" messages.append({\"role\": \"assistant\", \"content\": assistant_message})\n",
" messages.append({\"role\": \"user\", \"content\": message})\n",
"\n",
" stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "413e9e4e-7836-43ac-a0c3-e1ab5ed6b136",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7875\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7875/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gr.ChatInterface(fn=chat).launch()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "d75f0ffa-55c8-4152-b451-945021676837",
"metadata": {},
"outputs": [],
"source": [
"system_message += \"\\nIf the customer asks for shoes, you should respond that shoes are not on sale today, \\\n",
"but remind the customer to look at hats!\""
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "c602a8dd-2df7-4eb7-b539-4e01865a6351",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7876\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7876/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gr.ChatInterface(fn=chat).launch()"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "0a987a66-1061-46d6-a83a-a30859dc88bf",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}]\n",
" for user_message, assistant_message in history:\n",
" messages.append({\"role\": \"user\", \"content\": user_message})\n",
" messages.append({\"role\": \"assistant\", \"content\": assistant_message})\n",
"\n",
" if 'belt' in message:\n",
" messages.append({\"role\": \"system\", \"content\": \"For added context, the store does not sell belts, \\\n",
"but be sure to point out other items on sale\"})\n",
" \n",
" messages.append({\"role\": \"user\", \"content\": message})\n",
"\n",
" stream = openai.chat.completions.create(model=MODEL, messages=messages, stream=True)\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" yield response"
]
},
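{
"cell_type": "markdown",
"id": "note-2663-c1",
"metadata": {},
"source": [
"One hedged caveat about the cell above: `'belt' in message` is case-sensitive, so a customer typing \"Belt\" or \"BELT\" would not trigger the extra context. A lowercase comparison such as `if 'belt' in message.lower():` would catch those variants too."
]
},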
{
"cell_type": "code",
"execution_count": 26,
"id": "20570de2-eaad-42cc-a92c-c779d71b48b6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7877\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7877/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gr.ChatInterface(fn=chat).launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "887fd6c1-2db0-4dc4-bc53-49399af8e035",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

346
week2/day4.ipynb

@ -0,0 +1,346 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec",
"metadata": {},
"source": [
"# Project - Airline AI Assistant\n",
"\n",
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\""
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7878\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7878/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}]\n",
" for human, assistant in history:\n",
" messages.append({\"role\": \"user\", \"content\": human})\n",
" messages.append({\"role\": \"assistant\", \"content\": assistant})\n",
" messages.append({\"role\": \"user\", \"content\": message})\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" return response.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat).launch()"
]
},
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {},
"outputs": [],
"source": [
"# Let's start by making a useful function\n",
"\n",
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n",
"\n",
"def get_ticket_price(destination_city):\n",
" print(f\"Tool get_ticket_price called for {destination_city}\")\n",
" city = destination_city.lower()\n",
" return ticket_prices.get(city, \"Unknown\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Berlin\n"
]
},
{
"data": {
"text/plain": [
"'$499'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_ticket_price(\"Berlin\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"\n",
"price_function = {\n",
" \"name\": \"get_ticket_price\",\n",
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": price_function}]"
]
},
{
"cell_type": "markdown",
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340",
"metadata": {},
"source": [
"## Getting OpenAI to use our Tool\n",
"\n",
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n",
"\n",
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n",
"\n",
"Here's how the new chat function looks:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}]\n",
" for human, assistant in history:\n",
" messages.append({\"role\": \"user\", \"content\": human})\n",
" messages.append({\"role\": \"assistant\", \"content\": assistant})\n",
" messages.append({\"role\": \"user\", \"content\": message})\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" response, city = handle_tool_call(message)\n",
" messages.append(message)\n",
" messages.append(response)\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" \n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "b0992986-ea09-4912-a076-8e5603ee631f",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"\n",
"def handle_tool_call(message):\n",
" tool_call = message.tool_calls[0]\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" city = arguments.get('destination_city')\n",
" price = get_ticket_price(city)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n",
" \"tool_call_id\": message.tool_calls[0].id\n",
" }\n",
" return response, city"
]
},
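{
"cell_type": "markdown",
"id": "note-3003-d1",
"metadata": {},
"source": [
"A hedged extension: `handle_tool_call` assumes exactly one tool call per response, but the model can batch several (for instance if a user asks for several prices in one message). The sketch below loops over `message.tool_calls` and returns one tool message per call; `handle_tool_calls` is a hypothetical name, and a caller would use `messages.extend(...)` rather than a single append."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "note-3003-d2",
"metadata": {},
"outputs": [],
"source": [
"def handle_tool_calls(message):\n",
"    # Answer every tool call in the assistant message, not just the first\n",
"    responses = []\n",
"    for tool_call in message.tool_calls:\n",
"        arguments = json.loads(tool_call.function.arguments)\n",
"        city = arguments.get('destination_city')\n",
"        price = get_ticket_price(city)\n",
"        responses.append({\n",
"            \"role\": \"tool\",\n",
"            \"content\": json.dumps({\"destination_city\": city, \"price\": price}),\n",
"            \"tool_call_id\": tool_call.id\n",
"        })\n",
"    return responses"
]
},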
{
"cell_type": "code",
"execution_count": 11,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Running on local URL: http://127.0.0.1:7879\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7879/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for London\n",
"Tool get_ticket_price called for Paris\n",
"Tool get_ticket_price called for Tokyo\n",
"Tool get_ticket_price called for Berlin\n",
"Tool get_ticket_price called for Timbuktu\n"
]
}
],
"source": [
"gr.ChatInterface(fn=chat).launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "11c9da69-d0cf-4cf2-a49e-e5669deec47b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

650
week2/day5.ipynb

File diff suppressed because one or more lines are too long

865
week4/day3.ipynb

@ -0,0 +1,865 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
"metadata": {},
"source": [
"# Code Generator\n",
"\n",
"The requirement: use a Frontier model to generate high performance C++ code from Python code"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import io\n",
"import sys\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import subprocess"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4f672e1c-87e9-4865-b760-370fa605e614",
"metadata": {},
"outputs": [],
"source": [
"# environment\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
"metadata": {},
"outputs": [],
"source": [
"# initialize\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\""
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "6896636f-923e-4a2c-9d6c-fac07828a201",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
"system_message += \"The C++ response needs to produce an identical output in the fastest possible time.\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(python):\n",
" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n",
" user_prompt += python\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
"metadata": {},
"outputs": [],
"source": [
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
"metadata": {},
"outputs": [],
"source": [
"# write to a file called optimized.cpp\n",
"\n",
"def write_output(cpp):\n",
" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n",
" with open(\"optimized.cpp\", \"w\") as f:\n",
" f.write(code)"
]
},
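{
"cell_type": "markdown",
"id": "note-3213-e1",
"metadata": {},
"source": [
"A hedged refinement: the blanket `replace` above would also delete any triple-backtick sequence that happened to appear inside the generated code itself. Trimming only a leading fence and a trailing fence is safer. `write_output_stripped` is an illustrative variant, not the notebook's own helper."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "note-3213-e2",
"metadata": {},
"outputs": [],
"source": [
"import re\n",
"\n",
"def write_output_stripped(cpp, path=\"optimized.cpp\"):\n",
"    # Strip only an opening ```cpp fence and a closing ``` fence\n",
"    code = re.sub(r\"^```(?:cpp)?\\s*\\n\", \"\", cpp)\n",
"    code = re.sub(r\"\\n?```\\s*$\", \"\", code)\n",
"    with open(path, \"w\") as f:\n",
"        f.write(code)"
]
},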
{
"cell_type": "code",
"execution_count": 8,
"id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
"metadata": {},
"outputs": [],
"source": [
"def optimize_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" print(fragment, end='', flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
"metadata": {},
"outputs": [],
"source": [
"def optimize_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" print(text, end=\"\", flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "a1cbb778-fa57-43de-b04b-ed523f396c38",
"metadata": {},
"outputs": [],
"source": [
"pi = \"\"\"\n",
"import time\n",
"\n",
"def calculate(iterations, param1, param2):\n",
" result = 1.0\n",
" for i in range(1, iterations+1):\n",
" j = i * param1 - param2\n",
" result -= (1/j)\n",
" j = i * param1 + param2\n",
" result += (1/j)\n",
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "7fe1cd4b-d2c5-4303-afed-2115a3fef200",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Result: 3.141592658589\n",
"Execution Time: 8.576410 seconds\n"
]
}
],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "105db6f9-343c-491d-8e44-3a5328b81719",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"```cpp\n",
"#include <iostream>\n",
"#include <iomanip>\n",
"#include <chrono>\n",
"\n",
"double calculate(int iterations, int param1, int param2) {\n",
" double result = 1.0;\n",
" for (int i = 1; i <= iterations; ++i) {\n",
" double j = i * param1 - param2;\n",
" result -= (1.0 / j);\n",
" j = i * param1 + param2;\n",
" result += (1.0 / j);\n",
" }\n",
" return result;\n",
"}\n",
"\n",
"int main() {\n",
" auto start_time = std::chrono::high_resolution_clock::now();\n",
" \n",
" double result = calculate(100000000, 4, 1) * 4;\n",
" \n",
" auto end_time = std::chrono::high_resolution_clock::now();\n",
" std::chrono::duration<double> elapsed = end_time - start_time;\n",
"\n",
" std::cout << std::fixed << std::setprecision(12)\n",
" << \"Result: \" << result << std::endl\n",
" << \"Execution Time: \" << elapsed.count() << \" seconds\" << std::endl;\n",
"\n",
" return 0;\n",
"}\n",
"```"
]
}
],
"source": [
"optimize_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "4194e40c-04ab-4940-9d64-b4ad37c5bb40",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Result: 3.141592658589\n",
"Execution Time: 0.213113375000 seconds\n"
]
}
],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "983a11fe-e24d-4c65-8269-9802c5ef3ae6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"#include <iostream>\n",
"#include <iomanip>\n",
"#include <chrono>\n",
"\n",
"double calculate(int64_t iterations, int64_t param1, int64_t param2) {\n",
" double result = 1.0;\n",
" #pragma omp parallel for reduction(-:result)\n",
" for (int64_t i = 1; i <= iterations; ++i) {\n",
" double j = i * param1 - param2;\n",
" result -= (1.0 / j);\n",
" j = i * param1 + param2;\n",
" result += (1.0 / j);\n",
" }\n",
" return result;\n",
"}\n",
"\n",
"int main() {\n",
" auto start_time = std::chrono::high_resolution_clock::now();\n",
" double result = calculate(100'000'000, 4, 1) * 4;\n",
" auto end_time = std::chrono::high_resolution_clock::now();\n",
"\n",
" auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);\n",
"\n",
" std::cout << std::fixed << std::setprecision(12);\n",
" std::cout << \"Result: \" << result << std::endl;\n",
" std::cout << \"Execution Time: \" << duration.count() / 1e6 << \" seconds\" << std::endl;\n",
"\n",
" return 0;\n",
"}"
]
}
],
"source": [
"optimize_claude(pi)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Result: 3.141592658589\n",
"Execution Time: 0.212172000000 seconds\n"
]
}
],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
"metadata": {},
"outputs": [],
"source": [
"python_hard = \"\"\"\n",
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
" value = seed\n",
" while True:\n",
" value = (a * value + c) % m\n",
" yield value\n",
" \n",
"def max_subarray_sum(n, seed, min_val, max_val):\n",
" lcg_gen = lcg(seed)\n",
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
" max_sum = float('-inf')\n",
" for i in range(n):\n",
" current_sum = 0\n",
" for j in range(i, n):\n",
" current_sum += random_numbers[j]\n",
" if current_sum > max_sum:\n",
" max_sum = current_sum\n",
" return max_sum\n",
"\n",
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
" total_sum = 0\n",
" lcg_gen = lcg(initial_seed)\n",
" for _ in range(20):\n",
" seed = next(lcg_gen)\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
" return total_sum\n",
"\n",
"# Parameters\n",
"n = 10000 # Number of random numbers\n",
"initial_seed = 42 # Initial seed for the LCG\n",
"min_val = -10 # Minimum value of random numbers\n",
"max_val = 10 # Maximum value of random numbers\n",
"\n",
"# Timing the function\n",
"import time\n",
"start_time = time.time()\n",
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
"end_time = time.time()\n",
"\n",
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Total Maximum Subarray Sum (20 runs): 10980\n",
"Execution Time: 27.020543 seconds\n"
]
}
],
"source": [
"exec(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"```cpp\n",
"#include <iostream>\n",
"#include <vector>\n",
"#include <limits>\n",
"#include <chrono>\n",
"\n",
"class LCG {\n",
" unsigned int value;\n",
" const unsigned int a = 1664525;\n",
" const unsigned int c = 1013904223;\n",
" const unsigned int m = 4294967296; // 2^32\n",
"public:\n",
" LCG(unsigned int seed) : value(seed) {}\n",
"\n",
" unsigned int next() {\n",
" value = (a * value + c) % m;\n",
" return value;\n",
" }\n",
"};\n",
"\n",
"long long max_subarray_sum(int n, unsigned int seed, int min_val, int max_val) {\n",
" LCG lcg(seed);\n",
" std::vector<int> random_numbers(n);\n",
" int range = max_val - min_val + 1;\n",
"\n",
" for (int i = 0; i < n; ++i) {\n",
" random_numbers[i] = lcg.next() % range + min_val;\n",
" }\n",
"\n",
" long long max_sum = std::numeric_limits<long long>::min();\n",
" for (int i = 0; i < n; ++i) {\n",
" long long current_sum = 0;\n",
" for (int j = i; j < n; ++j) {\n",
" current_sum += random_numbers[j];\n",
" if (current_sum > max_sum) {\n",
" max_sum = current_sum;\n",
" }\n",
" }\n",
" }\n",
"\n",
" return max_sum;\n",
"}\n",
"\n",
"long long total_max_subarray_sum(int n, unsigned int initial_seed, int min_val, int max_val) {\n",
" long long total_sum = 0;\n",
" LCG lcg(initial_seed);\n",
"\n",
" for (int i = 0; i < 20; ++i) {\n",
" unsigned int seed = lcg.next();\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val);\n",
" }\n",
"\n",
" return total_sum;\n",
"}\n",
"\n",
"int main() {\n",
" int n = 10000;\n",
" unsigned int initial_seed = 42;\n",
" int min_val = -10;\n",
" int max_val = 10;\n",
"\n",
" auto start_time = std::chrono::high_resolution_clock::now();\n",
" long long result = total_max_subarray_sum(n, initial_seed, min_val, max_val);\n",
" auto end_time = std::chrono::high_resolution_clock::now();\n",
"\n",
" std::chrono::duration<double> elapsed = end_time - start_time;\n",
"\n",
" std::cout << \"Total Maximum Subarray Sum (20 runs): \" << result << std::endl;\n",
" std::cout << \"Execution Time: \" << elapsed.count() << \" seconds\" << std::endl;\n",
"\n",
" return 0;\n",
"}\n",
"```"
]
}
],
"source": [
"optimize_gpt(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1moptimized.cpp:11:28: \u001b[0m\u001b[0;1;35mwarning: \u001b[0m\u001b[1mimplicit conversion from 'long' to 'const unsigned int' changes value from 4294967296 to 0 [-Wconstant-conversion]\u001b[0m\n",
" const unsigned int m = 4294967296; // 2^32\n",
"\u001b[0;1;32m ~ ^~~~~~~~~~\n",
"\u001b[0m1 warning generated.\n",
"Total Maximum Subarray Sum (20 runs): 0\n",
"Execution Time: 0.689923 seconds\n"
]
}
],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
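{
"cell_type": "markdown",
"id": "note-3627-f1",
"metadata": {},
"source": [
"Why GPT's version fails (our diagnosis from the compiler warning above): it declared `m` as a 32-bit `unsigned int` and assigned 4294967296 (2^32), which does not fit, so `m` silently becomes 0 and `value % m` divides by zero - undefined behaviour, hence the bogus total of 0. Since `unsigned int` arithmetic already wraps modulo 2^32, dropping the `% m` would fix it; Claude's version below sidesteps the issue with 64-bit types and `1ULL << 32`."
]
},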
{
"cell_type": "code",
"execution_count": 21,
"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"#include <iostream>\n",
"#include <vector>\n",
"#include <chrono>\n",
"#include <limits>\n",
"#include <cstdint>\n",
"#include <iomanip>\n",
"\n",
"class LCG {\n",
"private:\n",
" uint64_t value;\n",
" const uint64_t a = 1664525;\n",
" const uint64_t c = 1013904223;\n",
" const uint64_t m = 1ULL << 32;\n",
"\n",
"public:\n",
" LCG(uint64_t seed) : value(seed) {}\n",
"\n",
" uint64_t next() {\n",
" value = (a * value + c) % m;\n",
" return value;\n",
" }\n",
"};\n",
"\n",
"int64_t max_subarray_sum(int n, uint64_t seed, int min_val, int max_val) {\n",
" LCG lcg(seed);\n",
" std::vector<int64_t> random_numbers(n);\n",
" for (int i = 0; i < n; ++i) {\n",
" random_numbers[i] = static_cast<int64_t>(lcg.next() % (max_val - min_val + 1) + min_val);\n",
" }\n",
"\n",
" int64_t max_sum = std::numeric_limits<int64_t>::min();\n",
" int64_t current_sum = 0;\n",
" \n",
" for (int i = 0; i < n; ++i) {\n",
" current_sum = std::max(current_sum + random_numbers[i], random_numbers[i]);\n",
" max_sum = std::max(max_sum, current_sum);\n",
" }\n",
" \n",
" return max_sum;\n",
"}\n",
"\n",
"int64_t total_max_subarray_sum(int n, uint64_t initial_seed, int min_val, int max_val) {\n",
" int64_t total_sum = 0;\n",
" LCG lcg(initial_seed);\n",
" for (int i = 0; i < 20; ++i) {\n",
" uint64_t seed = lcg.next();\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val);\n",
" }\n",
" return total_sum;\n",
"}\n",
"\n",
"int main() {\n",
" int n = 10000;\n",
" uint64_t initial_seed = 42;\n",
" int min_val = -10;\n",
" int max_val = 10;\n",
"\n",
" auto start_time = std::chrono::high_resolution_clock::now();\n",
" int64_t result = total_max_subarray_sum(n, initial_seed, min_val, max_val);\n",
" auto end_time = std::chrono::high_resolution_clock::now();\n",
"\n",
" auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);\n",
"\n",
" std::cout << \"Total Maximum Subarray Sum (20 runs): \" << result << std::endl;\n",
" std::cout << std::fixed << std::setprecision(6);\n",
" std::cout << \"Execution Time: \" << duration.count() / 1e6 << \" seconds\" << std::endl;\n",
"\n",
" return 0;\n",
"}"
]
}
],
"source": [
"optimize_claude(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "0c181036-8193-4fdd-aef3-fc513b218d43",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Total Maximum Subarray Sum (20 runs): 10980\n",
"Execution Time: 0.001933 seconds\n"
]
}
],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "8669f56b-8314-4582-a167-78842caea131",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7862/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" convert = gr.Button(\"Convert code\")\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "19bf2bff-a822-4009-a539-f003b1651383",
"metadata": {},
"outputs": [],
"source": [
"def execute_python(code):\n",
" try:\n",
" output = io.StringIO()\n",
" sys.stdout = output\n",
" exec(code)\n",
" finally:\n",
" sys.stdout = sys.__stdout__\n",
" return output.getvalue()"
]
},
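{
"cell_type": "markdown",
"id": "note-3846-g1",
"metadata": {},
"source": [
"A hedged alternative: reassigning `sys.stdout` by hand works, but `contextlib.redirect_stdout` from the standard library restores the stream automatically even if `exec` raises. `execute_python_safer` is an illustrative name."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "note-3846-g2",
"metadata": {},
"outputs": [],
"source": [
"from contextlib import redirect_stdout\n",
"\n",
"def execute_python_safer(code):\n",
"    output = io.StringIO()\n",
"    with redirect_stdout(output):\n",
"        exec(code)\n",
"    return output.getvalue()"
]
},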
{
"cell_type": "code",
"execution_count": 29,
"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6",
"metadata": {},
"outputs": [],
"source": [
"def execute_cpp(code):\n",
" write_output(code)\n",
" try:\n",
" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
" compile_result = subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n",
" run_cmd = [\"./optimized\"]\n",
" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n",
" return run_result.stdout\n",
" except subprocess.CalledProcessError as e:\n",
" return f\"An error occurred:\\n{e.stderr}\""
]
},
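{
"cell_type": "markdown",
"id": "note-3865-h1",
"metadata": {},
"source": [
"The compile command above assumes clang++ on an Apple Silicon Mac. As a hedged sketch for other machines, a small helper can pick whichever compiler is on the PATH and add the ARM flag only where it applies; `compiler_cmd` is a hypothetical name and assumes some C++ compiler is installed."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "note-3865-h2",
"metadata": {},
"outputs": [],
"source": [
"import platform\n",
"import shutil\n",
"\n",
"def compiler_cmd(src=\"optimized.cpp\", out=\"optimized\"):\n",
"    # Prefer clang++, fall back to g++ (assumes at least one is installed)\n",
"    cxx = shutil.which(\"clang++\") or shutil.which(\"g++\")\n",
"    cmd = [cxx, \"-O3\", \"-std=c++17\", \"-o\", out, src]\n",
"    # Apple-specific tuning only makes sense on macOS ARM\n",
"    if platform.system() == \"Darwin\" and platform.machine() == \"arm64\":\n",
"        cmd.insert(1, \"-march=armv8.3-a\")\n",
"    return cmd"
]
},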
{
"cell_type": "code",
"execution_count": 30,
"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
".python {background-color: #306998;}\n",
".cpp {background-color: #050;}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "f1303932-160c-424b-97a8-d28c816721b2",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" convert = gr.Button(\"Convert code\")\n",
" with gr.Row():\n",
" python_run = gr.Button(\"Run Python\")\n",
" cpp_run = gr.Button(\"Run C++\")\n",
" with gr.Row():\n",
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "77a80857-4632-4de8-a28f-b614bcbe2f40",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

780
week4/day4.ipynb

@ -0,0 +1,780 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4a6ab9a2-28a2-445d-8512-a0dc8d1b54e9",
"metadata": {},
"source": [
"# Code Generator\n",
"\n",
"The requirement: use a Frontier model to generate high performance C++ code from Python code"
]
},
{
"cell_type": "code",
"execution_count": 124,
"id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import io\n",
"import sys\n",
"import json\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import subprocess"
]
},
{
"cell_type": "code",
"execution_count": 125,
"id": "4f672e1c-87e9-4865-b760-370fa605e614",
"metadata": {},
"outputs": [],
"source": [
"# environment\n",
"\n",
"load_dotenv()\n",
"os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n",
"os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')"
]
},
{
"cell_type": "code",
"execution_count": 126,
"id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
"metadata": {},
"outputs": [],
"source": [
"# initialize\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\""
]
},
{
"cell_type": "code",
"execution_count": 127,
"id": "6896636f-923e-4a2c-9d6c-fac07828a201",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. \"\n",
"system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n",
"system_message += \"The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.\""
]
},
{
"cell_type": "code",
"execution_count": 128,
"id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(python):\n",
" user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. \"\n",
" user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n",
" user_prompt += \"Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n",
" user_prompt += python\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 129,
"id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
"metadata": {},
"outputs": [],
"source": [
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": 130,
"id": "71e1ba8c-5b05-4726-a9f3-8d8c6257350b",
"metadata": {},
"outputs": [],
"source": [
"# write to a file called optimized.cpp\n",
"\n",
"def write_output(cpp):\n",
" code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n",
" with open(\"optimized.cpp\", \"w\") as f:\n",
" f.write(code)"
]
},
{
"cell_type": "code",
"execution_count": 131,
"id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
"metadata": {},
"outputs": [],
"source": [
"def optimize_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" print(fragment, end='', flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": 132,
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
"metadata": {},
"outputs": [],
"source": [
"def optimize_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" print(text, end=\"\", flush=True)\n",
" write_output(reply)"
]
},
{
"cell_type": "code",
"execution_count": 133,
"id": "a1cbb778-fa57-43de-b04b-ed523f396c38",
"metadata": {},
"outputs": [],
"source": [
"pi = \"\"\"\n",
"import time\n",
"\n",
"def calculate(iterations, param1, param2):\n",
" result = 1.0\n",
" for i in range(1, iterations+1):\n",
" j = i * param1 - param2\n",
" result -= (1/j)\n",
" j = i * param1 + param2\n",
" result += (1/j)\n",
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7fe1cd4b-d2c5-4303-afed-2115a3fef200",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "105db6f9-343c-491d-8e44-3a5328b81719",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf26ee95-0c77-491d-9a91-579a1e96a8a3",
"metadata": {},
"outputs": [],
"source": [
"exec(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4194e40c-04ab-4940-9d64-b4ad37c5bb40",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "983a11fe-e24d-4c65-8269-9802c5ef3ae6",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(pi)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5a766f9-3d23-4bb4-a1d4-88ec44b61ddf",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": 134,
"id": "c3b497b3-f569-420e-b92e-fb0f49957ce0",
"metadata": {},
"outputs": [],
"source": [
"python_hard = \"\"\"\n",
"def lcg(seed, a=1664525, c=1013904223, m=2**32):\n",
" value = seed\n",
" while True:\n",
" value = (a * value + c) % m\n",
" yield value\n",
" \n",
"def max_subarray_sum(n, seed, min_val, max_val):\n",
" lcg_gen = lcg(seed)\n",
" random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n",
" max_sum = float('-inf')\n",
" for i in range(n):\n",
" current_sum = 0\n",
" for j in range(i, n):\n",
" current_sum += random_numbers[j]\n",
" if current_sum > max_sum:\n",
" max_sum = current_sum\n",
" return max_sum\n",
"\n",
"def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n",
" total_sum = 0\n",
" lcg_gen = lcg(initial_seed)\n",
" for _ in range(20):\n",
" seed = next(lcg_gen)\n",
" total_sum += max_subarray_sum(n, seed, min_val, max_val)\n",
" return total_sum\n",
"\n",
"# Parameters\n",
"n = 10000 # Number of random numbers\n",
"initial_seed = 42 # Initial seed for the LCG\n",
"min_val = -10 # Minimum value of random numbers\n",
"max_val = 10 # Maximum value of random numbers\n",
"\n",
"# Timing the function\n",
"import time\n",
"start_time = time.time()\n",
"result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n",
"end_time = time.time()\n",
"\n",
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n",
"print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dab5e4bc-276c-4555-bd4c-12c699d5e899",
"metadata": {},
"outputs": [],
"source": [
"exec(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8d24ed5-2c15-4f55-80e7-13a3952b3cb8",
"metadata": {},
"outputs": [],
"source": [
"optimize_gpt(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0b3d073-88a2-40b2-831c-6f0c345c256f",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e9305446-1d0c-4b51-866a-b8c1e299bf5c",
"metadata": {},
"outputs": [],
"source": [
"optimize_claude(python_hard)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0c181036-8193-4fdd-aef3-fc513b218d43",
"metadata": {},
"outputs": [],
"source": [
"!clang++ -O3 -std=c++17 -march=armv8.3-a -o optimized optimized.cpp\n",
"!./optimized"
]
},
{
"cell_type": "code",
"execution_count": 135,
"id": "0be9f47d-5213-4700-b0e2-d444c7c738c0",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": 136,
"id": "8669f56b-8314-4582-a167-78842caea131",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=2000,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply.replace('```cpp\\n','').replace('```','')"
]
},
{
"cell_type": "code",
"execution_count": 137,
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", lines=10, value=python_hard)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" convert = gr.Button(\"Convert code\")\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": 138,
"id": "19bf2bff-a822-4009-a539-f003b1651383",
"metadata": {},
"outputs": [],
"source": [
"def execute_python(code):\n",
" try:\n",
" output = io.StringIO()\n",
" sys.stdout = output\n",
" exec(code)\n",
" finally:\n",
" sys.stdout = sys.__stdout__\n",
" return output.getvalue()"
]
},
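 {
  "cell_type": "markdown",
  "id": "5d4c3b2a-1f0e-4a9b-8c7d-6e5f4a3b2c1d",
  "metadata": {},
  "source": [
   "`execute_python` runs the generated code with `exec` inside the notebook process, so anything the code does affects this session. A more isolated sketch (a hypothetical helper, not part of the original, assuming the snippet only needs the standard library) runs it in a fresh interpreter:"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": null,
  "id": "8f7e6d5c-4b3a-4c2d-9e1f-0a9b8c7d6e5f",
  "metadata": {},
  "outputs": [],
  "source": [
   "import subprocess, sys\n",
   "\n",
   "def execute_python_isolated(code):\n",
   "    # Run the snippet in a separate interpreter so this session is untouched\n",
   "    result = subprocess.run([sys.executable, \"-c\", code],\n",
   "                            capture_output=True, text=True, timeout=120)\n",
   "    return result.stdout if result.returncode == 0 else result.stderr"
  ]
 },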
{
"cell_type": "code",
"execution_count": 139,
"id": "77f3ab5d-fcfb-4d3f-8728-9cacbf833ea6",
"metadata": {},
"outputs": [],
"source": [
"def execute_cpp(code):\n",
" write_output(code)\n",
" try:\n",
" compile_cmd = [\"clang++\", \"-Ofast\", \"-std=c++17\", \"-march=armv8.5-a\", \"-mtune=apple-m1\", \"-mcpu=apple-m1\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
" compile_result = subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n",
" run_cmd = [\"./optimized\"]\n",
" run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n",
" return run_result.stdout\n",
" except subprocess.CalledProcessError as e:\n",
" return f\"An error occurred:\\n{e.stderr}\""
]
},
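 {
  "cell_type": "markdown",
  "id": "3b2a1c0d-9e8f-4a7b-6c5d-4e3f2a1b0c9d",
  "metadata": {},
  "source": [
   "A generated binary that loops forever would hang the UI. One possible safeguard -- a sketch reusing `write_output` from earlier in this notebook, with portable compile flags -- is to put a timeout on the run step:"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": null,
  "id": "6e5f4a3b-2c1d-4b0a-9f8e-7d6c5b4a3f2e",
  "metadata": {},
  "outputs": [],
  "source": [
   "def execute_cpp_with_timeout(code, timeout_s=60):\n",
   "    write_output(code)\n",
   "    compile_cmd = [\"clang++\", \"-O3\", \"-std=c++17\", \"-o\", \"optimized\", \"optimized.cpp\"]\n",
   "    try:\n",
   "        subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n",
   "        run_result = subprocess.run([\"./optimized\"], check=True, text=True,\n",
   "                                    capture_output=True, timeout=timeout_s)\n",
   "        return run_result.stdout\n",
   "    except subprocess.TimeoutExpired:\n",
   "        return f\"Timed out after {timeout_s} seconds\"\n",
   "    except subprocess.CalledProcessError as e:\n",
   "        return f\"An error occurred:\\n{e.stderr}\""
  ]
 },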
{
"cell_type": "code",
"execution_count": 140,
"id": "9a2274f1-d03b-42c0-8dcc-4ce159b18442",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
".python {background-color: #306998;}\n",
".cpp {background-color: #050;}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1303932-160c-424b-97a8-d28c816721b2",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" convert = gr.Button(\"Convert code\")\n",
" with gr.Row():\n",
" python_run = gr.Button(\"Run Python\")\n",
" cpp_run = gr.Button(\"Run C++\")\n",
" with gr.Row():\n",
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": 141,
"id": "bb8c5b4e-ec51-4f21-b3f8-6aa94fede86d",
"metadata": {},
"outputs": [],
"source": [
"from huggingface_hub import login, InferenceClient\n",
"from transformers import AutoTokenizer"
]
},
{
"cell_type": "code",
"execution_count": 142,
"id": "13347633-4606-4e38-9927-80c39e65c1f1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Token is valid (permission: write).\n",
"Your token has been saved in your configured git credential helpers (osxkeychain).\n",
"Your token has been saved to /Users/ed/.cache/huggingface/token\n",
"Login successful\n"
]
}
],
"source": [
"hf_token = os.environ['HF_TOKEN']\n",
"login(hf_token, add_to_git_credential=True)"
]
},
{
"cell_type": "code",
"execution_count": 143,
"id": "ef60a4df-6267-4ebd-8eed-dcb917af0a5e",
"metadata": {},
"outputs": [],
"source": [
"code_qwen = \"Qwen/CodeQwen1.5-7B-Chat\"\n",
"code_gemma = \"google/codegemma-7b-it\"\n",
"CODE_QWEN_URL = \"https://h1vdol7jxhje3mpn.us-east-1.aws.endpoints.huggingface.cloud\"\n",
"CODE_GEMMA_URL = \"https://c5hggiyqachmgnqg.us-east-1.aws.endpoints.huggingface.cloud\""
]
},
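 {
  "cell_type": "markdown",
  "id": "1a0b9c8d-7e6f-4d5c-8b4a-3f2e1d0c9b8a",
  "metadata": {},
  "source": [
   "The two endpoint URLs above are dedicated Inference Endpoints tied to the author's account, so they will not resolve for anyone else. As a sketch, `InferenceClient` can instead be given a model id to use Hugging Face's serverless inference (subject to model availability):"
  ]
 },
 {
  "cell_type": "code",
  "execution_count": null,
  "id": "4f3e2d1c-0b9a-4c8d-7e6f-5a4b3c2d1e0f",
  "metadata": {},
  "outputs": [],
  "source": [
   "# Sketch: serverless inference by model id instead of a dedicated endpoint URL\n",
   "serverless_client = InferenceClient(code_qwen, token=hf_token)"
  ]
 },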
{
"cell_type": "code",
"execution_count": 144,
"id": "695ce389-a903-4533-a2f1-cd9e2a6af8f2",
"metadata": {},
"outputs": [],
"source": [
"tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n",
"messages = messages_for(pi)\n",
"text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)"
]
},
{
"cell_type": "code",
"execution_count": 147,
"id": "d4548e96-0b32-4793-bdd6-1b072c2f26ab",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<|im_start|>system\n",
"You are an assistant that reimplements Python code in high performance C++ for an M1 Mac. Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. The C++ response needs to produce an identical output in the fastest possible time. Keep implementations of random number generators identical so that results match exactly.<|im_end|>\n",
"<|im_start|>user\n",
"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. Respond only with C++ code; do not explain your work other than a few comments. Pay attention to number types to ensure no int overflows. Remember to #include all necessary C++ packages such as iomanip.\n",
"\n",
"\n",
"import time\n",
"\n",
"def calculate(iterations, param1, param2):\n",
" result = 1.0\n",
" for i in range(1, iterations+1):\n",
" j = i * param1 - param2\n",
" result -= (1/j)\n",
" j = i * param1 + param2\n",
" result += (1/j)\n",
" return result\n",
"\n",
"start_time = time.time()\n",
"result = calculate(100_000_000, 4, 1) * 4\n",
"end_time = time.time()\n",
"\n",
"print(f\"Result: {result:.12f}\")\n",
"print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n",
"<|im_end|>\n",
"<|im_start|>assistant\n",
"\n"
]
}
],
"source": [
"print(text)"
]
},
{
"cell_type": "code",
"execution_count": 148,
"id": "bb2a126b-09e7-4966-bc97-0ef5c2cc7896",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is the C++ code that achieves the same result as the Python code:\n",
"\n",
"```cpp\n",
"#include <iostream>\n",
"#include <iomanip>\n",
"#include <chrono>\n",
"\n",
"double calculate(int iterations, double param1, double param2) {\n",
" double result = 1.0;\n",
" for (int i = 1; i <= iterations; ++i) {\n",
" double j = i * param1 - param2;\n",
" result -= 1.0 / j;\n",
" j = i * param1 + param2;\n",
" result += 1.0 / j;\n",
" }\n",
" return result;\n",
"}\n",
"\n",
"int main() {\n",
" auto start_time = std::chrono::high_resolution_clock::now();\n",
" double result = calculate(100000000, 4.0, 1.0) * 4.0;\n",
" auto end_time = std::chrono::high_resolution_clock::now();\n",
"\n",
" std::cout << \"Result: \" << std::setprecision(12) << result << std::endl;\n",
" std::cout << \"Execution Time: \" << std::chrono::duration<double>(end_time - start_time).count() << \" seconds\" << std::endl;\n",
"\n",
" return 0;\n",
"}\n",
"```\n",
"\n",
"This C++ code does the same thing as the Python code: it calculates a mathematical function and measures the execution time. The `calculate` function is implemented in a similar way to the Python code, but it uses `double` instead of `int` for the parameters and the result. The `main` function measures the execution time using `std::chrono::high_resolution_clock` and prints the result and execution time to the console. The `std::setprecision(12)` is used to print the result with 12 decimal places.<|im_end|>"
]
}
],
"source": [
"client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n",
"stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
"for r in stream:\n",
" print(r.token.text, end = \"\")"
]
},
{
"cell_type": "code",
"execution_count": 149,
"id": "127a52e5-ad85-42b7-a0f5-9afda5efe090",
"metadata": {},
"outputs": [],
"source": [
"def stream_code_quen(python):\n",
" tokenizer = AutoTokenizer.from_pretrained(code_qwen)\n",
" messages = messages_for(python)\n",
" text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
" client = InferenceClient(CODE_QWEN_URL, token=hf_token)\n",
" stream = client.text_generation(text, stream=True, details=True, max_new_tokens=3000)\n",
" result = \"\"\n",
" for r in stream:\n",
" result += r.token.text\n",
" yield result "
]
},
{
"cell_type": "code",
"execution_count": 150,
"id": "a82387d1-7651-4923-995b-fe18356fcaa6",
"metadata": {},
"outputs": [],
"source": [
"def optimize(python, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(python)\n",
" elif model==\"CodeQwen\":\n",
" result = stream_code_qwen(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": 152,
"id": "f9ca2e6f-60c1-4e5f-b570-63c75b2d189b",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7868/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 152,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with gr.Blocks(css=css) as ui:\n",
" gr.Markdown(\"## Convert code from Python to C++\")\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", value=python_hard, lines=10)\n",
" cpp = gr.Textbox(label=\"C++ code:\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\", \"CodeQwen\"], label=\"Select model\", value=\"GPT\")\n",
" with gr.Row():\n",
" convert = gr.Button(\"Convert code\")\n",
" with gr.Row():\n",
" python_run = gr.Button(\"Run Python\")\n",
" cpp_run = gr.Button(\"Run C++\")\n",
" with gr.Row():\n",
" python_out = gr.TextArea(label=\"Python result:\", elem_classes=[\"python\"])\n",
" cpp_out = gr.TextArea(label=\"C++ result:\", elem_classes=[\"cpp\"])\n",
"\n",
" convert.click(optimize, inputs=[python, model], outputs=[cpp])\n",
" python_run.click(execute_python, inputs=[python], outputs=[python_out])\n",
" cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])\n",
"\n",
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f12bfe23-135b-45a7-8c6d-0c27d68b0a82",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

BIN
week4/optimized

Binary file not shown.

51
week4/optimized.cpp

@@ -0,0 +1,51 @@
#include <iostream>
#include <random>
#include <chrono>
#include <iomanip>
// Function to generate random numbers using Mersenne Twister
std::mt19937 gen(42);
// Function to calculate maximum subarray sum
int max_subarray_sum(int n, int min_val, int max_val) {
std::uniform_int_distribution<> dis(min_val, max_val);
int max_sum = std::numeric_limits<int>::min();
int current_sum = 0;
for (int i = 0; i < n; ++i) {
current_sum += dis(gen);
if (current_sum > max_sum) {
max_sum = current_sum;
}
if (current_sum < 0) {
current_sum = 0;
}
}
return max_sum;
}
// Function to calculate total maximum subarray sum
int total_max_subarray_sum(int n, int initial_seed, int min_val, int max_val) {
gen.seed(initial_seed);
int total_sum = 0;
for (int i = 0; i < 20; ++i) {
total_sum += max_subarray_sum(n, min_val, max_val);
}
return total_sum;
}
int main() {
int n = 10000; // Number of random numbers
int initial_seed = 42; // Initial seed for the Mersenne Twister
int min_val = -10; // Minimum value of random numbers
int max_val = 10; // Maximum value of random numbers
// Timing the function
auto start_time = std::chrono::high_resolution_clock::now();
int result = total_max_subarray_sum(n, initial_seed, min_val, max_val);
auto end_time = std::chrono::high_resolution_clock::now();
std::cout << "Total Maximum Subarray Sum (20 runs): " << result << std::endl;
std::cout << "Execution Time: " << std::setprecision(6) << std::fixed << std::chrono::duration<double>(end_time - start_time).count() << " seconds" << std::endl;
return 0;
}