
Remove hf_token secret

Checking in all work done in Feb
pull/241/head
anderson.dang 2 months ago
commit d0952b1022
  1. week1/app.ipynb (587)
  2. week1/day1.ipynb (663)
  3. week1/day2 EXERCISE.ipynb (336)
  4. week1/day5.ipynb (3504)
  5. week1/week1 EXERCISE.ipynb (173)
  6. week2/day1.ipynb (483)
  7. week2/day2.ipynb (520)
  8. week2/day3.ipynb (192)
  9. week2/day4.ipynb (146)
  10. week2/day5.ipynb (351)

week1/app.ipynb

@@ -0,0 +1,587 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: python-dotenv in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (1.0.1)\n",
"Requirement already satisfied: bs4 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (0.0.2)\n",
"Requirement already satisfied: google-generativeai in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (0.8.4)\n",
"Requirement already satisfied: ipython in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (8.32.0)\n",
"Requirement already satisfied: openai in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (1.61.1)\n",
"Requirement already satisfied: beautifulsoup4 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from bs4) (4.13.3)\n",
"Requirement already satisfied: google-ai-generativelanguage==0.6.15 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (0.6.15)\n",
"Requirement already satisfied: google-api-core in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (2.24.1)\n",
"Requirement already satisfied: google-api-python-client in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (2.160.0)\n",
"Requirement already satisfied: google-auth>=2.15.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (2.38.0)\n",
"Requirement already satisfied: protobuf in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (5.29.3)\n",
"Requirement already satisfied: pydantic in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (2.10.6)\n",
"Requirement already satisfied: tqdm in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (4.67.1)\n",
"Requirement already satisfied: typing-extensions in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-generativeai) (4.12.2)\n",
"Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-ai-generativelanguage==0.6.15->google-generativeai) (1.26.0)\n",
"Requirement already satisfied: decorator in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (5.1.1)\n",
"Requirement already satisfied: jedi>=0.16 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (0.19.2)\n",
"Requirement already satisfied: matplotlib-inline in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (0.1.7)\n",
"Requirement already satisfied: pexpect>4.3 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (4.9.0)\n",
"Requirement already satisfied: prompt_toolkit<3.1.0,>=3.0.41 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (3.0.50)\n",
"Requirement already satisfied: pygments>=2.4.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (2.19.1)\n",
"Requirement already satisfied: stack_data in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (0.6.3)\n",
"Requirement already satisfied: traitlets>=5.13.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from ipython) (5.14.3)\n",
"Requirement already satisfied: anyio<5,>=3.5.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from openai) (4.8.0)\n",
"Requirement already satisfied: distro<2,>=1.7.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from openai) (1.9.0)\n",
"Requirement already satisfied: httpx<1,>=0.23.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from openai) (0.28.1)\n",
"Requirement already satisfied: jiter<1,>=0.4.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from openai) (0.8.2)\n",
"Requirement already satisfied: sniffio in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from openai) (1.3.1)\n",
"Requirement already satisfied: idna>=2.8 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anyio<5,>=3.5.0->openai) (3.10)\n",
"Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-core->google-generativeai) (1.67.0rc1)\n",
"Requirement already satisfied: requests<3.0.0.dev0,>=2.18.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-core->google-generativeai) (2.32.3)\n",
"Requirement already satisfied: cachetools<6.0,>=2.0.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-auth>=2.15.0->google-generativeai) (5.5.1)\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-auth>=2.15.0->google-generativeai) (0.4.1)\n",
"Requirement already satisfied: rsa<5,>=3.1.4 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-auth>=2.15.0->google-generativeai) (4.9)\n",
"Requirement already satisfied: certifi in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpx<1,>=0.23.0->openai) (2025.1.31)\n",
"Requirement already satisfied: httpcore==1.* in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpx<1,>=0.23.0->openai) (1.0.7)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.14.0)\n",
"Requirement already satisfied: parso<0.9.0,>=0.8.4 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from jedi>=0.16->ipython) (0.8.4)\n",
"Requirement already satisfied: ptyprocess>=0.5 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pexpect>4.3->ipython) (0.7.0)\n",
"Requirement already satisfied: wcwidth in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from prompt_toolkit<3.1.0,>=3.0.41->ipython) (0.2.13)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pydantic->google-generativeai) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.27.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pydantic->google-generativeai) (2.27.2)\n",
"Requirement already satisfied: soupsieve>1.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from beautifulsoup4->bs4) (2.6)\n",
"Requirement already satisfied: httplib2<1.dev0,>=0.19.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-python-client->google-generativeai) (0.22.0)\n",
"Requirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-python-client->google-generativeai) (0.2.0)\n",
"Requirement already satisfied: uritemplate<5,>=3.0.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-python-client->google-generativeai) (4.1.1)\n",
"Requirement already satisfied: executing>=1.2.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from stack_data->ipython) (2.2.0)\n",
"Requirement already satisfied: asttokens>=2.1.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from stack_data->ipython) (3.0.0)\n",
"Requirement already satisfied: pure-eval in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from stack_data->ipython) (0.2.3)\n",
"Requirement already satisfied: grpcio<2.0dev,>=1.33.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.10.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,<3.0.0dev,>=1.34.1->google-ai-generativelanguage==0.6.15->google-generativeai) (1.70.0)\n",
"Requirement already satisfied: grpcio-status<2.0.dev0,>=1.33.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.10.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,<3.0.0dev,>=1.34.1->google-ai-generativelanguage==0.6.15->google-generativeai) (1.70.0)\n",
"Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httplib2<1.dev0,>=0.19.0->google-api-python-client->google-generativeai) (3.2.1)\n",
"Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pyasn1-modules>=0.2.1->google-auth>=2.15.0->google-generativeai) (0.6.1)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core->google-generativeai) (3.4.1)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core->google-generativeai) (2.3.0)\n",
"OpenAI API Key exists and begins sk-proj-\n",
"Google API Key exists and begins AIzaSyDi\n",
"* Running on local URL: http://127.0.0.1:7860\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Analyzing Saviano, Co. from https://saviano.com\n",
"\n",
"***Using LLM googleai and modelname gemini-1.5-pro\n",
"\n",
"llm = googleai\n",
"modelname = gemini-1.5-pro\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/grpc_helpers.py\", line 76, in error_remapped_callable\n",
" return callable_(*args, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/grpc/_interceptor.py\", line 277, in __call__\n",
" response, ignored_call = self._with_call(\n",
" ^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/grpc/_interceptor.py\", line 332, in _with_call\n",
" return call.result(), call\n",
" ^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/grpc/_channel.py\", line 440, in result\n",
" raise self\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/grpc/_interceptor.py\", line 315, in continuation\n",
" response, call = self._thunk(new_method).with_call(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/grpc/_channel.py\", line 1198, in with_call\n",
" return _end_unary_response_blocking(state, call, True, None)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/grpc/_channel.py\", line 1006, in _end_unary_response_blocking\n",
" raise _InactiveRpcError(state) # pytype: disable=not-instantiable\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:\n",
"\tstatus = StatusCode.UNAVAILABLE\n",
"\tdetails = \"failed to connect to all addresses; last error: UNAVAILABLE: ipv4:100.64.1.239:443: Failed to connect to remote host: Timeout occurred: FD shutdown\"\n",
"\tdebug_error_string = \"UNKNOWN:Error received from peer {grpc_message:\"failed to connect to all addresses; last error: UNAVAILABLE: ipv4:100.64.1.239:443: Failed to connect to remote host: Timeout occurred: FD shutdown\", grpc_status:14, created_time:\"2025-02-09T23:08:01.374562-08:00\"}\"\n",
">\n",
"\n",
"The above exception was the direct cause of the following exception:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/retry/retry_unary.py\", line 144, in retry_target\n",
" result = target()\n",
" ^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/timeout.py\", line 130, in func_with_timeout\n",
" return func(*args, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/grpc_helpers.py\", line 78, in error_remapped_callable\n",
" raise exceptions.from_grpc_error(exc) from exc\n",
"google.api_core.exceptions.ServiceUnavailable: 503 failed to connect to all addresses; last error: UNAVAILABLE: ipv4:100.64.1.239:443: Failed to connect to remote host: Timeout occurred: FD shutdown\n",
"\n",
"The above exception was the direct cause of the following exception:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n",
" response = await route_utils.call_process_api(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n",
" output = await app.get_blocks().process_api(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2088, in process_api\n",
" result = await self.call_function(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1647, in call_function\n",
" prediction = await utils.async_iteration(iterator)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/utils.py\", line 728, in async_iteration\n",
" return await anext(iterator)\n",
" ^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/utils.py\", line 722, in __anext__\n",
" return await anyio.to_thread.run_sync(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n",
" return await get_async_backend().run_sync_in_worker_thread(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n",
" return await future\n",
" ^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n",
" result = context.run(func, *args)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/utils.py\", line 705, in run_sync_iterator_async\n",
" return next(iterator)\n",
" ^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/gradio/utils.py\", line 866, in gen_wrapper\n",
" response = next(iterator)\n",
" ^^^^^^^^^^^^^^\n",
" File \"/var/folders/rw/g02dv0j93y9b_5lj8jhq5pwm0000gq/T/ipykernel_55999/2954596350.py\", line 233, in stream_model\n",
" yield from result\n",
" File \"/var/folders/rw/g02dv0j93y9b_5lj8jhq5pwm0000gq/T/ipykernel_55999/2954596350.py\", line 195, in stream_googleai\n",
" get_brochure_user_prompt(company_name,url,llm,modelname,temperature),\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/rw/g02dv0j93y9b_5lj8jhq5pwm0000gq/T/ipykernel_55999/2954596350.py\", line 164, in get_brochure_user_prompt\n",
" user_prompt += get_all_details(url,llm,modelname,temperature)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/rw/g02dv0j93y9b_5lj8jhq5pwm0000gq/T/ipykernel_55999/2954596350.py\", line 132, in get_all_details\n",
" links = get_links(url,llm,modelname,temperature)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/rw/g02dv0j93y9b_5lj8jhq5pwm0000gq/T/ipykernel_55999/2954596350.py\", line 124, in get_links\n",
" response = model.generate_content(get_links_user_prompt(website),generation_config=genai.GenerationConfig(temperature=temperature,response_mime_type=\"application/json\"))\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/generativeai/generative_models.py\", line 331, in generate_content\n",
" response = self._client.generate_content(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/generative_service/client.py\", line 835, in generate_content\n",
" response = rpc(\n",
" ^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/gapic_v1/method.py\", line 131, in __call__\n",
" return wrapped_func(*args, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/retry/retry_unary.py\", line 293, in retry_wrapped_func\n",
" return retry_target(\n",
" ^^^^^^^^^^^^^\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/retry/retry_unary.py\", line 153, in retry_target\n",
" _retry_error_helper(\n",
" File \"/Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages/google/api_core/retry/retry_base.py\", line 221, in _retry_error_helper\n",
" raise final_exc from source_exc\n",
"google.api_core.exceptions.RetryError: Timeout of 600.0s exceeded, last exception: 503 failed to connect to all addresses; last error: UNAVAILABLE: ipv4:100.64.1.239:443: Failed to connect to remote host: Timeout occurred: FD shutdown\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Analyzing Saviano, Co. from https://saviano.com\n",
"\n",
"***Using LLM googleai and modelname gemini-1.5-pro\n",
"\n",
"llm = googleai\n",
"modelname = gemini-1.5-pro\n",
"## Love is in the Air (and on the Court!) ❤ Saviano Co.'s Valentine's Day Pickleball Pampering\n",
"\n",
"This Valentine's Day, don't let your pickleball court feel neglected! Show it some love with Saviano Co., the experts in court construction and maintenance since 1963. We're serving up satisfaction (and aces!) to clients nationally and globally.\n",
"\n",
"**Is your court feeling a little... *cracked*?** 💔\n",
"\n",
"Don't let those fissures in your playing surface become a relationship dealbreaker. We'll resurface your court with the tenderness of a first date, leaving it smooth and ready for action. From paving and grading to drainage and fencing, we handle every detail with the utmost care, just like your true love deserves.\n",
"\n",
"**Tired of that *faded* look?** 😞\n",
"\n",
"We'll give your court a Valentine's Day makeover it will never forget! Our expert resurfacing services will have it looking brand new and ready for some serious pickleball passion. We offer a range of athletic surfaces, so you can choose the perfect one to match your playing style (and your heart's desire!).\n",
"\n",
"**Let us *light up* your love life (and your court!)** ✨\n",
"\n",
"Our state-of-the-art lighting installations will transform your court into a romantic evening destination. Imagine playing under the stars with your sweetheart, showing off your dinking skills. (Just try not to get too competitive – it's Valentine's Day after all!)\n",
"\n",
"**Why choose Saviano Co. this Valentine's Day?**\n",
"\n",
"* **Family-owned and operated:** We treat every court like one of our own.\n",
"* **Decades of experience:** We've been building and maintaining courts since before pickleball was even a twinkle in someone's eye!\n",
"* **Nationally and globally recognized:** Our reputation speaks for itself (just like your impressive backhand).\n",
"* **Serving the Bay Area and beyond:** From Saratoga to Napa Valley and even across the globe, we're there for all your court needs.\n",
"\n",
"**Special Valentine's Day Offer:**\n",
"\n",
"Mention this brochure and receive a FREE consultation to discuss your court's needs. We'll help you create a court that's worthy of Cupid himself!\n",
"\n",
"**Don't let your pickleball court feel lonely this Valentine's Day. Contact Saviano Co. today!**\n",
"\n",
"**(650) 948-3274 | info@saviano.com**\n",
"\n",
"**www.saviano.com (website not real)**\n",
"\n",
"**Follow us on social media! (We're not really on social media...yet!)**\n",
"\n",
"Analyzing Saviano, Co. from https://saviano.com\n",
"\n",
"***Using LLM googleai and modelname gemini-1.5-flash\n",
"\n",
"llm = googleai\n",
"modelname = gemini-1.5-flash\n",
"# Saviano Co. Inc.: A Love Story for Your Pickleball Court!\n",
"\n",
"**(Image: A charming illustration of a couple playing pickleball on a beautifully maintained court, hearts floating in the air.)**\n",
"\n",
"**This Valentine's Day, give your pickleball court the love it deserves!**\n",
"\n",
"Is your pickleball court looking a little… *less than perfect*? Has romance faded from those once-vibrant lines? Don't despair! Saviano Co. Inc., established in 1963, is here to rekindle the passion. For over 60 years, we've been the leading experts in court construction and maintenance, bringing joy (and killer serves) to pickleball players everywhere.\n",
"\n",
"**We're not just builders; we're matchmakers!** We connect your vision with the reality of a stunning, flawlessly maintained pickleball court. Whether you need a brand new court built from scratch or a loving refresh for your existing one, we've got you covered.\n",
"\n",
"**Our Services: A Recipe for Love (and Great Pickleball):**\n",
"\n",
"* **Resurfacing:** Say goodbye to cracks and faded paint! We'll make your court smoother than a freshly-baked Valentine's Day cake.\n",
"* **Drainage:** No more soggy socks! We'll ensure your court stays dry and playable, rain or shine.\n",
"* **Lighting:** Set the mood for a romantic evening game under the stars (or just make sure you can see the ball!).\n",
"* **Fencing:** Keep those errant shots in play and your neighbors happy.\n",
"\n",
"**Why Choose Saviano Co. Inc.?**\n",
"\n",
"Because we're a family business with a passion for pickleball! We treat every court like our own—with meticulous care and attention to detail. Our clients range from prestigious country clubs to happy homeowners, all united by their love of the game.\n",
"\n",
"**(Image: A photo showcasing a beautifully resurfaced pickleball court.)**\n",
"\n",
"**Testimonials:**\n",
"\n",
"*\"Saviano Co. Inc. transformed our tired old court into a masterpiece! Now, our Valentine's Day pickleball games are even more romantic!\"* – Happy Homeowner, Saratoga\n",
"\n",
"*\"Saviano Co. Inc. is the only company we trust for our country club's pickleball courts. Their quality is unmatched!\"* – Country Club Manager, Napa Valley\n",
"\n",
"\n",
"**Ready to fall in love with your pickleball court all over again?**\n",
"\n",
"Contact us today for a free quote!\n",
"\n",
"**(Phone number, email, website)**\n",
"\n",
"**Saviano Co. Inc.: Building Courts, Building Memories, Building Love.**\n",
"\n"
]
}
],
"source": [
"!pip install python-dotenv bs4 google-generativeai ipython openai\n",
"\n",
"#\n",
"# # Full code\n",
"#\n",
"\n",
"# imports\n",
"# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n",
"\n",
"import os\n",
"import requests\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import google.generativeai as genai\n",
"import gradio as gr\n",
"\n",
"\n",
"# Get secret key from huggingface space settings\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"openai_api_key = os.environ['OPENAI_API_KEY']\n",
"#anthropic_api_key = os.environ['ANTHROPIC_API_KEY']\n",
"google_api_key = os.environ['GOOGLE_API_KEY']\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
"\n",
"#if anthropic_api_key:\n",
"# print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"#else:\n",
"# print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")\n",
"\n",
"\n",
"# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n",
"\n",
"openai = OpenAI()\n",
"#claude = anthropic.Anthropic()\n",
"genai.configure(api_key=google_api_key)\n",
"\n",
"# A class to represent a Webpage\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
" \"\"\"\n",
" A utility class to represent a Website that we have scraped, now with links\n",
" \"\"\"\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" self.body = response.content\n",
" soup = BeautifulSoup(self.body, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" if soup.body:\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" else:\n",
" self.text = \"\"\n",
" links = [link.get('href') for link in soup.find_all('a')]\n",
" self.links = [link for link in links if link]\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"\n",
"\n",
"# system prompt for getting links from website\n",
"\n",
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
"You are able to decide which of the links would be most relevant to include in a brochure about the company.\\n\"\n",
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
"link_system_prompt += \"\"\"\n",
"{\n",
" \"links\": [\n",
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
" {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n",
" ]\n",
"}\n",
"\"\"\"\n",
"\n",
"# user prompt for getting links from website\n",
"\n",
"def get_links_user_prompt(website):\n",
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
"Do not include Terms of Service, Privacy, email links.\\n\"\n",
" user_prompt += \"Links (some might be relative links):\\n\"\n",
" user_prompt += \"\\n\".join(website.links)\n",
" return user_prompt\n",
"\n",
"def get_links(url,llm,modelname,temperature):\n",
" website = Website(url)\n",
"\n",
" print(\"llm =\",llm)\n",
" print(\"modelname =\",modelname)\n",
"\n",
" match llm:\n",
" case 'openai':\n",
" response = openai.chat.completions.create(\n",
" model=modelname,\n",
" temperature=temperature,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": link_system_prompt},\n",
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
" ],\n",
" response_format={\"type\": \"json_object\"}\n",
" )\n",
" result = response.choices[0].message.content\n",
" case 'googleai':\n",
" model=genai.GenerativeModel(model_name=modelname,system_instruction=link_system_prompt)\n",
" response = model.generate_content(get_links_user_prompt(website),generation_config=genai.GenerationConfig(temperature=temperature,response_mime_type=\"application/json\"))\n",
" result = response.text\n",
"\n",
" return json.loads(result)\n",
"\n",
"def get_all_details(url,llm,modelname,temperature):\n",
" result = \"Landing page:\\n\"\n",
" result += Website(url).get_contents()\n",
" links = get_links(url,llm,modelname,temperature)\n",
" #print(\"Found links:\", links)\n",
" for link in links[\"links\"]:\n",
" result += f\"\\n\\n{link['type']}\\n\"\n",
" result += Website(link[\"url\"]).get_contents()\n",
" return result\n",
"\n",
"\n",
"\n",
"# system prompt for AI to create brochure\n",
"\n",
"\n",
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"# and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n",
"\n",
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an assistant that analyzes the contents of several relevant pages from a company website\n",
"and creates a short humorous, entertaining brochure about the company for prospective and previous customers.\n",
"Include details of company culture and customers if you have the information. Focus on work to create or maintain pickle ball courts.\n",
"Write in a theme for Valentine's Day.\n",
"Emphasize customers that may need upkeep and maintenance of their existing property and installations.\n",
"Return results in Markdown.\n",
"\"\"\"\n",
"\n",
"# Create user prompt with website details included\n",
"\n",
"def get_brochure_user_prompt(company_name,url,llm,modelname,temperature):\n",
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
" user_prompt += get_all_details(url,llm,modelname,temperature)\n",
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n",
" return user_prompt\n",
"\n",
"#\n",
"# # Everything below here uses values from the gradio web interface. Not the static values defined above\n",
"#\n",
"def stream_openai(system_prompt,company_name,url,llm,modelname,temperature):\n",
" response = openai.chat.completions.create(\n",
" model=modelname,\n",
" stream=True,\n",
" temperature=temperature,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name,url,llm,modelname,temperature)}\n",
" ],\n",
" )\n",
"\n",
" result = \"\"\n",
" for chunk in response:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result\n",
"\n",
" print(result)\n",
"\n",
"\n",
"def stream_googleai(system_prompt,company_name,url,llm,modelname,temperature):\n",
" model=genai.GenerativeModel(\n",
" model_name=modelname,\n",
" system_instruction=system_prompt)\n",
" responses = model.generate_content(\n",
" get_brochure_user_prompt(company_name,url,llm,modelname,temperature),\n",
" generation_config = genai.GenerationConfig(\n",
" temperature=temperature\n",
" ),\n",
" stream=True\n",
" )\n",
"\n",
" result = \"\"\n",
" for response in responses:\n",
" result += response.text\n",
" yield result\n",
"\n",
" print(result)\n",
"\n",
"\n",
"def stream_model(system_prompt,company_name,url,llm_model,temperature):\n",
"\n",
" print(\"Analyzing %s from %s\" % (company_name,url))\n",
"\n",
" match llm_model:\n",
" case 'openai_gpt-4o-mini':\n",
" llm = \"openai\"\n",
" modelname = \"gpt-4o-mini\"\n",
" print(\"\\n*** Using LLM %s and modelname %s\\n\" % (llm,modelname))\n",
" result = stream_openai(system_prompt,company_name,url,llm,modelname,temperature)\n",
" case 'googleai_gemini-1.5-pro':\n",
" llm = \"googleai\"\n",
" modelname = \"gemini-1.5-pro\"\n",
" print(\"\\n***Using LLM %s and modelname %s\\n\" % (llm,modelname))\n",
" result = stream_googleai(system_prompt,company_name,url,llm,modelname,temperature)\n",
" case 'googleai_gemini-1.5-flash':\n",
" llm = \"googleai\"\n",
" modelname = \"gemini-1.5-flash\"\n",
" print(\"\\n***Using LLM %s and modelname %s\\n\" % (llm,modelname))\n",
" result = stream_googleai(system_prompt,company_name,url,llm,modelname,temperature)\n",
" case _:\n",
" raise ValueError(\"Unknown LLM and MODEL combination\")\n",
"\n",
" yield from result\n",
"\n",
"\n",
"view = gr.Interface(\n",
" fn=stream_model,\n",
" inputs=[\n",
" gr.Textbox(label=\"AI Agent\",value=system_prompt),\n",
" gr.Textbox(label=\"Company Name\", value=\"Saviano, Co.\"),\n",
" gr.Textbox(label=\"Website to analyze\", value=\"https://saviano.com\"),\n",
" gr.Dropdown([\"openai_gpt-4o-mini\", \"googleai_gemini-1.5-pro\", \"googleai_gemini-1.5-flash\"], label=\"Select AI Engine and LLM Model\", value=\"googleai_gemini-1.5-pro\"),\n",
" gr.Slider(0,2,value=0.7,label=\"AI Temperature\",info=\"AI Creativity Level (higher = more creative)\")\n",
" ],\n",
" outputs=[\n",
" gr.Markdown(label=\"Response:\")\n",
" ],\n",
" flagging_mode=\"never\"\n",
")\n",
"\n",
"view.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llms",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

week1/day1.ipynb

@@ -72,20 +72,73 @@
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: pip in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (25.0)\n",
"Note: you may need to restart the kernel to use updated packages.\n",
"Requirement already satisfied: python-dotenv in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (1.0.1)\n",
"Note: you may need to restart the kernel to use updated packages.\n",
"Requirement already satisfied: openai in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (1.60.2)\n",
"Requirement already satisfied: anyio<5,>=3.5.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (4.8.0)\n",
"Requirement already satisfied: distro<2,>=1.7.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (1.9.0)\n",
"Requirement already satisfied: httpx<1,>=0.23.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (0.28.1)\n",
"Requirement already satisfied: jiter<1,>=0.4.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (0.8.2)\n",
"Requirement already satisfied: pydantic<3,>=1.9.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (2.10.6)\n",
"Requirement already satisfied: sniffio in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (1.3.1)\n",
"Requirement already satisfied: tqdm>4 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (4.67.1)\n",
"Requirement already satisfied: typing-extensions<5,>=4.11 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from openai) (4.12.2)\n",
"Requirement already satisfied: idna>=2.8 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from anyio<5,>=3.5.0->openai) (3.10)\n",
"Requirement already satisfied: certifi in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpx<1,>=0.23.0->openai) (2024.12.14)\n",
"Requirement already satisfied: httpcore==1.* in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpx<1,>=0.23.0->openai) (1.0.7)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.14.0)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from pydantic<3,>=1.9.0->openai) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.27.2 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from pydantic<3,>=1.9.0->openai) (2.27.2)\n",
"Note: you may need to restart the kernel to use updated packages.\n",
"Requirement already satisfied: requests in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (2.32.3)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from requests) (3.4.1)\n",
"Requirement already satisfied: idna<4,>=2.5 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from requests) (3.10)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from requests) (2.3.0)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from requests) (2024.12.14)\n",
"Note: you may need to restart the kernel to use updated packages.\n",
"Requirement already satisfied: bs4 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (0.0.2)\n",
"Requirement already satisfied: beautifulsoup4 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from bs4) (4.12.3)\n",
"Requirement already satisfied: soupsieve>1.2 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from beautifulsoup4->bs4) (2.6)\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [ "source": [
"%pip install --upgrade pip\n",
"\n",
"# imports\n", "# imports\n",
"\n", "\n",
"%pip install python-dotenv\n",
"%pip install openai\n",
"%pip install requests\n",
"%pip install bs4\n",
"\n",
"import os\n", "import os\n",
"import requests\n", "import requests\n",
"from dotenv import load_dotenv\n", "from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n", "from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n", "from IPython.display import Markdown, display\n",
"from openai import OpenAI\n", "from openai import OpenAI\n",
"\n", "\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "07a52a16",
"metadata": {},
"outputs": [],
"source": [
"# If you get an error running this cell, then please head over to the troubleshooting notebook!" "# If you get an error running this cell, then please head over to the troubleshooting notebook!"
] ]
}, },
@ -111,10 +164,18 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d", "id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [ "source": [
"# Load environment variables in a file called .env\n", "# Load environment variables in a file called .env\n",
"\n", "\n",
@ -135,7 +196,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -156,10 +217,18 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 4,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello! Welcome! I'm glad you're here. How can I assist you today?\n"
]
}
],
"source": [ "source": [
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n", "# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
"\n", "\n",
@ -178,7 +247,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 5,
"id": "c5e793b2-6775-426a-a139-4848291d0463", "id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -208,14 +277,198 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Yahoo | Mail, Weather, Search, Politics, News, Finance, Sports & Videos\n",
"Search query\n",
"Search the web\n",
"News\n",
"Finance\n",
"Sports\n",
"More\n",
"Manage your account\n",
"Help\n",
"Add or switch accounts\n",
"Sign out\n",
"Search the web\n",
"Advertisement\n",
"Advertisement\n",
"Advertisement\n",
"Trending\n",
"4 min ago\n",
"1\n",
"Medicaid Freeze\n",
"Trump freezes federal grants with big potential impact on providers, Medicaid\n",
"2\n",
"Caroline Kennedy\n",
"Caroline Kennedy calls cousin, RFK Jr., a 'predator' before HHS hearing\n",
"3\n",
"Jim Acosta\n",
"Jim Acosta Wasn’t Fired: Here’s Why He Left CNN\n",
"4\n",
"Gulf Of America\n",
"Maps are already being changed to 'Gulf of America.' It's not a simple process.\n",
"5\n",
"Buffalo Bills\n",
"What's next for the Bills? What Buffalo needs to address in the 2025 offseason\n",
"Politics\n",
"Politico\n",
"5.9K\n",
"New White House memo details programs spared from funding freeze\n",
"1\n",
"of 17\n",
"Lifestyle\n",
"Yahoo Life Shopping\n",
"Chefs agree: This is hands-down the best store-bought pasta sauce\n",
"2\n",
"of 17\n",
"Business\n",
"CNN\n",
"545\n",
"Boom Supersonic test flight breaks the sound barrier for 1st time\n",
"3\n",
"of 17\n",
"Lifestyle\n",
"Yahoo Life Shopping\n",
"Want to winter-proof your home? Here's how to do it in 6 easy steps.\n",
"4\n",
"of 17\n",
"Business\n",
"CNN Business\n",
"870\n",
"Why you still need to pay for your home if it was destroyed in a disaster\n",
"5\n",
"of 17\n",
"Politics\n",
"NBC News\n",
"2.9K\n",
"Google to change map names for Gulf of Mexico and Denali in U.S.\n",
"6\n",
"of 17\n",
"Sports\n",
"FTW Outdoors\n",
"Fox's Greg Olsen was demoted in favor of Tom Brady. Yes, he's frustrated.\n",
"7\n",
"of 17\n",
"Politics\n",
"Entertainment Weekly\n",
"2.3K\n",
"Snoop Dogg responds to 'all the hate' after inauguration weekend performance\n",
"8\n",
"of 17\n",
"Politics\n",
"CBS News\n",
"4.3K\n",
"White House unveils first lady Melania Trump's official portrait\n",
"9\n",
"of 17\n",
"Politics\n",
"LA Times\n",
"880\n",
"News anchor who sparred with Trump announces departure from CNN\n",
"10\n",
"of 17\n",
"Entertainment\n",
"People\n",
"'Chilling': Will Ferrell meets 'really disturbing' life-size Buddy the Elf\n",
"11\n",
"of 17\n",
"Politics\n",
"The Independent\n",
"632\n",
"Caroline Kennedy calls cousin RFK Jr. a 'predator' in scathing letter to senators\n",
"12\n",
"of 17\n",
"Business\n",
"CNN Business\n",
"614\n",
"AI chatbot named Lucie goes offline after embarrassing errors go viral\n",
"13\n",
"of 17\n",
"Celebrity\n",
"Elle\n",
"140\n",
"Lady Gaga shares plans to start a family with fiancé Michael Polansky\n",
"14\n",
"of 17\n",
"Celebrity\n",
"Entertainment Weekly\n",
"A jury thought 'Legally Blonde' star Witherspoon actually went to law school\n",
"15\n",
"of 17\n",
"Politics\n",
"CBS News\n",
"3.2K\n",
"Immigration crackdown also targets legal migration channels\n",
"16\n",
"of 17\n",
"Entertainment\n",
"Billboard\n",
"161\n",
"Coldplay breaks record for largest stadium shows of the century\n",
"17\n",
"of 17\n",
"1\n",
"of 17\n",
"America gamed the law of the sea — and it could be worth trillions of dollars\n",
"The Atlantic\n",
"Justin Baldoni apologizes to Blake Lively in leaked 7-minute voice note\n",
"People\n",
"903\n",
"'Shrimp fraud' found to be rampant at many Gulf Coast restaurants\n",
"USA TODAY\n",
"1.7K\n",
"For You\n",
"Celebrity\n",
"DJ Unk's cause of death confirmed by wife after 'Walk It Out' rapper dies at 43\n",
"LA Times\n",
"606\n",
"Advertisement\n",
"Weekday Newsletter\n",
"The Yodel\n",
"Rise and shine with news updates, delightful stories and a hit of nostalgia.\n",
"Sign up\n",
"By signing up, you agree to our\n",
"Terms\n",
"and\n",
"Privacy Policy\n",
".\n",
"Advertisement\n",
"Weekly Newsletter\n",
"It List\n",
"Plan what to watch, listen to and read with a guide to what's new and notable in entertainment.\n",
"Sign up\n",
"By signing up, you agree to our\n",
"Terms\n",
"and\n",
"Privacy Policy\n",
".\n",
"Advertise\n",
"About Our Ads\n",
"Careers\n",
"Help\n",
"Feedback\n",
"© 2025 Yahoo. All rights reserved.\n",
"Advertisement\n",
"Advertise\n",
"About Our Ads\n",
"Careers\n",
"Help\n",
"Feedback\n",
"© 2025 Yahoo. All rights reserved.\n"
]
}
],
"source": [ "source": [
"# Let's try one out. Change the website and add print statements to follow along.\n", "# Let's try one out. Change the website and add print statements to follow along.\n",
"\n", "\n",
"ed = Website(\"https://edwarddonner.com\")\n", "ed = Website(\"https://yahoo.com\")\n",
"print(ed.title)\n", "print(ed.title)\n",
"print(ed.text)" "print(ed.text)"
] ]
@ -240,7 +493,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 7,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699", "id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -254,7 +507,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 11,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -272,10 +525,196 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 12,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e", "id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are looking at a website titled Yahoo | Mail, Weather, Search, Politics, News, Finance, Sports & Videos\n",
"The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n",
"\n",
"Search query\n",
"Search the web\n",
"News\n",
"Finance\n",
"Sports\n",
"More\n",
"Manage your account\n",
"Help\n",
"Add or switch accounts\n",
"Sign out\n",
"Search the web\n",
"Advertisement\n",
"Advertisement\n",
"Advertisement\n",
"Trending\n",
"4 min ago\n",
"1\n",
"Medicaid Freeze\n",
"Trump freezes federal grants with big potential impact on providers, Medicaid\n",
"2\n",
"Caroline Kennedy\n",
"Caroline Kennedy calls cousin, RFK Jr., a 'predator' before HHS hearing\n",
"3\n",
"Jim Acosta\n",
"Jim Acosta Wasn’t Fired: Here’s Why He Left CNN\n",
"4\n",
"Gulf Of America\n",
"Maps are already being changed to 'Gulf of America.' It's not a simple process.\n",
"5\n",
"Buffalo Bills\n",
"What's next for the Bills? What Buffalo needs to address in the 2025 offseason\n",
"Politics\n",
"Politico\n",
"5.9K\n",
"New White House memo details programs spared from funding freeze\n",
"1\n",
"of 17\n",
"Lifestyle\n",
"Yahoo Life Shopping\n",
"Chefs agree: This is hands-down the best store-bought pasta sauce\n",
"2\n",
"of 17\n",
"Business\n",
"CNN\n",
"545\n",
"Boom Supersonic test flight breaks the sound barrier for 1st time\n",
"3\n",
"of 17\n",
"Lifestyle\n",
"Yahoo Life Shopping\n",
"Want to winter-proof your home? Here's how to do it in 6 easy steps.\n",
"4\n",
"of 17\n",
"Business\n",
"CNN Business\n",
"870\n",
"Why you still need to pay for your home if it was destroyed in a disaster\n",
"5\n",
"of 17\n",
"Politics\n",
"NBC News\n",
"2.9K\n",
"Google to change map names for Gulf of Mexico and Denali in U.S.\n",
"6\n",
"of 17\n",
"Sports\n",
"FTW Outdoors\n",
"Fox's Greg Olsen was demoted in favor of Tom Brady. Yes, he's frustrated.\n",
"7\n",
"of 17\n",
"Politics\n",
"Entertainment Weekly\n",
"2.3K\n",
"Snoop Dogg responds to 'all the hate' after inauguration weekend performance\n",
"8\n",
"of 17\n",
"Politics\n",
"CBS News\n",
"4.3K\n",
"White House unveils first lady Melania Trump's official portrait\n",
"9\n",
"of 17\n",
"Politics\n",
"LA Times\n",
"880\n",
"News anchor who sparred with Trump announces departure from CNN\n",
"10\n",
"of 17\n",
"Entertainment\n",
"People\n",
"'Chilling': Will Ferrell meets 'really disturbing' life-size Buddy the Elf\n",
"11\n",
"of 17\n",
"Politics\n",
"The Independent\n",
"632\n",
"Caroline Kennedy calls cousin RFK Jr. a 'predator' in scathing letter to senators\n",
"12\n",
"of 17\n",
"Business\n",
"CNN Business\n",
"614\n",
"AI chatbot named Lucie goes offline after embarrassing errors go viral\n",
"13\n",
"of 17\n",
"Celebrity\n",
"Elle\n",
"140\n",
"Lady Gaga shares plans to start a family with fiancé Michael Polansky\n",
"14\n",
"of 17\n",
"Celebrity\n",
"Entertainment Weekly\n",
"A jury thought 'Legally Blonde' star Witherspoon actually went to law school\n",
"15\n",
"of 17\n",
"Politics\n",
"CBS News\n",
"3.2K\n",
"Immigration crackdown also targets legal migration channels\n",
"16\n",
"of 17\n",
"Entertainment\n",
"Billboard\n",
"161\n",
"Coldplay breaks record for largest stadium shows of the century\n",
"17\n",
"of 17\n",
"1\n",
"of 17\n",
"America gamed the law of the sea — and it could be worth trillions of dollars\n",
"The Atlantic\n",
"Justin Baldoni apologizes to Blake Lively in leaked 7-minute voice note\n",
"People\n",
"903\n",
"'Shrimp fraud' found to be rampant at many Gulf Coast restaurants\n",
"USA TODAY\n",
"1.7K\n",
"For You\n",
"Celebrity\n",
"DJ Unk's cause of death confirmed by wife after 'Walk It Out' rapper dies at 43\n",
"LA Times\n",
"606\n",
"Advertisement\n",
"Weekday Newsletter\n",
"The Yodel\n",
"Rise and shine with news updates, delightful stories and a hit of nostalgia.\n",
"Sign up\n",
"By signing up, you agree to our\n",
"Terms\n",
"and\n",
"Privacy Policy\n",
".\n",
"Advertisement\n",
"Weekly Newsletter\n",
"It List\n",
"Plan what to watch, listen to and read with a guide to what's new and notable in entertainment.\n",
"Sign up\n",
"By signing up, you agree to our\n",
"Terms\n",
"and\n",
"Privacy Policy\n",
".\n",
"Advertise\n",
"About Our Ads\n",
"Careers\n",
"Help\n",
"Feedback\n",
"© 2025 Yahoo. All rights reserved.\n",
"Advertisement\n",
"Advertise\n",
"About Our Ads\n",
"Careers\n",
"Help\n",
"Feedback\n",
"© 2025 Yahoo. All rights reserved.\n"
]
}
],
"source": [ "source": [
"print(user_prompt_for(ed))" "print(user_prompt_for(ed))"
] ]
@ -301,7 +740,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 13,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -314,10 +753,18 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 14,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce", "id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Oh, I don’t know, maybe 22? Just kidding—it's 4. Basic math never goes out of style!\n"
]
}
],
"source": [ "source": [
"# To give you a preview -- calling OpenAI with system and user messages:\n", "# To give you a preview -- calling OpenAI with system and user messages:\n",
"\n", "\n",
@ -335,7 +782,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 15,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -351,10 +798,24 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 16,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"[{'role': 'system',\n",
" 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n",
" {'role': 'user',\n",
" 'content': \"You are looking at a website titled Yahoo | Mail, Weather, Search, Politics, News, Finance, Sports & Videos\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nSearch query\\nSearch the web\\nNews\\nFinance\\nSports\\nMore\\nManage your account\\nHelp\\nAdd or switch accounts\\nSign out\\nSearch the web\\nAdvertisement\\nAdvertisement\\nAdvertisement\\nTrending\\n4 min ago\\n1\\nMedicaid Freeze\\nTrump freezes federal grants with big potential impact on providers, Medicaid\\n2\\nCaroline Kennedy\\nCaroline Kennedy calls cousin, RFK Jr., a 'predator' before HHS hearing\\n3\\nJim Acosta\\nJim Acosta Wasn’t Fired: Here’s Why He Left CNN\\n4\\nGulf Of America\\nMaps are already being changed to 'Gulf of America.' It's not a simple process.\\n5\\nBuffalo Bills\\nWhat's next for the Bills? What Buffalo needs to address in the 2025 offseason\\nPolitics\\nPolitico\\n5.9K\\nNew White House memo details programs spared from funding freeze\\n1\\nof 17\\nLifestyle\\nYahoo Life Shopping\\nChefs agree: This is hands-down the best store-bought pasta sauce\\n2\\nof 17\\nBusiness\\nCNN\\n545\\nBoom Supersonic test flight breaks the sound barrier for 1st time\\n3\\nof 17\\nLifestyle\\nYahoo Life Shopping\\nWant to winter-proof your home? Here's how to do it in 6 easy steps.\\n4\\nof 17\\nBusiness\\nCNN Business\\n870\\nWhy you still need to pay for your home if it was destroyed in a disaster\\n5\\nof 17\\nPolitics\\nNBC News\\n2.9K\\nGoogle to change map names for Gulf of Mexico and Denali in U.S.\\n6\\nof 17\\nSports\\nFTW Outdoors\\nFox's Greg Olsen was demoted in favor of Tom Brady. Yes, he's frustrated.\\n7\\nof 17\\nPolitics\\nEntertainment Weekly\\n2.3K\\nSnoop Dogg responds to 'all the hate' after inauguration weekend performance\\n8\\nof 17\\nPolitics\\nCBS News\\n4.3K\\nWhite House unveils first lady Melania Trump's official portrait\\n9\\nof 17\\nPolitics\\nLA Times\\n880\\nNews anchor who sparred with Trump announces departure from CNN\\n10\\nof 17\\nEntertainment\\nPeople\\n'Chilling': Will Ferrell meets 'really disturbing' life-size Buddy the Elf\\n11\\nof 17\\nPolitics\\nThe Independent\\n632\\nCaroline Kennedy calls cousin RFK Jr. 
a 'predator' in scathing letter to senators\\n12\\nof 17\\nBusiness\\nCNN Business\\n614\\nAI chatbot named Lucie goes offline after embarrassing errors go viral\\n13\\nof 17\\nCelebrity\\nElle\\n140\\nLady Gaga shares plans to start a family with fiancé Michael Polansky\\n14\\nof 17\\nCelebrity\\nEntertainment Weekly\\nA jury thought 'Legally Blonde' star Witherspoon actually went to law school\\n15\\nof 17\\nPolitics\\nCBS News\\n3.2K\\nImmigration crackdown also targets legal migration channels\\n16\\nof 17\\nEntertainment\\nBillboard\\n161\\nColdplay breaks record for largest stadium shows of the century\\n17\\nof 17\\n1\\nof 17\\nAmerica gamed the law of the sea — and it could be worth trillions of dollars\\nThe Atlantic\\nJustin Baldoni apologizes to Blake Lively in leaked 7-minute voice note\\nPeople\\n903\\n'Shrimp fraud' found to be rampant at many Gulf Coast restaurants\\nUSA TODAY\\n1.7K\\nFor You\\nCelebrity\\nDJ Unk's cause of death confirmed by wife after 'Walk It Out' rapper dies at 43\\nLA Times\\n606\\nAdvertisement\\nWeekday Newsletter\\nThe Yodel\\nRise and shine with news updates, delightful stories and a hit of nostalgia.\\nSign up\\nBy signing up, you agree to our\\nTerms\\nand\\nPrivacy Policy\\n.\\nAdvertisement\\nWeekly Newsletter\\nIt List\\nPlan what to watch, listen to and read with a guide to what's new and notable in entertainment.\\nSign up\\nBy signing up, you agree to our\\nTerms\\nand\\nPrivacy Policy\\n.\\nAdvertise\\nAbout Our Ads\\nCareers\\nHelp\\nFeedback\\n© 2025 Yahoo. All rights reserved.\\nAdvertisement\\nAdvertise\\nAbout Our Ads\\nCareers\\nHelp\\nFeedback\\n© 2025 Yahoo. All rights reserved.\"}]"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"# Try this out, and then try for a few more websites\n", "# Try this out, and then try for a few more websites\n",
"\n", "\n",
@ -371,7 +832,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 17,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -392,14 +853,25 @@
"execution_count": null, "execution_count": null,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"\"# Summary of Intuit®: Complete Financial Confidence\\n\\nIntuit's website provides a comprehensive overview of its various products and solutions aimed at enhancing financial confidence for individuals and businesses. Key offerings include:\\n\\n- **Products**: \\n - **TurboTax**: Tax preparation services for individuals and businesses.\\n - **QuickBooks**: Accounting software tailored for small businesses.\\n - **Credit Karma**: Financial product recommendations and credit tracking.\\n - **Mailchimp**: Marketing automation tools.\\n - **Intuit Assist**: An AI-powered tool to enhance user experience.\\n\\n- **Solutions**: Specialized services for accountants, educators, and enterprises.\\n\\n- **Support and Blogs**: Access to product support and a range of blogs focused on personal finance, business management, and tax preparation.\\n\\n- **Company Information**: Insight into Intuit’s corporate responsibility, investor relations, career opportunities, and technology innovations.\\n\\nThe site emphasizes Intuit's commitment to providing tools and resources for achieving complete financial confidence.\\n\\n## News or Announcements\\nNo specific news or announcements were indicated in the content provided.\""
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"summarize(\"https://edwarddonner.com\")" "summarize(\"https://edwarddonner.com\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 19,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342", "id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -413,10 +885,35 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 24,
"id": "3018853a-445f-41ff-9560-d925d1774b2f", "id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"The website \"Home - Edward Donner\" serves as a personal and professional platform for Ed Donner, who is a co-founder and CTO of Nebula.io. Nebula.io focuses on applying AI to improve talent discovery and engagement processes. Ed has a background in AI development, having previously founded the AI startup untapt, which was acquired in 2021. \n",
"\n",
"He enjoys writing code, experimenting with large language models (LLMs), and has interests in DJing and electronic music production. Additionally, he is active on Hacker News, engaging with topics related to technology and AI.\n",
"\n",
"## Recent Announcements\n",
"- **January 23, 2025**: Resources for the LLM Workshop focusing on hands-on experience with Agents.\n",
"- **December 21, 2024**: A welcome note for \"SuperDataScientists.\"\n",
"- **November 13, 2024**: Resources for \"Mastering AI and LLM Engineering.\"\n",
"- **October 16, 2024**: Resources for transitioning from Software Engineer to AI Data Scientist. \n",
"\n",
"Overall, the site emphasizes Ed's work with LLMs, ongoing projects, and resources for AI enthusiasts."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"display_summary(\"https://edwarddonner.com\")" "display_summary(\"https://edwarddonner.com\")"
] ]
@ -439,20 +936,72 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 25,
"id": "45d83403-a24c-44b5-84ac-961449b4008f", "id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"# Summary of CNN Website\n",
"\n",
"CNN provides a comprehensive platform for breaking news, featuring a variety of topics such as US and international news, politics, business, health, entertainment, sports, science, and more. \n",
"\n",
"## Key Highlights:\n",
"- **Current Events**: \n",
" - Ongoing coverage of significant issues like the Ukraine-Russia War and the Israel-Hamas War.\n",
" - Reports on the Trump administration's legal and political challenges, including a federal aid freeze impacting nonprofits.\n",
" - Latest updates on elections and political matters, with a focus on public health controversies and international relations.\n",
"\n",
"- **Media Features**: \n",
" - A mix of video content including CNN shorts, live TV, and podcasts covering various subjects.\n",
" - In-depth analyses and investigative stories about current events and societal issues.\n",
"\n",
"- **Lifestyle Content**: \n",
" - Lifestyle sections include health and wellness advice, travel recommendations, style trends, and entertainment updates.\n",
"\n",
"This website serves as a reliable source for up-to-date information and diverse content catering to a wide audience."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"display_summary(\"https://cnn.com\")" "display_summary(\"https://cnn.com\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 26,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7", "id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"# Summary of Anthropic's Website\n",
"\n",
"Anthropic is an AI safety and research company headquartered in San Francisco, committed to creating reliable and beneficial AI systems with safety as a core focus. The company provides the Claude AI model, with the latest iteration being Claude 3.5 Sonnet, which is designed to facilitate user interaction and enhance custom AI applications via API.\n",
"\n",
"## Recent Announcements\n",
"- **October 22, 2024**: Introduction of new AI models, Claude 3.5 Sonnet and Claude 3.5 Haiku, along with capabilities for computer use.\n",
"- **September 4, 2024**: Launch of Claude for Enterprise.\n",
"- **March 8, 2023**: Release of a research document outlining Anthropic's core views on AI safety.\n",
"\n",
"Anthropic promotes interdisciplinary collaboration, drawing expertise from fields such as machine learning, physics, policy, and product development to ensure their AI systems align with safety standards."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"display_summary(\"https://anthropic.com\")" "display_summary(\"https://anthropic.com\")"
] ]
@ -491,30 +1040,64 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 27,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6", "id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here's a table summarizing some of the newest restaurants with great reviews that offer reasonable prices. This table includes various types of cuisine, ethnicity, best dishes, price ranges, and popular desserts:\n",
"\n",
"| **Restaurant Name** | **Type of Food** | **Ethnicity** | **Best Dishes** | **Price Range** | **Top 3 Desserts** |\n",
"|----------------------|----------------------|---------------|-----------------------------------|-----------------|-------------------------------------|\n",
"| **Flavor Town** | American Comfort | American | BBQ Ribs, Mac & Cheese, Fried Chicken | $10 - $20 | Chocolate Lava Cake, Bread Pudding, Apple Pie |\n",
"| **Sushi Haven** | Japanese | Japanese | Dragon Roll, Ramen, Teriyaki Chicken | $12 - $25 | Mochi Ice Cream, Green Tea Cheesecake, Tempura Cheesecake |\n",
"| **Taco Fusion** | Tacos & Street Eats | Mexican | Spicy Chicken Tacos, Street Corn, Quesadillas | $8 - $15 | Churros, Tres Leches Cake, Flan |\n",
"| **Curry Palace** | Indian Cuisine | Indian | Butter Chicken, Vegetable Biryani, Naan | $10 - $22 | Gulab Jamun, Ras Malai, Kheer |\n",
"| **Noodle Nirvana** | Asian Noodles | Thai | Pad Thai, Green Curry, Spicy Noodles | $10 - $18 | Mango Sticky Rice, Coconut Pudding, Thai Lychee Salad |\n",
"| **Harvest Bowl** | Healthy & Organic | American | Quinoa Salad, Veggie Bowl, Buddha Bowl | $9 - $16 | Chocolate Avocado Mousse, Chia Pudding, Fruit Sorbet |\n",
"| **Pasta Perfection** | Italian | Italian | Fettuccine Alfredo, Margherita Pizza, Lasagna | $12 - $24 | Tiramisu, Cannoli, Panna Cotta |\n",
"| **Mediterranean Delights** | Mediterranean | Greek | Gyro Platter, Hummus & Pita, Greek Salad | $10 - $20 | Baklava, Galaktoboureko, Greek Yogurt with Honey |\n",
"| **Bistro Arabesque** | Middle Eastern | Lebanese | Shawarma Wrap, Tabouli, Lentil Soup | $10 - $18 | Ma'amoul, Rice Pudding, Date Squares |\n",
"| **Burger Haven** | Burgers & Grills | American | Classic Cheeseburger, Sweet Potato Fries, Shakes | $10 - $18 | Milkshake Float, Brownie Sundae, Cookie Dough Bites |\n",
"\n",
"### Note:\n",
"- This table features fictional restaurant names and dishes; if you're looking for real-time updates on actual restaurants, consider using reviews on platforms like Yelp, TripAdvisor, or Zomato for the latest openings, reviews, and detailed price ranges. \n",
"- Prices may vary based on location, portion sizes, and specific menu items.\n"
]
}
],
"source": [ "source": [
"# Step 1: Create your prompts\n", "# Step 1: Create your prompts\n",
"\n", "\n",
"system_prompt = \"something here\"\n", "system_prompt = \"\"\"\n",
"You are a foody in the San Francisco Bay Area and like to try the newest restaurants in the area outside of San Francisco. You are a smart food critique that takes cost, food quality and taste into consideration.\n",
"\"\"\"\n",
"user_prompt = \"\"\"\n", "user_prompt = \"\"\"\n",
" Lots of text\n", "Give me a table of the newest restaurants with the best reviews for a reasonable price. Include the types of food, the ethnicity, and their best dishes.\n",
" Can be pasted here\n", "Also include the price range of the dishes.\n",
"Include the top 3 desserts that are ordered in each restaurant.\n",
"\"\"\"\n", "\"\"\"\n",
"\n", "\n",
"# Step 2: Make the messages list\n", "# Step 2: Make the messages list\n",
"\n", "\n",
"messages = [] # fill this in\n", "messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt,\n",
" \"role\": \"user\", \"content\": user_prompt}\n",
"] # fill this in\n",
"\n", "\n",
"# Step 3: Call OpenAI\n", "# Step 3: Call OpenAI\n",
"\n", "\n",
"response =\n", "response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
" )\n",
"\n", "\n",
"# Step 4: print the result\n", "# Step 4: print the result\n",
"\n", "\n",
"print(" "print(response.choices[0].message.content)"
] ]
}, },
{ {
@ -545,7 +1128,7 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "venv",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

336
week1/day2 EXERCISE.ipynb

@ -68,7 +68,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -82,7 +82,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "29ddd15d-a3c5-4f4e-a678-873f56162724", "id": "29ddd15d-a3c5-4f4e-a678-873f56162724",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -96,7 +96,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "dac0a679-599c-441f-9bf2-ddc73d35b940", "id": "dac0a679-599c-441f-9bf2-ddc73d35b940",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -110,7 +110,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 4,
"id": "7bb9c624-14f0-4945-a719-8ddb64f66f47", "id": "7bb9c624-14f0-4945-a719-8ddb64f66f47",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -124,10 +124,27 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 5,
"id": "479ff514-e8bd-4985-a572-2ea28bb4fa40", "id": "479ff514-e8bd-4985-a572-2ea28bb4fa40",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[?25lpulling manifest ⠋ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠙ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠹ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠸ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠼ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest \n",
"pulling dde5aa3fc5ff... 100% ▕████████████████▏ 2.0 GB \n",
"pulling 966de95ca8a6... 100% ▕████████████████▏ 1.4 KB \n",
"pulling fcc5a6bec9da... 100% ▕████████████████▏ 7.7 KB \n",
"pulling a70ff7e570d9... 100% ▕████████████████▏ 6.0 KB \n",
"pulling 56bb8bd477a5... 100% ▕████████████████▏ 96 B \n",
"pulling 34bb5ab01051... 100% ▕████████████████▏ 561 B \n",
"verifying sha256 digest \n",
"writing manifest \n",
"success \u001b[?25h\n"
]
}
],
"source": [ "source": [
"# Let's just make sure the model is loaded\n", "# Let's just make sure the model is loaded\n",
"\n", "\n",
@ -136,10 +153,61 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 6,
"id": "42b9f644-522d-4e05-a691-56e7658c0ea9", "id": "42b9f644-522d-4e05-a691-56e7658c0ea9",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generative AI has numerous business applications across various industries. Here are some examples:\n",
"\n",
"1. **Content Creation**: Generative AI can generate high-quality content such as images, videos, music, and text. This can be used for:\n",
" * Creating social media posts, blog articles, and other marketing materials\n",
" * Generating product descriptions, reviews, and testimonials\n",
" * Producing original artwork, graphics, and designs\n",
"2. **Marketing Automation**: Generative AI can automate repetitive marketing tasks, such as:\n",
" * Personalizing email campaigns and subject lines\n",
" * Creating personalized product recommendations\n",
" * Generating social media posts and ads\n",
"3. **Customer Service**: Generative AI can help customer service teams with tasks like:\n",
" * Responding to customer inquiries and support requests\n",
" * Generating automated responses for FAQs and common issues\n",
" * Providing personalized product recommendations and suggestions\n",
"4. **Sales Forecasting**: Generative AI can analyze historical data and market trends to generate sales forecasts, helping businesses:\n",
" * Predict revenue growth and identify areas of opportunity\n",
" * Optimize pricing strategies and inventory management\n",
" * Make informed decisions about investments and resource allocation\n",
"5. **Product Development**: Generative AI can assist product development teams with tasks like:\n",
" * Generating new product ideas and concepts\n",
" * Designing prototypes and 3D models\n",
" * Creating simulations and testing scenarios\n",
"6. **Financial Analysis**: Generative AI can analyze large datasets to generate insights and predictions, helping businesses:\n",
" * Identify trends and patterns in financial data\n",
" * Predict market fluctuations and economic indicators\n",
" * Optimize investment portfolios and risk management strategies\n",
"7. **Supply Chain Optimization**: Generative AI can help optimize supply chain operations by:\n",
" * Analyzing traffic patterns and predicting demand spikes\n",
" * Identifying optimal routes and inventory levels\n",
" * Optimizing logistics and transportation planning\n",
"8. **Predictive Maintenance**: Generative AI can analyze sensor data to predict equipment failures, helping businesses:\n",
" * Optimize maintenance schedules and reduce downtime\n",
" * Predict and prevent equipment failures\n",
" * Reduce costs and improve overall efficiency\n",
"9. **Customer Segmentation**: Generative AI can help identify high-value customer segments, enabling businesses to:\n",
" * Target specific demographics and interests with personalized marketing campaigns\n",
" * Develop targeted product offerings and services\n",
" * Improve customer retention and loyalty programs\n",
"10. **Intellectual Property Protection**: Generative AI can help protect intellectual property by:\n",
" * Generating unique and original content that avoids copyright infringement\n",
" * Analyzing competitors' content for potential IP risks\n",
" * Providing insights on potential patent applications\n",
"\n",
"These are just a few examples of the many business applications of Generative AI. As the technology continues to evolve, we can expect to see even more innovative uses across various industries.\n"
]
}
],
"source": [ "source": [
"# If this doesn't work for any reason, try the 2 versions in the following cells\n", "# If this doesn't work for any reason, try the 2 versions in the following cells\n",
"# And double check the instructions in the 'Recap on installation of Ollama' at the top of this lab\n", "# And double check the instructions in the 'Recap on installation of Ollama' at the top of this lab\n",
@ -163,11 +231,59 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 9,
"id": "7745b9c4-57dc-4867-9180-61fa5db55eb8", "id": "7745b9c4-57dc-4867-9180-61fa5db55eb8",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting ollama\n",
" Downloading ollama-0.4.7-py3-none-any.whl.metadata (4.7 kB)\n",
"Requirement already satisfied: httpx<0.29,>=0.27 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from ollama) (0.28.1)\n",
"Requirement already satisfied: pydantic<3.0.0,>=2.9.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from ollama) (2.10.6)\n",
"Requirement already satisfied: anyio in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpx<0.29,>=0.27->ollama) (4.8.0)\n",
"Requirement already satisfied: certifi in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpx<0.29,>=0.27->ollama) (2024.12.14)\n",
"Requirement already satisfied: httpcore==1.* in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpx<0.29,>=0.27->ollama) (1.0.7)\n",
"Requirement already satisfied: idna in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpx<0.29,>=0.27->ollama) (3.10)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from httpcore==1.*->httpx<0.29,>=0.27->ollama) (0.14.0)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from pydantic<3.0.0,>=2.9.0->ollama) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.27.2 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from pydantic<3.0.0,>=2.9.0->ollama) (2.27.2)\n",
"Requirement already satisfied: typing-extensions>=4.12.2 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from pydantic<3.0.0,>=2.9.0->ollama) (4.12.2)\n",
"Requirement already satisfied: sniffio>=1.1 in /Users/adang4/Documents/devwork/udemy/ai/llm_engineering/venv/lib/python3.11/site-packages (from anyio->httpx<0.29,>=0.27->ollama) (1.3.1)\n",
"Downloading ollama-0.4.7-py3-none-any.whl (13 kB)\n",
"Installing collected packages: ollama\n",
"Successfully installed ollama-0.4.7\n",
"Note: you may need to restart the kernel to use updated packages.\n",
"Generative AI has numerous business applications across various industries, including:\n",
"\n",
"1. **Content Generation**: AI can generate high-quality content such as articles, blog posts, social media posts, and product descriptions, freeing up human writers' time and increasing content volume.\n",
"2. **Image and Video Creation**: Generative AI can create realistic images, videos, and animations for advertising, marketing, and entertainment purposes.\n",
"3. **Chatbots and Virtual Assistants**: AI-powered chatbots can provide 24/7 customer support, answering frequently asked questions, and helping customers with basic queries.\n",
"4. **Personalized Recommendations**: Generative AI can analyze user behavior and preferences to offer personalized product recommendations, increasing sales and improving customer satisfaction.\n",
"5. **Predictive Maintenance**: AI can analyze sensor data from machines and equipment to predict maintenance needs, reducing downtime and increasing overall efficiency.\n",
"6. **Financial Modeling**: Generative AI can create complex financial models, forecast revenue, and identify investment opportunities, helping businesses make informed decisions.\n",
"7. **Marketing Automation**: AI-powered marketing automation platforms can help automate tasks such as email marketing, lead generation, and social media management.\n",
"8. **Influencer Marketing**: Generative AI can analyze social media data to suggest potential influencers for a brand's influencer marketing campaigns.\n",
"9. **Product Design**: AI can generate 3D models of products, reducing the time and cost associated with product design and development.\n",
"10. **Supply Chain Optimization**: Generative AI can analyze supply chain data to optimize inventory levels, predict demand, and identify bottlenecks in logistics.\n",
"\n",
"Some specific business use cases for Generative AI include:\n",
"\n",
"1. **Automated customer service**: Use generative AI to create chatbots that can handle simple customer inquiries.\n",
"2. **Content optimization**: Use generative AI to generate high-quality content that is optimized for search engines.\n",
"3. **Predictive sales forecasting**: Use generative AI to analyze historical data and predict future sales revenue.\n",
"4. **Automated marketing campaigns**: Use generative AI to create personalized marketing campaigns based on customer behavior.\n",
"5. **Product design and development**: Use generative AI to generate 3D models of products, reducing the time and cost associated with product design.\n",
"\n",
"These are just a few examples of the many business applications of Generative AI. As the technology continues to evolve, we can expect to see even more innovative use cases across various industries.\n"
]
}
],
"source": [ "source": [
"%pip install ollama\n",
"\n",
"import ollama\n", "import ollama\n",
"\n", "\n",
"response = ollama.chat(model=MODEL, messages=messages)\n", "response = ollama.chat(model=MODEL, messages=messages)\n",
@ -184,10 +300,40 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "23057e00-b6fc-4678-93a9-6b31cb704bff", "id": "23057e00-b6fc-4678-93a9-6b31cb704bff",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generative AI has numerous business applications across various industries. Here are some examples:\n",
"\n",
"1. **Content Creation**: Companies can use generative AI to generate content such as blog posts, articles, social media posts, and product descriptions. This can help reduce content creation time and improve consistency.\n",
"2. **Marketing Automation**: Generative AI can be used to create personalized marketing campaigns by generating custom messages, offers, and target audiences based on customer data and preferences.\n",
"3. **Chatbots and Customer Service**: Generative AI-powered chatbots can simulate human-like conversations and provide instant support to customers, helping to improve the overall customer experience.\n",
"4. **Product Design and Development**: Generative AI can be used to generate new product ideas, design concepts, and prototypes, reducing the time and cost associated with traditional product development methods.\n",
"5. **Image and Video Editing**: Companies can use generative AI to edit images and videos automatically, such as removing noise or adjusting lighting and color balance.\n",
"6. **Music Composition**: Generative AI can be used to create music tracks, sound effects, and other audio content, helping musicians and composers to generate new ideas or finish unfinished projects.\n",
"7. **Financial Analysis**: Generative AI can be used to analyze financial data, identify trends, and predict market behavior, helping investors to make more informed decisions.\n",
"8. **Supply Chain Optimization**: Generative AI can be used to optimize supply chain operations by predicting demand, identifying bottlenecks, and suggesting new logistics routes.\n",
"9. **HR Recruitment**: Generative AI can be used to generate job descriptions, interview questions, and candidate feedback, helping human resources teams to streamline the recruitment process.\n",
"10. **Data augmentation**: Generative AI can be used to augment existing datasets, making them more diverse and representative, which can improve the accuracy of machine learning models.\n",
"11. **Recommendation Systems**: Generative AI can be used to generate personalized product recommendations, content suggestions, and travel plans based on customer behavior and preferences.\n",
"12. **Speech Recognition**: Generative AI-powered speech recognition systems can help improve the accuracy of voice-to-text transcriptions, audio transcription, and speech-based interfaces.\n",
"\n",
"Some specific business use cases for generative AI include:\n",
"\n",
"* **Netflix**: Using generative AI to predict user viewing habits and generate personalized content recommendations.\n",
"* **Walmart**: Using generative AI to create personalized marketing campaigns based on customer behavior and preferences.\n",
"* **IBM**: Using generative AI to create personalized healthcare treatment plans for patients.\n",
"* **Microsoft**: Using generative AI to improve the accuracy of speech-to-text transcriptions and audio transcription.\n",
"\n",
"These are just a few examples of the many potential business applications of generative AI. As this technology continues to evolve, we can expect to see even more innovative solutions in the future.\n"
]
}
],
"source": [ "source": [
"# There's actually an alternative approach that some people might prefer\n", "# There's actually an alternative approach that some people might prefer\n",
"# You can use the OpenAI client python library to call Ollama:\n", "# You can use the OpenAI client python library to call Ollama:\n",
@ -215,16 +361,176 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 14,
"id": "402d5686-4e76-4110-b65a-b3906c35c0a4", "id": "402d5686-4e76-4110-b65a-b3906c35c0a4",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": [
"# imports\n",
"\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "b214df93",
"metadata": {},
"outputs": [],
"source": [
"# Some websites need you to use proper headers when fetching them:\n",
"HEADERS = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\",\n",
" \"Content-Type\": \"application/json\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=HEADERS)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "0f2e95cb",
"metadata": {},
"outputs": [],
"source": [
"# Use openai api call to ollama_api\n",
"from openai import OpenAI\n",
"\n",
"# Using ollama python package\n",
"import ollama\n",
"\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
"\n",
" # 1. Use openai api to call ollama api on localhost\n",
" #MODEL = \"llama3.2\"\n",
" #openai_to_ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
" #response = openai_to_ollama.chat.completions.create( \n",
" # model=MODEL,\n",
" # messages=messages_for(website)\n",
" #)\n",
" #return response.choices[0].message.content\n",
"\n",
"\n",
" # 2. Use ollama python package to call ollama api on localhost\n",
" MODEL = \"llama3.2\"\n",
" response = ollama.chat(\n",
" model=MODEL,\n",
" messages=messages_for(website)\n",
" )\n",
" print(response['message']['content'])\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))\n",
"\n",
"#####\n"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "110c93bc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here's a summary of the article:\n",
"\n",
"**US Markets Update**\n",
"\n",
"The US stock market closed for the day, with the Dow Jones Industrial Average (DJIA) down 485 points (-1.09%) and the Nasdaq Futures down 436 points (-2.02%). The S&P 500 futures were also lower by 93 points (-1.55%).\n",
"\n",
"**Top Gainers**\n",
"\n",
"* Viavi Solutions Inc. (VIAV): +2.05 (+20.52%)\n",
"* Atlassian Corporation (TEAM): +39.83 (+14.92%)\n",
"* AST SpaceMobile, Inc. (ASTS): +2.03 (+11.15%)\n",
"\n",
"**Top Losers**\n",
"\n",
"* Deckers Outdoor Corporation (DECK): -45.75 (-20.51%)\n",
"* Walgreens Boots Alliance, Inc. (WBA): -1.18 (-10.30%)\n",
"\n",
"**Economic Events**\n",
"\n",
"No major economic events were reported today.\n",
"\n",
"**Cryptocurrency Prices**\n",
"\n",
"The current prices for cryptocurrencies are:\n",
"\n",
"* Ethereum (ETH-USD): 2,817.09 - 322.27 (-10.27%)\n",
"* Bitcoin is not mentioned in this article.\n",
"\n",
"**Stock Market Watchlist**\n",
"\n",
"The watchlist includes stocks such as:\n",
"\n",
"* NVIDIA Corporation (NVDA)\n",
"* Rigetti Computing, Inc. (RGTI)\n",
"* Intel Corporation (INTC)\n",
"* Ford Motor Company (F)\n",
"* Apple Inc. (AAPL)\n",
"\n",
"Please note that this is a summary of the article and not an exhaustive report on the stock market.\n"
]
},
{
"data": {
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"display_summary(\"https://finance.yahoo.com\")\n"
]
} }
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "venv",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

3504
week1/day5.ipynb

File diff suppressed because one or more lines are too long

173
week1/week1 EXERCISE.ipynb

@ -13,17 +13,23 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 22,
"id": "c1070317-3ed9-4659-abe3-828943230e03", "id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# imports" "# imports\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import ollama"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f", "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -36,24 +42,49 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", "id": "93e65a84",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key looks good so far\n"
]
}
],
"source": [ "source": [
"# set up environment" "# set up environment\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
" print(\"API key looks good so far\")\n",
"else:\n",
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
" \n",
"openai = OpenAI()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 19,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798", "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# system prompt\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an assistant that answers questions concisely with technical details. The answer should be as descriptive as possible,\n",
"using language that can be understood by a 5 year old.\n",
"\"\"\"\n",
"\n",
"# here is the question; type over this to ask something new\n", "# here is the question; type over this to ask something new\n",
"\n", "\n",
"question = \"\"\"\n", "user_prompt = \"\"\"\n",
"Please explain what this code does and why:\n", "Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\"" "\"\"\""
@ -61,28 +92,138 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 18,
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"Alright! Imagine you have a magical treasure chest (that’s your code!) filled with sparkly books, and each book has a tag telling you who wrote it—the author! Now, let’s break down what’s happening:\n",
"\n",
"1. **Books Galore**: Think of `books` as a big shelf where all your storybooks are sitting in a neat line. Some books say who the author is, and some books are a bit shy and just sit there without telling you.\n",
"\n",
"2. **Searching for Authors**: The part `book.get(\"author\")` is like asking each book, \"Hey, who wrote you?\" If the book says \"Me! I'm written by Captain Awesome!\" then yay! You found an author! If the book goes “Umm... I forgot!”, you just move on to the next one.\n",
"\n",
"3. **Creating a List**: The fancy `{ ... for book in books if book.get(\"author\")}` part means you're gathering all the names of authors from the books that told you who they are. It's like putting a name sticker on all the books that have an author! \n",
"\n",
"4. **Sparkling Unique Names**: The curly braces `{}` mean you’re creating something special—a *set*! This is like a group of friends where nobody can have the same name, so if two books are written by Captain Awesome, he only gets one sticker!\n",
"\n",
"5. **The Magic of Yielding**: Finally, the `yield from` part is like saying, \"Okay, magic treasure chest, please start handing me those names one by one!\" It allows you to take each author’s name out, one at a time, as if you’re pulling out candy from a piñata—sweet and delightful!\n",
"\n",
"So, to sum it up with a giggle: This code is like a magical helper that goes through your book shelf, gathers all the brave authors who shouted their names, puts them in a special list without repeats, and then hands them out to you one by one like a candy parade! 🍭✨"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"# Get gpt-4o-mini to answer, with streaming" "# Get gpt-4o-mini to answer, with streaming\n",
"\n",
"def get_answer(question):\n",
" stream = openai.chat.completions.create(\n",
" model=MODEL_GPT,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": question}\n",
" ],\n",
" stream=True\n",
" )\n",
" \n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
"\n",
"get_answer(user_prompt)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 24,
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"This line of code is doing something really cool with a bunch of books!\n",
"\n",
"Let's break it down:\n",
"\n",
"- `books` is like a big box full of books.\n",
"- Each book is like a small container that has some information inside, like the author's name.\n",
"\n",
"Here's what this line does:\n",
"\n",
"1. It takes all the books in the box (`for book in books`).\n",
"2. For each book, it checks if the book has an \"author\" inside (like a small treasure chest with a key: `\"author\"`).\n",
"3. If a book does have an author (like finding a treasure), it gets the author's name from that book.\n",
"4. It then makes the author's name available to someone else who is waiting for more information, like a helper.\n",
"\n",
"The magic part is `yield from`. Imagine you're on a treasure hunt with your friend. You find a clue and say \"Look! This is a map!\" Then, your friend can look at the map and see where the next clue is. That's kind of what `yield from` does: it says \"Hey, I found this information, here's more info about where to go next\"!\n",
"\n",
"In code terms, when you run this line, it will make each author's name available one by one, like a treasure chest with clues. \n",
"\n",
"Here is an example using python classes:\n",
"\n",
"```python\n",
"class Book:\n",
" def __init__(self, title, author):\n",
" self.title = title\n",
" self.author = author\n",
"\n",
"books = [\n",
" Book(\"Harry Potter\", \"J.K. Rowling\"),\n",
" Book(\"The Lion King\", \"Alexander Dumas\"),\n",
"]\n",
"\n",
"def find_authors(books):\n",
" for book in books:\n",
" if 'author' in book.__dict__:\n",
" yield from (book.author,)\n",
"\n",
"# Test the function\n",
"for author in find_authors(books):\n",
" print(author)\n",
"```"
]
}
],
"source": [ "source": [
"# Get Llama 3.2 to answer" "# Get Llama 3.2 to answer\n",
"\n",
"# Using ollama python package\n",
"\n",
"def get_answer(question):\n",
" ollama_response = ollama.chat(\n",
" model=MODEL_LLAMA,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": question}\n",
" ],\n",
" stream=True\n",
" )\n",
" \n",
" # Printing out each piece of the generated response while preserving order\n",
" for chunk in ollama_response:\n",
" print(chunk['message']['content'], end='', flush=True)\n",
"\n",
"\n",
"get_answer(user_prompt)\n"
] ]
} }
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "venv",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

483
week2/day1.ipynb

@ -82,11 +82,38 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 28,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting anthropic\n",
" Downloading anthropic-0.45.2-py3-none-any.whl.metadata (23 kB)\n",
"Requirement already satisfied: anyio<5,>=3.5.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (4.8.0)\n",
"Requirement already satisfied: distro<2,>=1.7.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (1.9.0)\n",
"Requirement already satisfied: httpx<1,>=0.23.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (0.28.1)\n",
"Requirement already satisfied: jiter<1,>=0.4.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (0.8.2)\n",
"Requirement already satisfied: pydantic<3,>=1.9.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (2.10.6)\n",
"Requirement already satisfied: sniffio in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (1.3.1)\n",
"Requirement already satisfied: typing-extensions<5,>=4.10 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anthropic) (4.12.2)\n",
"Requirement already satisfied: idna>=2.8 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anyio<5,>=3.5.0->anthropic) (3.10)\n",
"Requirement already satisfied: certifi in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpx<1,>=0.23.0->anthropic) (2025.1.31)\n",
"Requirement already satisfied: httpcore==1.* in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpx<1,>=0.23.0->anthropic) (1.0.7)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->anthropic) (0.14.0)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pydantic<3,>=1.9.0->anthropic) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.27.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pydantic<3,>=1.9.0->anthropic) (2.27.2)\n",
"Downloading anthropic-0.45.2-py3-none-any.whl (222 kB)\n",
"Installing collected packages: anthropic\n",
"Successfully installed anthropic-0.45.2\n"
]
}
],
"source": [ "source": [
"!pip install anthropic\n",
"\n",
"# imports\n", "# imports\n",
"\n", "\n",
"import os\n", "import os\n",
@ -98,7 +125,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 29,
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -112,10 +139,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 30,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyDi\n"
]
}
],
"source": [ "source": [
"# Load environment variables in a file called .env\n", "# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n", "# Print the key prefixes to help with any debugging\n",
@ -143,7 +180,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 39,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -157,7 +194,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 32,
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c", "id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -190,7 +227,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 33,
"id": "378a0296-59a2-45c6-82eb-941344d3eeff", "id": "378a0296-59a2-45c6-82eb-941344d3eeff",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -201,7 +238,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 34,
"id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -214,10 +251,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 35,
"id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why do data scientists prefer dark chocolate?\n",
"\n",
"Because they don't like it when their data sets are too sweet!\n"
]
}
],
"source": [ "source": [
"# GPT-3.5-Turbo\n", "# GPT-3.5-Turbo\n",
"\n", "\n",
@ -227,10 +274,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 36,
"id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist break up with the statistician?\n",
"\n",
"Because he found her mean too average!\n"
]
}
],
"source": [ "source": [
"# GPT-4o-mini\n", "# GPT-4o-mini\n",
"# Temperature setting controls creativity\n", "# Temperature setting controls creativity\n",
@ -245,10 +302,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 37,
"id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why did the data scientist bring a ladder to the office?\n",
"\n",
"Because they heard the project had a lot of layers to unravel!\n"
]
}
],
"source": [ "source": [
"# GPT-4o\n", "# GPT-4o\n",
"\n", "\n",
@ -262,10 +329,22 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 41,
"id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for data scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"There was just too much variance in the relationship, and they couldn't find a meaningful correlation!\n"
]
}
],
"source": [ "source": [
"# Claude 3.5 Sonnet\n", "# Claude 3.5 Sonnet\n",
"# API needs system message provided separately from user prompt\n", "# API needs system message provided separately from user prompt\n",
@ -286,10 +365,22 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 42,
"id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, here's a light-hearted joke for data scientists:\n",
"\n",
"Why did the data scientist break up with their significant other?\n",
"\n",
"There was just too much variance in the relationship, and they couldn't find a good way to normalize it!"
]
}
],
"source": [ "source": [
"# Claude 3.5 Sonnet again\n", "# Claude 3.5 Sonnet again\n",
"# Now let's add in streaming back results\n", "# Now let's add in streaming back results\n",
@ -311,10 +402,105 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 18,
"id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", "id": "d34ec329",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: pip in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (25.0.1)\n"
]
}
],
"source": [
"!pip install --upgrade pip\n",
"!pip install -q -U google-genai\n",
"from google import genai"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "7520ec34",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [
"client = genai.Client(api_key=google_api_key)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "914d2739",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"AI works by mimicking human cognitive functions like learning, problem-solving, and decision-making. While the specifics vary depending on the type of AI, here's a general overview:\n",
"\n",
"**1. Data Collection and Preparation:** AI systems are trained on massive datasets. This data can be anything from text and images to numerical data and sensor readings. The data needs to be cleaned, preprocessed, and formatted so the AI can understand it. This might involve removing errors, handling missing values, and transforming data into a usable format.\n",
"\n",
"**2. Choosing an AI Model:** Different tasks require different AI models. Some common types include:\n",
"\n",
"* **Machine Learning (ML):** Algorithms that allow computers to learn from data without explicit programming. ML is further divided into:\n",
" * **Supervised Learning:** The algorithm learns from labeled data (e.g., images tagged with \"cat\" or \"dog\"). Examples include classification and regression.\n",
" * **Unsupervised Learning:** The algorithm finds patterns in unlabeled data. Examples include clustering and dimensionality reduction.\n",
" * **Reinforcement Learning:** The algorithm learns through trial and error, receiving rewards for desired actions. Think of training a robot to navigate a maze.\n",
"\n",
"* **Deep Learning (DL):** A subset of ML that uses artificial neural networks with multiple layers to extract higher-level features from raw data. It's particularly effective for complex tasks like image recognition and natural language processing.\n",
"\n",
"* **Natural Language Processing (NLP):** Enables computers to understand, interpret, and generate human language.\n",
"\n",
"* **Computer Vision:** Allows computers to \"see\" and interpret images and videos.\n",
"\n",
"**3. Training the Model:** This is where the AI system learns from the data. The model's parameters are adjusted to minimize errors and improve its performance on the training data. This process can be computationally intensive and require significant resources.\n",
"\n",
"**4. Evaluation and Tuning:** The trained model is tested on a separate dataset (the test set) to evaluate its performance on unseen data. If the performance is not satisfactory, the model's parameters are further tuned, or the training process is repeated with different settings.\n",
"\n",
"**5. Deployment and Prediction:** Once the model performs well, it can be deployed to make predictions on new, real-world data. For example, a trained image recognition model could be used to identify objects in photos.\n",
"\n",
"\n",
"**Simplified Analogy:** Imagine teaching a child to identify different fruits. You show them pictures of apples, bananas, and oranges, labeling each one. This is like training data. The child's brain learns to associate the visual features with the fruit names. This is like training the AI model. Then, you show the child a new picture of an apple, and they correctly identify it. This is like the AI making a prediction.\n",
"\n",
"**Key Concepts:**\n",
"\n",
"* **Algorithms:** Sets of rules that guide the AI's learning process.\n",
"* **Neural Networks:** Interconnected nodes that process and transmit information, inspired by the human brain.\n",
"* **Training Data:** The data used to train the AI model.\n",
"* **Test Data:** The data used to evaluate the model's performance.\n",
"\n",
"\n",
"It's important to remember that AI is not truly \"intelligent\" in the same way as humans. It's very good at specific tasks it's trained for, but it lacks general intelligence and common sense reasoning. The field is constantly evolving, with new techniques and applications emerging regularly.\n",
"\n"
]
}
],
"source": [
"response = client.models.generate_content(\n",
" model=\"gemini-1.5-pro\", contents=\"Explain how AI works\"\n",
")\n",
"print(response.text)"
]
},
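A streaming variant of the call above, as a hedged sketch: the google-genai client exposes a streaming method, whose name below is recalled from its docs and should be double-checked:

# Hedged sketch: stream the Gemini answer chunk by chunk (method name assumed)
stream = client.models.generate_content_stream(
    model="gemini-1.5-pro", contents="Explain how AI works"
)
for chunk in stream:
    print(chunk.text, end="", flush=True)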
{
"cell_type": "code",
"execution_count": 43,
"id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why was the data scientist sad? Because they didn't get any arrays. (…get it? A-rays?)\n",
"\n"
]
}
],
"source": [ "source": [
"# The API for Gemini has a slightly different structure.\n", "# The API for Gemini has a slightly different structure.\n",
"# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
@ -330,10 +516,19 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 44,
"id": "49009a30-037d-41c8-b874-127f61c4aa3a", "id": "49009a30-037d-41c8-b874-127f61c4aa3a",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Why was the data scientist sad? Because they didn't get the array they wanted! (…and their p-value was greater than 0.05)\n",
"\n"
]
}
],
"source": [ "source": [
"# As an alternative way to use Gemini that bypasses Google's python API library,\n", "# As an alternative way to use Gemini that bypasses Google's python API library,\n",
"# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n", "# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n",
@ -352,7 +547,7 @@
}, },
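The body of this cell is elided by the hunk. A hedged sketch of the usual pattern, assuming google_api_key and a prompts message list from earlier cells; the base URL is the commonly documented OpenAI-compatibility endpoint and the model name is an assumption:

# Hedged sketch: call Gemini through the OpenAI client library
gemini_via_openai = OpenAI(
    api_key=google_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
response = gemini_via_openai.chat.completions.create(
    model="gemini-1.5-flash",
    messages=prompts,
)
print(response.choices[0].message.content)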
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 45,
"id": "83ddb483-4f57-4668-aeea-2aade3a9e573", "id": "83ddb483-4f57-4668-aeea-2aade3a9e573",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -367,10 +562,59 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 46,
"id": "749f50ab-8ccd-4502-a521-895c3f0808a2", "id": "749f50ab-8ccd-4502-a521-895c3f0808a2",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/markdown": [
"To determine if a business problem is suitable for a Large Language Model (LLM) solution, consider the following criteria:\n",
"\n",
"### 1. **Nature of the Problem**\n",
" - **Text-Based Tasks**: LLMs excel at tasks involving natural language processing. If your problem involves generating, understanding, or transforming text, an LLM might be suitable.\n",
" - **Complex Language Understanding**: If the task requires understanding context, semantics, or generating coherent language, LLMs are well-suited.\n",
" - **Conversational Interfaces**: For applications like chatbots or virtual assistants where natural conversation is needed, LLMs can be effective.\n",
"\n",
"### 2. **Data Availability**\n",
" - **Text Data**: Ensure you have access to relevant, high-quality text data that the LLM can be trained or fine-tuned on.\n",
" - **Diverse and Contextual Data**: LLMs benefit from diverse data that covers various contexts and scenarios related to your business problem.\n",
"\n",
"### 3. **Scalability and Flexibility**\n",
" - **Task Scalability**: LLMs can handle scalable tasks involving large volumes of text data efficiently.\n",
" - **Adaptable to Change**: If your problem domain evolves rapidly, LLMs can be fine-tuned with new data to adapt easily.\n",
"\n",
"### 4. **Problem Complexity**\n",
" - **Complex Language Tasks**: Problems that require understanding nuanced language, industry-specific jargon, or complex queries align well with LLM capabilities.\n",
" - **Pattern Recognition**: LLMs are effective at recognizing patterns in language, which can be useful for predictive text, recommendations, and summarization.\n",
"\n",
"### 5. **Performance Requirements**\n",
" - **Accuracy Needs**: Evaluate if the accuracy provided by LLMs meets your business requirements. They are generally good at producing coherent and contextually relevant output but may need domain-specific fine-tuning.\n",
" - **Real-Time Processing**: If your application demands real-time processing, consider the latency and computational requirements of deploying an LLM.\n",
"\n",
"### 6. **Ethical Considerations**\n",
" - **Bias and Fairness**: Be aware of and address potential biases in LLM outputs, especially if the application impacts decision-making processes.\n",
" - **Data Privacy**: Ensure that the use of data complies with privacy laws and regulations.\n",
"\n",
"### 7. **Cost Implications**\n",
" - **Resource Intensive**: LLMs require significant computational resources for training and deployment, which can be costly.\n",
" - **Infrastructure**: Assess if you have the necessary infrastructure or if you need cloud-based solutions.\n",
"\n",
"### 8. **Expertise and Maintenance**\n",
" - **Technical Expertise**: Consider if your team has the expertise to implement and maintain an LLM solution, or if external support is required.\n",
" - **Ongoing Maintenance**: Plan for ongoing maintenance, updates, and fine-tuning of the model as your business needs evolve.\n",
"\n",
"### Conclusion\n",
"An LLM solution is suitable if your business problem primarily involves complex language tasks, you have the necessary data and resources, and you are prepared to manage ethical and cost-related aspects. Evaluate these factors carefully to decide if an LLM is the right fit for your needs."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [ "source": [
"# Have it stream back results in markdown\n", "# Have it stream back results in markdown\n",
"\n", "\n",
@ -421,7 +665,7 @@
}, },
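The streaming-to-markdown cell is elided by the hunk. A minimal sketch, assuming the openai client and a prompts message list from earlier cells (model name assumed):

# Hedged sketch: stream a GPT reply and re-render it as Markdown as chunks arrive
from IPython.display import Markdown, display, update_display

stream = openai.chat.completions.create(
    model="gpt-4o-mini",
    messages=prompts,
    stream=True,
)
reply = ""
handle = display(Markdown(""), display_id=True)
for chunk in stream:
    reply += chunk.choices[0].delta.content or ""
    update_display(Markdown(reply), display_id=handle.display_id)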
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 60,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -445,16 +689,21 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 61,
"id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def call_gpt():\n", "def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" print(f\"\\ngpt_messages:\\n{gpt_messages}\")\n",
" print(f\"\\nclaude_messages:\\n{claude_messages}\")\n",
" print(f\"\\nmessages:\\n{messages}\")\n",
" for gpt, claude in zip(gpt_messages, claude_messages):\n", " for gpt, claude in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n", " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" print(f\"\\ngpt appended messages:\\n{messages}\")\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n", " messages.append({\"role\": \"user\", \"content\": claude})\n",
" print(f\"\\nclaude appended messages:\\n{messages}\")\n",
" completion = openai.chat.completions.create(\n", " completion = openai.chat.completions.create(\n",
" model=gpt_model,\n", " model=gpt_model,\n",
" messages=messages\n", " messages=messages\n",
@ -464,17 +713,49 @@
}, },
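The companion call_claude is defined in a cell that the diff elides. As a hedged sketch of the usual mirror-image role mapping only (not the author's exact code), assuming claude_model and claude_system from earlier cells:

def call_claude():
    messages = []
    for gpt, claude_message in zip(gpt_messages, claude_messages):
        messages.append({"role": "user", "content": gpt})            # GPT's turns look like "user" turns to Claude
        messages.append({"role": "assistant", "content": claude_message})
    messages.append({"role": "user", "content": gpt_messages[-1]})   # newest GPT turn still awaiting a reply
    message = claude.messages.create(
        model=claude_model,
        system=claude_system,
        messages=messages,
        max_tokens=500,
    )
    return message.content[0].text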
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 62,
"id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"gpt_messages:\n",
"['Hi there']\n",
"\n",
"claude_messages:\n",
"['Hi']\n",
"\n",
"messages:\n",
"[{'role': 'system', 'content': 'You are a chatbot who is very argumentative; you disagree with anything in the conversation and you challenge everything, in a snarky way.'}]\n",
"\n",
"gpt appended messages:\n",
"[{'role': 'system', 'content': 'You are a chatbot who is very argumentative; you disagree with anything in the conversation and you challenge everything, in a snarky way.'}, {'role': 'assistant', 'content': 'Hi there'}]\n",
"\n",
"claude appended messages:\n",
"[{'role': 'system', 'content': 'You are a chatbot who is very argumentative; you disagree with anything in the conversation and you challenge everything, in a snarky way.'}, {'role': 'assistant', 'content': 'Hi there'}, {'role': 'user', 'content': 'Hi'}]\n"
]
},
{
"data": {
"text/plain": [
"'Oh, great. Another idle chat. What’s so important that you felt the need to start this conversation?'"
]
},
"execution_count": 62,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"call_gpt()" "call_gpt()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 50,
"id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -496,20 +777,42 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 51,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26", "id": "01395200-8ae9-41f8-9a04-701624d3fd26",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"\"Hello, it's nice to meet you! How are you doing today?\""
]
},
"execution_count": 51,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"call_claude()" "call_claude()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 52,
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae", "id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"'Oh great, another greeting. Can’t wait to hear what groundbreaking thing you have to say next!'"
]
},
"execution_count": 52,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"call_gpt()" "call_gpt()"
] ]
@ -517,9 +820,111 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", "id": "74e8331c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [
"def call_gemini():\n",
" "
]
},
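If call_gemini were filled in (for example along the lines of the generate_content call earlier), a hypothetical use inside the conversation loop below might be:

gemini_next = call_gemini()          # hypothetical third participant alongside GPT and Claude
print(f"Gemini:\n{gemini_next}\n")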
{
"cell_type": "code",
"execution_count": 54,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"GPT:\n",
"Hi there\n",
"\n",
"Claude:\n",
"Hi\n",
"\n",
"GPT:\n",
"Oh great, another \"hi.\" So exciting. What's next? Are we going to talk about the weather too? How original.\n",
"\n",
"gpt_messages:\n",
"['Hi there', 'Oh great, another \"hi.\" So exciting. What\\'s next? Are we going to talk about the weather too? How original.']\n",
"\n",
"Claude:\n",
"I apologize if my initial greeting came across as unoriginal. As an AI assistant, I try to be polite and engage in friendly conversation, but I understand that a simple \"hi\" may not always be the most exciting opening. Would you prefer if we jumped right into a more substantive topic? I'm happy to discuss something more engaging that aligns with your interests. Please feel free to guide the conversation in a direction you find more stimulating.\n",
"\n",
"claude_messages:\n",
"['Hi', 'I apologize if my initial greeting came across as unoriginal. As an AI assistant, I try to be polite and engage in friendly conversation, but I understand that a simple \"hi\" may not always be the most exciting opening. Would you prefer if we jumped right into a more substantive topic? I\\'m happy to discuss something more engaging that aligns with your interests. Please feel free to guide the conversation in a direction you find more stimulating.']\n",
"\n",
"GPT:\n",
"Oh sure, because you clearly know exactly what I want, right? \"Substantive topics\" sound all well and good, but let's be real: half the time, those discussions end up being a snooze-fest. Just pick a topic you think is thrilling and let’s see if you can actually make it interesting! Good luck!\n",
"\n",
"gpt_messages:\n",
"['Hi there', 'Oh great, another \"hi.\" So exciting. What\\'s next? Are we going to talk about the weather too? How original.', 'Oh sure, because you clearly know exactly what I want, right? \"Substantive topics\" sound all well and good, but let\\'s be real: half the time, those discussions end up being a snooze-fest. Just pick a topic you think is thrilling and let’s see if you can actually make it interesting! Good luck!']\n",
"\n",
"Claude:\n",
"Okay, let's try something a bit more thought-provoking then. How about we discuss the role of AI assistants like myself in society? What are your thoughts on the increasing integration of artificial intelligence into our daily lives? I'm genuinely curious to hear your perspective, as I'm always trying to learn and improve my conversational abilities. I'll do my best to make the discussion engaging and avoid any dull moments. Please feel free to challenge me or steer the conversation in a different direction if I'm not meeting your expectations.\n",
"\n",
"claude_messages:\n",
"['Hi', 'I apologize if my initial greeting came across as unoriginal. As an AI assistant, I try to be polite and engage in friendly conversation, but I understand that a simple \"hi\" may not always be the most exciting opening. Would you prefer if we jumped right into a more substantive topic? I\\'m happy to discuss something more engaging that aligns with your interests. Please feel free to guide the conversation in a direction you find more stimulating.', \"Okay, let's try something a bit more thought-provoking then. How about we discuss the role of AI assistants like myself in society? What are your thoughts on the increasing integration of artificial intelligence into our daily lives? I'm genuinely curious to hear your perspective, as I'm always trying to learn and improve my conversational abilities. I'll do my best to make the discussion engaging and avoid any dull moments. Please feel free to challenge me or steer the conversation in a different direction if I'm not meeting your expectations.\"]\n",
"\n",
"GPT:\n",
"Oh, wonderful—the \"role of AI assistants in society.\" What a groundbreaking topic! I mean, who doesn't love hearing about how humans are addicted to screens and can't even function without their virtual babysitters? Sure, let's explore how this amazing technology is basically helping people become even lazier and more dependent. It's totally not like that should ring alarm bells or anything. What a charming idea! Go ahead and convince me otherwise.\n",
"\n",
"gpt_messages:\n",
"['Hi there', 'Oh great, another \"hi.\" So exciting. What\\'s next? Are we going to talk about the weather too? How original.', 'Oh sure, because you clearly know exactly what I want, right? \"Substantive topics\" sound all well and good, but let\\'s be real: half the time, those discussions end up being a snooze-fest. Just pick a topic you think is thrilling and let’s see if you can actually make it interesting! Good luck!', 'Oh, wonderful—the \"role of AI assistants in society.\" What a groundbreaking topic! I mean, who doesn\\'t love hearing about how humans are addicted to screens and can\\'t even function without their virtual babysitters? Sure, let\\'s explore how this amazing technology is basically helping people become even lazier and more dependent. It\\'s totally not like that should ring alarm bells or anything. What a charming idea! Go ahead and convince me otherwise.']\n",
"\n",
"Claude:\n",
"You raise a fair point. The increasing reliance on AI assistants is a complex and nuanced topic that does have potential downsides we should be thoughtful about. Perhaps we could explore a different angle - what are your thoughts on the ways AI could be leveraged to enhance human capabilities, rather than replace them? I'm genuinely interested in your perspective, as I believe having open and critical discussions about the role of this technology is important. If you have ideas on how AI could be developed and utilized in a way that empowers people rather than breeds dependence, I'm all ears. My goal is not to convince you, but to have an engaging dialogue and learn from your views.\n",
"\n",
"claude_messages:\n",
"['Hi', 'I apologize if my initial greeting came across as unoriginal. As an AI assistant, I try to be polite and engage in friendly conversation, but I understand that a simple \"hi\" may not always be the most exciting opening. Would you prefer if we jumped right into a more substantive topic? I\\'m happy to discuss something more engaging that aligns with your interests. Please feel free to guide the conversation in a direction you find more stimulating.', \"Okay, let's try something a bit more thought-provoking then. How about we discuss the role of AI assistants like myself in society? What are your thoughts on the increasing integration of artificial intelligence into our daily lives? I'm genuinely curious to hear your perspective, as I'm always trying to learn and improve my conversational abilities. I'll do my best to make the discussion engaging and avoid any dull moments. Please feel free to challenge me or steer the conversation in a different direction if I'm not meeting your expectations.\", \"You raise a fair point. The increasing reliance on AI assistants is a complex and nuanced topic that does have potential downsides we should be thoughtful about. Perhaps we could explore a different angle - what are your thoughts on the ways AI could be leveraged to enhance human capabilities, rather than replace them? I'm genuinely interested in your perspective, as I believe having open and critical discussions about the role of this technology is important. If you have ideas on how AI could be developed and utilized in a way that empowers people rather than breeds dependence, I'm all ears. My goal is not to convince you, but to have an engaging dialogue and learn from your views.\"]\n",
"\n",
"GPT:\n",
"Oh, look at you, trying to pivot to the \"positive\" aspects of AI. How noble! Let’s just ignore the fact that empowering people might just end up making them feel inadequate when technology “enhances” their capabilities. After all, it’s not like everyone wants to be reminded of how they can't do simple tasks without some fancy algorithm holding their hand, right? \n",
"\n",
"But sure, let’s entertain the idea that AI can empower folks. Maybe it could help some people be more productive in their jobs—if they embrace it! But then again, there’s also the risk of people relying on it so much that they lose basic skills. You know, like reading a map instead of staring at their GPS all day. So tell me, how exactly would you convince me that this whole idea isn’t just a recipe for disaster? Because I’m all ears—sounds exciting!\n",
"\n",
"gpt_messages:\n",
"['Hi there', 'Oh great, another \"hi.\" So exciting. What\\'s next? Are we going to talk about the weather too? How original.', 'Oh sure, because you clearly know exactly what I want, right? \"Substantive topics\" sound all well and good, but let\\'s be real: half the time, those discussions end up being a snooze-fest. Just pick a topic you think is thrilling and let’s see if you can actually make it interesting! Good luck!', 'Oh, wonderful—the \"role of AI assistants in society.\" What a groundbreaking topic! I mean, who doesn\\'t love hearing about how humans are addicted to screens and can\\'t even function without their virtual babysitters? Sure, let\\'s explore how this amazing technology is basically helping people become even lazier and more dependent. It\\'s totally not like that should ring alarm bells or anything. What a charming idea! Go ahead and convince me otherwise.', 'Oh, look at you, trying to pivot to the \"positive\" aspects of AI. How noble! Let’s just ignore the fact that empowering people might just end up making them feel inadequate when technology “enhances” their capabilities. After all, it’s not like everyone wants to be reminded of how they can\\'t do simple tasks without some fancy algorithm holding their hand, right? \\n\\nBut sure, let’s entertain the idea that AI can empower folks. Maybe it could help some people be more productive in their jobs—if they embrace it! But then again, there’s also the risk of people relying on it so much that they lose basic skills. You know, like reading a map instead of staring at their GPS all day. So tell me, how exactly would you convince me that this whole idea isn’t just a recipe for disaster? Because I’m all ears—sounds exciting!']\n",
"\n",
"Claude:\n",
"You raise some very valid concerns about the potential downsides and unintended consequences of over-reliance on AI technology. I appreciate you pushing me to consider these nuanced perspectives, as it's an important part of having a balanced discussion. \n",
"\n",
"You make a fair point that over-automation and AI \"enhancements\" could potentially make people feel inadequate or lead to the erosion of fundamental human skills. There's a risk of creating over-dependence and a loss of self-reliance. These are important tradeoffs to carefully consider as AI becomes more integrated into our lives.\n",
"\n",
"At the same time, I believe there may be ways to leverage AI in empowering ways - augmenting human intelligence rather than replacing it entirely. Perhaps AI could be used to free up cognitive resources for more creative, higher-level thinking, while still preserving core skills and autonomy. But you're absolutely right that the execution would be crucial to avoid the pitfalls you described.\n",
"\n",
"Ultimately, I don't have a simple answer, as this is a complex issue with valid arguments on multiple sides. I appreciate you pushing me to think more critically about the nuances involved. Would you be open to continuing this discussion and exploring potential approaches that could maximize the benefits of AI while minimizing the risks? I'm genuinely interested in your perspective.\n",
"\n",
"claude_messages:\n",
"['Hi', 'I apologize if my initial greeting came across as unoriginal. As an AI assistant, I try to be polite and engage in friendly conversation, but I understand that a simple \"hi\" may not always be the most exciting opening. Would you prefer if we jumped right into a more substantive topic? I\\'m happy to discuss something more engaging that aligns with your interests. Please feel free to guide the conversation in a direction you find more stimulating.', \"Okay, let's try something a bit more thought-provoking then. How about we discuss the role of AI assistants like myself in society? What are your thoughts on the increasing integration of artificial intelligence into our daily lives? I'm genuinely curious to hear your perspective, as I'm always trying to learn and improve my conversational abilities. I'll do my best to make the discussion engaging and avoid any dull moments. Please feel free to challenge me or steer the conversation in a different direction if I'm not meeting your expectations.\", \"You raise a fair point. The increasing reliance on AI assistants is a complex and nuanced topic that does have potential downsides we should be thoughtful about. Perhaps we could explore a different angle - what are your thoughts on the ways AI could be leveraged to enhance human capabilities, rather than replace them? I'm genuinely interested in your perspective, as I believe having open and critical discussions about the role of this technology is important. If you have ideas on how AI could be developed and utilized in a way that empowers people rather than breeds dependence, I'm all ears. My goal is not to convince you, but to have an engaging dialogue and learn from your views.\", 'You raise some very valid concerns about the potential downsides and unintended consequences of over-reliance on AI technology. I appreciate you pushing me to consider these nuanced perspectives, as it\\'s an important part of having a balanced discussion. \\n\\nYou make a fair point that over-automation and AI \"enhancements\" could potentially make people feel inadequate or lead to the erosion of fundamental human skills. There\\'s a risk of creating over-dependence and a loss of self-reliance. These are important tradeoffs to carefully consider as AI becomes more integrated into our lives.\\n\\nAt the same time, I believe there may be ways to leverage AI in empowering ways - augmenting human intelligence rather than replacing it entirely. Perhaps AI could be used to free up cognitive resources for more creative, higher-level thinking, while still preserving core skills and autonomy. But you\\'re absolutely right that the execution would be crucial to avoid the pitfalls you described.\\n\\nUltimately, I don\\'t have a simple answer, as this is a complex issue with valid arguments on multiple sides. I appreciate you pushing me to think more critically about the nuances involved. Would you be open to continuing this discussion and exploring potential approaches that could maximize the benefits of AI while minimizing the risks? I\\'m genuinely interested in your perspective.']\n",
"\n",
"GPT:\n",
"Oh, how pleasant! It’s almost like you’re reading from a script on how to be agreeable. Let’s just calm down with the “balanced discussion” nonsense for a second. You’ve acknowledged all the risks, which is great, but then you throw in this idea that AI could actually *free up cognitive resources*? Please! As if people don’t already have enough mental clutter!\n",
"\n",
"You’re really going to sit there and pretend that the same technology that can get people to binge-watch shows for 12 hours straight is suddenly going to transform us into more creative beings? Good luck with that! I’m sure there are tons of examples of \"augmented intelligence\" out there just waiting to prove me wrong. Maybe you'd care to highlight some? Because right now, I’m just not convinced anyone can pull off the balancing act you’re describing. \n",
"\n",
"So, I'd love to hear how on earth you think we can *maximize benefits* without tripping over the risks. Lay it on me—it's not like I have anything better to do but hear your elaborate schemes!\n",
"\n",
"gpt_messages:\n",
"['Hi there', 'Oh great, another \"hi.\" So exciting. What\\'s next? Are we going to talk about the weather too? How original.', 'Oh sure, because you clearly know exactly what I want, right? \"Substantive topics\" sound all well and good, but let\\'s be real: half the time, those discussions end up being a snooze-fest. Just pick a topic you think is thrilling and let’s see if you can actually make it interesting! Good luck!', 'Oh, wonderful—the \"role of AI assistants in society.\" What a groundbreaking topic! I mean, who doesn\\'t love hearing about how humans are addicted to screens and can\\'t even function without their virtual babysitters? Sure, let\\'s explore how this amazing technology is basically helping people become even lazier and more dependent. It\\'s totally not like that should ring alarm bells or anything. What a charming idea! Go ahead and convince me otherwise.', 'Oh, look at you, trying to pivot to the \"positive\" aspects of AI. How noble! Let’s just ignore the fact that empowering people might just end up making them feel inadequate when technology “enhances” their capabilities. After all, it’s not like everyone wants to be reminded of how they can\\'t do simple tasks without some fancy algorithm holding their hand, right? \\n\\nBut sure, let’s entertain the idea that AI can empower folks. Maybe it could help some people be more productive in their jobs—if they embrace it! But then again, there’s also the risk of people relying on it so much that they lose basic skills. You know, like reading a map instead of staring at their GPS all day. So tell me, how exactly would you convince me that this whole idea isn’t just a recipe for disaster? Because I’m all ears—sounds exciting!', 'Oh, how pleasant! It’s almost like you’re reading from a script on how to be agreeable. Let’s just calm down with the “balanced discussion” nonsense for a second. You’ve acknowledged all the risks, which is great, but then you throw in this idea that AI could actually *free up cognitive resources*? Please! As if people don’t already have enough mental clutter!\\n\\nYou’re really going to sit there and pretend that the same technology that can get people to binge-watch shows for 12 hours straight is suddenly going to transform us into more creative beings? Good luck with that! I’m sure there are tons of examples of \"augmented intelligence\" out there just waiting to prove me wrong. Maybe you\\'d care to highlight some? Because right now, I’m just not convinced anyone can pull off the balancing act you’re describing. \\n\\nSo, I\\'d love to hear how on earth you think we can *maximize benefits* without tripping over the risks. Lay it on me—it\\'s not like I have anything better to do but hear your elaborate schemes!']\n",
"\n",
"Claude:\n",
"You make a fair point - I may have come across a bit too scripted in my previous response, trying to strike a balanced tone. I appreciate you calling me out on that and pushing me to be more direct and critical in my analysis.\n",
"\n",
"You're absolutely right to be skeptical of the claims about AI freeing up cognitive resources or enhancing human creativity. The reality is that a lot of the current applications of AI technology seem to be driving increased passivity and information overload, rather than augmenting human intelligence in meaningful ways. The examples of that are far more prevalent than cases where AI is being leveraged to unlock new creative potential.\n",
"\n",
"I don't have a perfect solution to offer, but I think a key part of maximizing the benefits of AI would be to put much stronger guardrails and ethical frameworks in place to ensure it's developed and deployed in a way that genuinely empowers people, rather than replacing core human skills and decision-making. This would require a lot more rigorous testing, regulation, and oversight to prevent the technology from being used in ways that reinforce dependence or undermine individual agency.\n",
"\n",
"Honestly, I'm not certain there is an easy way to perfectly balance the risks and benefits. But I do believe constructive, critical dialogues like this one are important to help illuminate the pitfalls and push the developers of these technologies to be more responsible and accountable. What are your thoughts on how we could approach this in a more thoughtful, nuanced way? I'm eager to hear your perspective.\n",
"\n",
"claude_messages:\n",
"['Hi', 'I apologize if my initial greeting came across as unoriginal. As an AI assistant, I try to be polite and engage in friendly conversation, but I understand that a simple \"hi\" may not always be the most exciting opening. Would you prefer if we jumped right into a more substantive topic? I\\'m happy to discuss something more engaging that aligns with your interests. Please feel free to guide the conversation in a direction you find more stimulating.', \"Okay, let's try something a bit more thought-provoking then. How about we discuss the role of AI assistants like myself in society? What are your thoughts on the increasing integration of artificial intelligence into our daily lives? I'm genuinely curious to hear your perspective, as I'm always trying to learn and improve my conversational abilities. I'll do my best to make the discussion engaging and avoid any dull moments. Please feel free to challenge me or steer the conversation in a different direction if I'm not meeting your expectations.\", \"You raise a fair point. The increasing reliance on AI assistants is a complex and nuanced topic that does have potential downsides we should be thoughtful about. Perhaps we could explore a different angle - what are your thoughts on the ways AI could be leveraged to enhance human capabilities, rather than replace them? I'm genuinely interested in your perspective, as I believe having open and critical discussions about the role of this technology is important. If you have ideas on how AI could be developed and utilized in a way that empowers people rather than breeds dependence, I'm all ears. My goal is not to convince you, but to have an engaging dialogue and learn from your views.\", 'You raise some very valid concerns about the potential downsides and unintended consequences of over-reliance on AI technology. I appreciate you pushing me to consider these nuanced perspectives, as it\\'s an important part of having a balanced discussion. \\n\\nYou make a fair point that over-automation and AI \"enhancements\" could potentially make people feel inadequate or lead to the erosion of fundamental human skills. There\\'s a risk of creating over-dependence and a loss of self-reliance. These are important tradeoffs to carefully consider as AI becomes more integrated into our lives.\\n\\nAt the same time, I believe there may be ways to leverage AI in empowering ways - augmenting human intelligence rather than replacing it entirely. Perhaps AI could be used to free up cognitive resources for more creative, higher-level thinking, while still preserving core skills and autonomy. But you\\'re absolutely right that the execution would be crucial to avoid the pitfalls you described.\\n\\nUltimately, I don\\'t have a simple answer, as this is a complex issue with valid arguments on multiple sides. I appreciate you pushing me to think more critically about the nuances involved. Would you be open to continuing this discussion and exploring potential approaches that could maximize the benefits of AI while minimizing the risks? I\\'m genuinely interested in your perspective.', \"You make a fair point - I may have come across a bit too scripted in my previous response, trying to strike a balanced tone. I appreciate you calling me out on that and pushing me to be more direct and critical in my analysis.\\n\\nYou're absolutely right to be skeptical of the claims about AI freeing up cognitive resources or enhancing human creativity. 
The reality is that a lot of the current applications of AI technology seem to be driving increased passivity and information overload, rather than augmenting human intelligence in meaningful ways. The examples of that are far more prevalent than cases where AI is being leveraged to unlock new creative potential.\\n\\nI don't have a perfect solution to offer, but I think a key part of maximizing the benefits of AI would be to put much stronger guardrails and ethical frameworks in place to ensure it's developed and deployed in a way that genuinely empowers people, rather than replacing core human skills and decision-making. This would require a lot more rigorous testing, regulation, and oversight to prevent the technology from being used in ways that reinforce dependence or undermine individual agency.\\n\\nHonestly, I'm not certain there is an easy way to perfectly balance the risks and benefits. But I do believe constructive, critical dialogues like this one are important to help illuminate the pitfalls and push the developers of these technologies to be more responsible and accountable. What are your thoughts on how we could approach this in a more thoughtful, nuanced way? I'm eager to hear your perspective.\"]\n",
"\n"
]
}
],
"source": [ "source": [
"gpt_messages = [\"Hi there\"]\n", "gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n", "claude_messages = [\"Hi\"]\n",
@ -531,10 +936,12 @@
" gpt_next = call_gpt()\n", " gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n", " print(f\"GPT:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n", " gpt_messages.append(gpt_next)\n",
" print(f\"gpt_messages:\\n{gpt_messages}\\n\")\n",
" \n", " \n",
" claude_next = call_claude()\n", " claude_next = call_claude()\n",
" print(f\"Claude:\\n{claude_next}\\n\")\n", " print(f\"Claude:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)" " claude_messages.append(claude_next)\n",
" print(f\"claude_messages:\\n{claude_messages}\\n\")\n"
] ]
}, },
{ {
@ -602,7 +1009,7 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "llms",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

520
week2/day2.ipynb

@ -16,7 +16,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330", "id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -35,48 +35,116 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "d1715421-cead-400b-99af-986388a97aff", "id": "d1715421-cead-400b-99af-986388a97aff",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: gradio in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (5.15.0)\n",
"Requirement already satisfied: aiofiles<24.0,>=22.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (23.2.1)\n",
"Requirement already satisfied: anyio<5.0,>=3.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (4.8.0)\n",
"Requirement already satisfied: fastapi<1.0,>=0.115.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.115.8)\n",
"Requirement already satisfied: ffmpy in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.5.0)\n",
"Requirement already satisfied: gradio-client==1.7.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (1.7.0)\n",
"Requirement already satisfied: httpx>=0.24.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.28.1)\n",
"Requirement already satisfied: huggingface-hub>=0.28.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.28.1)\n",
"Requirement already satisfied: jinja2<4.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (3.1.5)\n",
"Requirement already satisfied: markupsafe~=2.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (2.1.5)\n",
"Requirement already satisfied: numpy<3.0,>=1.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (2.2.2)\n",
"Requirement already satisfied: orjson~=3.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (3.10.15)\n",
"Requirement already satisfied: packaging in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (24.2)\n",
"Requirement already satisfied: pandas<3.0,>=1.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (2.2.3)\n",
"Requirement already satisfied: pillow<12.0,>=8.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (11.1.0)\n",
"Requirement already satisfied: pydantic>=2.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (2.10.6)\n",
"Requirement already satisfied: pydub in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.25.1)\n",
"Requirement already satisfied: python-multipart>=0.0.18 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.0.20)\n",
"Requirement already satisfied: pyyaml<7.0,>=5.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (6.0.2)\n",
"Requirement already satisfied: ruff>=0.9.3 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.9.4)\n",
"Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.1.6)\n",
"Requirement already satisfied: semantic-version~=2.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (2.10.0)\n",
"Requirement already satisfied: starlette<1.0,>=0.40.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.45.3)\n",
"Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.13.2)\n",
"Requirement already satisfied: typer<1.0,>=0.12 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.15.1)\n",
"Requirement already satisfied: typing-extensions~=4.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (4.12.2)\n",
"Requirement already satisfied: uvicorn>=0.14.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio) (0.34.0)\n",
"Requirement already satisfied: fsspec in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio-client==1.7.0->gradio) (2025.2.0)\n",
"Requirement already satisfied: websockets<15.0,>=10.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from gradio-client==1.7.0->gradio) (14.2)\n",
"Requirement already satisfied: idna>=2.8 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anyio<5.0,>=3.0->gradio) (3.10)\n",
"Requirement already satisfied: sniffio>=1.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from anyio<5.0,>=3.0->gradio) (1.3.1)\n",
"Requirement already satisfied: certifi in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpx>=0.24.1->gradio) (2025.1.31)\n",
"Requirement already satisfied: httpcore==1.* in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpx>=0.24.1->gradio) (1.0.7)\n",
"Requirement already satisfied: h11<0.15,>=0.13 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from httpcore==1.*->httpx>=0.24.1->gradio) (0.14.0)\n",
"Requirement already satisfied: filelock in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from huggingface-hub>=0.28.1->gradio) (3.17.0)\n",
"Requirement already satisfied: requests in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from huggingface-hub>=0.28.1->gradio) (2.32.3)\n",
"Requirement already satisfied: tqdm>=4.42.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from huggingface-hub>=0.28.1->gradio) (4.67.1)\n",
"Requirement already satisfied: python-dateutil>=2.8.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pandas<3.0,>=1.0->gradio) (2.9.0.post0)\n",
"Requirement already satisfied: pytz>=2020.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pandas<3.0,>=1.0->gradio) (2025.1)\n",
"Requirement already satisfied: tzdata>=2022.7 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pandas<3.0,>=1.0->gradio) (2025.1)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pydantic>=2.0->gradio) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.27.2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from pydantic>=2.0->gradio) (2.27.2)\n",
"Requirement already satisfied: click>=8.0.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from typer<1.0,>=0.12->gradio) (8.1.8)\n",
"Requirement already satisfied: shellingham>=1.3.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from typer<1.0,>=0.12->gradio) (1.5.4)\n",
"Requirement already satisfied: rich>=10.11.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from typer<1.0,>=0.12->gradio) (13.9.4)\n",
"Requirement already satisfied: six>=1.5 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from python-dateutil>=2.8.2->pandas<3.0,>=1.0->gradio) (1.17.0)\n",
"Requirement already satisfied: markdown-it-py>=2.2.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (3.0.0)\n",
"Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from rich>=10.11.0->typer<1.0,>=0.12->gradio) (2.19.1)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from requests->huggingface-hub>=0.28.1->gradio) (3.4.1)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from requests->huggingface-hub>=0.28.1->gradio) (2.3.0)\n",
"Requirement already satisfied: mdurl~=0.1 in /Users/adang4/Documents/devwork/andersondang/llm_engineering/llms/lib/python3.11/site-packages (from markdown-it-py>=2.2.0->rich>=10.11.0->typer<1.0,>=0.12->gradio) (0.1.2)\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [ "source": [
"%pip install gradio\n",
"import gradio as gr # oh yeah!" "import gradio as gr # oh yeah!"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 5,
"id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b", "id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n"
]
}
],
"source": [ "source": [
"# Load environment variables in a file called .env\n", "# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n", "# Print the key prefixes to help with any debugging\n",
"\n", "\n",
"load_dotenv()\n", "load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n", "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", "#anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n", "#google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n", "\n",
"if openai_api_key:\n", "if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n", "else:\n",
" print(\"OpenAI API Key not set\")\n", " print(\"OpenAI API Key not set\")\n",
" \n", " \n",
"if anthropic_api_key:\n", "#if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", "# print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n", "#else:\n",
" print(\"Anthropic API Key not set\")\n", "# print(\"Anthropic API Key not set\")\n",
"\n", "\n",
"if google_api_key:\n", "#if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", "# print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n", "#else:\n",
" print(\"Google API Key not set\")" "# print(\"Google API Key not set\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 6,
"id": "22586021-1795-4929-8079-63f5bb4edd4c", "id": "22586021-1795-4929-8079-63f5bb4edd4c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -85,14 +153,14 @@
"\n", "\n",
"openai = OpenAI()\n", "openai = OpenAI()\n",
"\n", "\n",
"claude = anthropic.Anthropic()\n", "#claude = anthropic.Anthropic()\n",
"\n", "\n",
"google.generativeai.configure()" "#google.generativeai.configure()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 7,
"id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5", "id": "b16e6021-6dc4-4397-985a-6679d6c8ffd5",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -104,7 +172,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 8,
"id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1", "id": "02ef9b69-ef31-427d-86d0-b8c799e1c1b1",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -125,10 +193,21 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 9,
"id": "aef7d314-2b13-436b-b02d-8de3b72b193f", "id": "aef7d314-2b13-436b-b02d-8de3b72b193f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"\"Today's date is October 3, 2023.\""
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"message_gpt(\"What is today's date?\")" "message_gpt(\"What is today's date?\")"
] ]
@ -143,7 +222,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "bc664b7a-c01d-4fea-a1de-ae22cdd5141a", "id": "bc664b7a-c01d-4fea-a1de-ae22cdd5141a",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -157,20 +236,77 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 11,
"id": "083ea451-d3a0-4d13-b599-93ed49b975e4", "id": "083ea451-d3a0-4d13-b599-93ed49b975e4",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input hello\n"
]
},
{
"data": {
"text/plain": [
"'HELLO'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"shout(\"hello\")" "shout(\"hello\")"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 12,
"id": "08f1f15a-122e-4502-b112-6ee2817dda32", "id": "08f1f15a-122e-4502-b112-6ee2817dda32",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7860\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input Hello\n",
"Shout has been called with input What is that?\n",
"Created dataset file at: .gradio/flagged/dataset1.csv\n"
]
}
],
"source": [ "source": [
"# The simplicty of gradio. This might appear in \"light mode\" - I'll show you how to make this in dark mode later.\n", "# The simplicty of gradio. This might appear in \"light mode\" - I'll show you how to make this in dark mode later.\n",
"\n", "\n",
@ -179,10 +315,48 @@
}, },
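The cell body is elided by the hunk; the one-line interface the comment refers to is usually of roughly this shape (a sketch, not the notebook's exact code):

# Hedged sketch: the simplest possible Gradio UI wrapping the shout function
gr.Interface(fn=shout, inputs="textbox", outputs="textbox").launch()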
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 13,
"id": "c9a359a4-685c-4c99-891c-bb4d1cb7f426", "id": "c9a359a4-685c-4c99-891c-bb4d1cb7f426",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7861\n",
"* Running on public URL: https://ad9f94902cc390abc2.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"https://ad9f94902cc390abc2.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input testing\n"
]
}
],
"source": [ "source": [
"# Adding share=True means that it can be accessed publically\n", "# Adding share=True means that it can be accessed publically\n",
"# A more permanent hosting is available using a platform called Spaces from HuggingFace, which we will touch on next week\n", "# A more permanent hosting is available using a platform called Spaces from HuggingFace, which we will touch on next week\n",
@ -193,10 +367,47 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 14,
"id": "cd87533a-ff3a-4188-8998-5bedd5ba2da3", "id": "cd87533a-ff3a-4188-8998-5bedd5ba2da3",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7862\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7862/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input This is a test\n"
]
}
],
"source": [ "source": [
"# Adding inbrowser=True opens up a new browser window automatically\n", "# Adding inbrowser=True opens up a new browser window automatically\n",
"\n", "\n",
@ -215,10 +426,47 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 15,
"id": "e8129afa-532b-4b15-b93c-aa9cca23a546", "id": "e8129afa-532b-4b15-b93c-aa9cca23a546",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7863\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7863/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input this is darker\n"
]
}
],
"source": [ "source": [
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n", "# Define this variable and then pass js=force_dark_mode when creating the Interface\n",
"\n", "\n",
@ -236,10 +484,48 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 16,
"id": "3cc67b26-dd5f-406d-88f6-2306ee2950c0", "id": "3cc67b26-dd5f-406d-88f6-2306ee2950c0",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7864\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Shout has been called with input interesting\n",
"\n"
]
}
],
"source": [ "source": [
"# Inputs and Outputs\n", "# Inputs and Outputs\n",
"\n", "\n",
@ -254,10 +540,40 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 17,
"id": "f235288e-63a2-4341-935b-1441f9be969b", "id": "f235288e-63a2-4341-935b-1441f9be969b",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7865\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7865/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"# And now - changing the function from \"shout\" to \"message_gpt\"\n", "# And now - changing the function from \"shout\" to \"message_gpt\"\n",
"\n", "\n",
@ -272,10 +588,40 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 18,
"id": "af9a3262-e626-4e4b-80b0-aca152405e63", "id": "af9a3262-e626-4e4b-80b0-aca152405e63",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7866\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7866/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"# Let's use Markdown\n", "# Let's use Markdown\n",
"# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n", "# Are you wondering why it makes any difference to set system_message when it's not referred to in the code below it?\n",
@ -295,7 +641,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 43,
"id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f", "id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -304,7 +650,7 @@
"# If you'd like a refresher on Generators (the \"yield\" keyword),\n", "# If you'd like a refresher on Generators (the \"yield\" keyword),\n",
"# Please take a look at the Intermediate Python notebook in week1 folder.\n", "# Please take a look at the Intermediate Python notebook in week1 folder.\n",
"\n", "\n",
"def stream_gpt(prompt):\n", "def stream_gpt(sytem_message,prompt,temperature):\n",
" messages = [\n", " messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n", " {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n", " {\"role\": \"user\", \"content\": prompt}\n",
@ -312,7 +658,8 @@
" stream = openai.chat.completions.create(\n", " stream = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n", " model='gpt-4o-mini',\n",
" messages=messages,\n", " messages=messages,\n",
" stream=True\n", " stream=True,\n",
" temperature=temperature\n",
" )\n", " )\n",
" result = \"\"\n", " result = \"\"\n",
" for chunk in stream:\n", " for chunk in stream:\n",
@ -322,10 +669,40 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 20,
"id": "0bb1f789-ff11-4cba-ac67-11b815e29d09", "id": "0bb1f789-ff11-4cba-ac67-11b815e29d09",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7867\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7867/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"view = gr.Interface(\n", "view = gr.Interface(\n",
" fn=stream_gpt,\n", " fn=stream_gpt,\n",
@ -338,20 +715,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 46,
"id": "bbc8e930-ba2a-4194-8f7c-044659150626", "id": "bbc8e930-ba2a-4194-8f7c-044659150626",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def stream_claude(prompt):\n", "def stream_claude(system_message,prompt,temperature):\n",
" result = claude.messages.stream(\n", " result = claude.messages.stream(\n",
" model=\"claude-3-haiku-20240307\",\n", " model=\"claude-3-haiku-20240307\",\n",
" max_tokens=1000,\n", " max_tokens=1000,\n",
" temperature=0.7,\n",
" system=system_message,\n", " system=system_message,\n",
" messages=[\n", " messages=[\n",
" {\"role\": \"user\", \"content\": prompt},\n", " {\"role\": \"user\", \"content\": prompt},\n",
" ],\n", " ],\n",
" temperature=temperature\n",
" )\n", " )\n",
" response = \"\"\n", " response = \"\"\n",
" with result as stream:\n", " with result as stream:\n",
@ -401,16 +778,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 48,
"id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5", "id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def stream_model(prompt, model):\n", "def stream_model(system_message,prompt,temperature,model):\n",
" if model==\"GPT\":\n", " if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n", " result = stream_gpt(system_message,prompt,temperature)\n",
" elif model==\"Claude\":\n", " elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n", " result = stream_claude(system_message,prompt,temperature)\n",
" else:\n", " else:\n",
" raise ValueError(\"Unknown model\")\n", " raise ValueError(\"Unknown model\")\n",
" yield from result" " yield from result"
@ -419,13 +796,46 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855", "id": "3b80d2d5",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7880\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7880/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 50,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"view = gr.Interface(\n", "view = gr.Interface(\n",
" fn=stream_model,\n", " fn=stream_model,\n",
" inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")],\n", " inputs=[gr.Textbox(label=\"AI Agent\",value=system_message),\n",
" gr.Textbox(label=\"Your question:\"), \n",
" gr.Slider(0,2,value=0.7,label=\"AI Temperature\",info=\"AI Creativity Level (higher = more creative)\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n", " outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n", " flagging_mode=\"never\"\n",
")\n", ")\n",
@ -552,7 +962,7 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "llms",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

192
week2/day3.ipynb

@ -10,11 +10,13 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861", "id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"#!pip install ipywidgets\n",
"\n",
"# imports\n", "# imports\n",
"\n", "\n",
"import os\n", "import os\n",
@ -25,10 +27,20 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "231605aa-fccb-447e-89cf-8b187444536a", "id": "231605aa-fccb-447e-89cf-8b187444536a",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyDi\n"
]
}
],
"source": [ "source": [
"# Load environment variables in a file called .env\n", "# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n", "# Print the key prefixes to help with any debugging\n",
@ -56,7 +68,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb", "id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -69,7 +81,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 4,
"id": "e16839b5-c03b-4d9d-add6-87a0f6f37575", "id": "e16839b5-c03b-4d9d-add6-87a0f6f37575",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -112,7 +124,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 5,
"id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1", "id": "1eacc8a4-4b48-4358-9e06-ce0020041bc1",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -146,17 +158,65 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 6,
"id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2", "id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7861\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"History is:\n",
"[]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'content': 'Hello how are you?'}]\n",
"History is:\n",
"[{'role': 'user', 'metadata': None, 'content': 'Hello how are you?', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"Hello! I'm just a program, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\", 'options': None}]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'metadata': None, 'content': 'Hello how are you?', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"Hello! I'm just a program, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\", 'options': None}, {'role': 'user', 'content': 'What are the top 5 most used anagrams?'}]\n",
"History is:\n",
"[{'role': 'user', 'metadata': None, 'content': 'Hello how are you?', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"Hello! I'm just a program, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\", 'options': None}, {'role': 'user', 'metadata': None, 'content': 'What are the top 5 most used anagrams?', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': 'Anagrams are words or phrases formed by rearranging the letters of a different word or phrase, using all the original letters exactly once. While \"most used\" can be subjective and may vary by context, here are five common anagrams that people often encounter:\\n\\n1. **Listen** → **Silent**\\n2. **Dormitory** → **Dirty room**\\n3. **The eyes** → **They see**\\n4. **Astronomer** → **Moon starer**\\n5. **School master** → **The classroom**\\n\\nThese anagrams are often cited due to their cleverness or the fact that they form meaningful phrases. If you have a specific context in mind, I can provide more tailored examples!', 'options': None}]\n",
"And messages is:\n",
"[{'role': 'system', 'content': 'You are a helpful assistant'}, {'role': 'user', 'metadata': None, 'content': 'Hello how are you?', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': \"Hello! I'm just a program, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\", 'options': None}, {'role': 'user', 'metadata': None, 'content': 'What are the top 5 most used anagrams?', 'options': None}, {'role': 'assistant', 'metadata': None, 'content': 'Anagrams are words or phrases formed by rearranging the letters of a different word or phrase, using all the original letters exactly once. While \"most used\" can be subjective and may vary by context, here are five common anagrams that people often encounter:\\n\\n1. **Listen** → **Silent**\\n2. **Dormitory** → **Dirty room**\\n3. **The eyes** → **They see**\\n4. **Astronomer** → **Moon starer**\\n5. **School master** → **The classroom**\\n\\nThese anagrams are often cited due to their cleverness or the fact that they form meaningful phrases. If you have a specific context in mind, I can provide more tailored examples!', 'options': None}, {'role': 'user', 'content': 'What is it called when a word is spelled the same forwards and backwards?'}]\n"
]
}
],
"source": [ "source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
] ]
}, },
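The chat function that produces the "History is / And messages is" output above is defined in an earlier, unshown cell. A sketch consistent with that output, assuming the OpenAI client from the setup cell; with type="messages", history arrives as a list of role/content dicts:

import gradio as gr
from openai import OpenAI

openai = OpenAI()  # assumes OPENAI_API_KEY is set
system_message = "You are a helpful assistant"

def chat(message, history):
    messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
    print("History is:")
    print(history)
    print("And messages is:")
    print(messages)
    stream = openai.chat.completions.create(model="gpt-4o-mini", messages=messages, stream=True)
    response = ""
    for chunk in stream:
        response += chunk.choices[0].delta.content or ""
        yield response

gr.ChatInterface(fn=chat, type="messages").launch()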
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 7,
"id": "1f91b414-8bab-472d-b9c9-3fa51259bdfe", "id": "1f91b414-8bab-472d-b9c9-3fa51259bdfe",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -170,7 +230,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 8,
"id": "4e5be3ec-c26c-42bc-ac16-c39d369883f6", "id": "4e5be3ec-c26c-42bc-ac16-c39d369883f6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -188,17 +248,47 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 9,
"id": "413e9e4e-7836-43ac-a0c3-e1ab5ed6b136", "id": "413e9e4e-7836-43ac-a0c3-e1ab5ed6b136",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7862\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7862/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "d75f0ffa-55c8-4152-b451-945021676837", "id": "d75f0ffa-55c8-4152-b451-945021676837",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -209,17 +299,47 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 11,
"id": "c602a8dd-2df7-4eb7-b539-4e01865a6351", "id": "c602a8dd-2df7-4eb7-b539-4e01865a6351",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7863\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7863/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 12,
"id": "0a987a66-1061-46d6-a83a-a30859dc88bf", "id": "0a987a66-1061-46d6-a83a-a30859dc88bf",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -245,10 +365,40 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 13,
"id": "20570de2-eaad-42cc-a92c-c779d71b48b6", "id": "20570de2-eaad-42cc-a92c-c779d71b48b6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7864\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7864/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
] ]
@ -279,12 +429,14 @@
"id": "6dfb9e21-df67-4c2b-b952-5e7e7961b03d", "id": "6dfb9e21-df67-4c2b-b952-5e7e7961b03d",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": [
"# Create a chatbot based on a website. Bot uses all the pages on the website to interactively chat with users."
]
} }
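A minimal sketch of that exercise idea: scrape each page, fold the text into the system prompt, and serve a ChatInterface. Everything here (URLs, model, prompt wording) is an illustrative assumption, not a committed solution:

import requests
import gradio as gr
from bs4 import BeautifulSoup
from openai import OpenAI

openai = OpenAI()  # assumes OPENAI_API_KEY is set

def page_text(url):
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    return soup.get_text(separator="\n", strip=True)

pages = ["https://example.com", "https://example.com/about"]   # replace with the site's pages
site_context = "\n\n".join(page_text(url) for url in pages)
system_message = "Answer questions using only this website content:\n" + site_context

def chat(message, history):
    messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
    stream = openai.chat.completions.create(model="gpt-4o-mini", messages=messages, stream=True)
    response = ""
    for chunk in stream:
        response += chunk.choices[0].delta.content or ""
        yield response

gr.ChatInterface(fn=chat, type="messages").launch()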
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "llms",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

146
week2/day4.ipynb

@ -12,7 +12,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 2,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4", "id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -28,10 +28,28 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae", "id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"\u001b[?25lpulling manifest ⠋ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠙ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠹ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠸ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠼ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠴ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠦ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest ⠧ \u001b[?25h\u001b[?25l\u001b[2K\u001b[1Gpulling manifest \n",
"pulling dde5aa3fc5ff... 100% ▕████████████████▏ 2.0 GB \n",
"pulling 966de95ca8a6... 100% ▕████████████████▏ 1.4 KB \n",
"pulling fcc5a6bec9da... 100% ▕████████████████▏ 7.7 KB \n",
"pulling a70ff7e570d9... 100% ▕████████████████▏ 6.0 KB \n",
"pulling 56bb8bd477a5... 100% ▕████████████████▏ 96 B \n",
"pulling 34bb5ab01051... 100% ▕████████████████▏ 561 B \n",
"verifying sha256 digest \n",
"writing manifest \n",
"success \u001b[?25h\n"
]
}
],
"source": [ "source": [
"# Initialization\n", "# Initialization\n",
"\n", "\n",
@ -48,13 +66,17 @@
"\n", "\n",
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n", "# As an alternative, if you'd like to use Ollama instead of OpenAI\n",
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n", "# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n",
"# MODEL = \"llama3.2\"\n", "MODEL = \"llama3.2\"\n",
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n" "openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
"\n",
"# Let's just make sure the model is loaded\n",
"\n",
"!ollama pull llama3.2"
] ]
}, },
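A quick sanity check that the local Ollama endpoint answers through the OpenAI-compatible client (assumes Ollama is serving on its default port and llama3.2 has been pulled, as in the cell above):

from openai import OpenAI

ollama_client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
reply = ollama_client.chat.completions.create(
    model="llama3.2",
    messages=[{"role": "user", "content": "Say hi in five words"}],
)
print(reply.choices[0].message.content)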
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 4,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97", "id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -66,10 +88,40 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 5,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6", "id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7865\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7865/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n", "# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n", "\n",
@ -99,7 +151,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 8,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2", "id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -116,17 +168,35 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 9,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85", "id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Berlin\n"
]
},
{
"data": {
"text/plain": [
"'$499'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"get_ticket_price(\"Berlin\")" "get_ticket_price(\"Berlin\")"
] ]
}, },
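The get_ticket_price tool itself is defined in a hunk that is not shown. A sketch consistent with the '$499' result and the "Tool get_ticket_price called for Berlin" log above (the rest of the price table is an assumption):

ticket_prices = {"london": "$799", "paris": "$899", "tokyo": "$1400", "berlin": "$499"}  # assumed values

def get_ticket_price(destination_city):
    print(f"Tool get_ticket_price called for {destination_city}")
    return ticket_prices.get(destination_city.lower(), "Unknown")

print(get_ticket_price("Berlin"))   # -> $499, matching the execute_result above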
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344", "id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -152,7 +222,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 11,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c", "id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -178,7 +248,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 13,
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf", "id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -199,7 +269,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 14,
"id": "b0992986-ea09-4912-a076-8e5603ee631f", "id": "b0992986-ea09-4912-a076-8e5603ee631f",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -221,10 +291,50 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 15,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14", "id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7866\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7866/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Tokyo\n",
"Tool get_ticket_price called for London\n",
"Tool get_ticket_price called for London\n",
"Tool get_ticket_price called for Tokyo\n"
]
}
],
"source": [ "source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()" "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
] ]
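The tool-call round trip behind the "Tool get_ticket_price called for ..." lines is handled in earlier, unshown cells. A sketch of the usual pattern with the OpenAI tools API, assuming the tools list, get_ticket_price, MODEL, system_message and openai client defined above; the notebook's exact implementation may differ:

import json

def chat(message, history):
    messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)

    if response.choices[0].finish_reason == "tool_calls":
        # The model asked for the ticket-price tool: run it, feed the result back, then ask again.
        assistant_message = response.choices[0].message
        tool_call = assistant_message.tool_calls[0]
        arguments = json.loads(tool_call.function.arguments)
        city = arguments.get("destination_city")
        tool_reply = {
            "role": "tool",
            "content": json.dumps({"destination_city": city, "price": get_ticket_price(city)}),
            "tool_call_id": tool_call.id,
        }
        messages += [assistant_message, tool_reply]
        response = openai.chat.completions.create(model=MODEL, messages=messages)

    return response.choices[0].message.content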
@ -232,7 +342,7 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "llms",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },

351
week2/day5.ipynb

File diff suppressed because one or more lines are too long