From a6a83d92487349d36c0a85bc01c386ef28e0004a Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 15 Dec 2024 01:45:42 +0100 Subject: [PATCH 1/5] exercise day 1-week 2 --- .env.swp | Bin 0 -> 12288 bytes week1/Guide to Jupyter.ipynb | 508 ++- .../day1-article-pdf-reader.ipynb | 2 +- .../day1-selenium-for-javascript-sites.ipynb | 157 +- week1/day1.ipynb | 398 +- week1/day2 EXERCISE.ipynb | 358 +- week1/day5.ipynb | 3674 ++++++++++++++++- week1/solutions/week1 SOLUTION.ipynb | 170 +- week1/troubleshooting.ipynb | 48 +- week1/week1 EXERCISE.ipynb | 258 +- week2/day1.ipynb | 433 +- 11 files changed, 5797 insertions(+), 209 deletions(-) create mode 100644 .env.swp diff --git a/.env.swp b/.env.swp new file mode 100644 index 0000000000000000000000000000000000000000..82f7c89080cb48dd5893ea53fe1189d5b49531aa GIT binary patch literal 12288 zcmeI%F-rq66u|LUmrkPCFHjZ7Hdoul(V~N(SkdYzxh5^aUanlL;^b%bL+B=$pf?mb zcaQ%IxsdmAg!kKyOOxBPi}q2sCn2`u<2w$TA~w~jE)##O=f|U)%TXt}9t?*kr&pCH zl~H$j@vvOziCwB`USxXZa<3kZ&0dr>9g}-+3pJl-_twvxx3+M8rk=}}epR~GKUH2w z0RaTo1UAS0!(I|o`+K|X*3NjX$_4=h5I_I{1Q0*~0R-wTuv#?aHMF?#(`KV$t+xW* zA%Fk^2q1s}0tg_000IagP;UWkMBcVUKEsUv|I6?Hxc>Tf^G7ZBy`2wV*Hdp`v literal 0 HcmV?d00001 diff --git a/week1/Guide to Jupyter.ipynb b/week1/Guide to Jupyter.ipynb index 0f0ddf2..8d0a1cc 100644 --- a/week1/Guide to Jupyter.ipynb +++ b/week1/Guide to Jupyter.ipynb @@ -54,7 +54,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "585eb9c1-85ee-4c27-8dc2-b4d8d022eda0", "metadata": {}, "outputs": [], @@ -78,10 +78,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "a067d2b1-53d5-4aeb-8a3c-574d39ff654a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "My favorite fruit is bananas\n" + ] + } + ], "source": [ "# Use the variable\n", "\n", @@ -197,7 +205,7 @@ }, { "cell_type": "markdown", - "id": "eac060f2-7a71-46e7-8235-b6ad0a76f5f8", + "id": "c6e82904-2132-408e-9985-e2b95e7f0794", "metadata": {}, "source": [ "# Using markdown\n", @@ -245,10 +253,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "82042fc5-a907-4381-a4b8-eb9386df19cd", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "'ls' n'est pas reconnu en tant que commande interne\n", + "ou externe, un programme ex‚cutable ou un fichier de commandes.\n" + ] + } + ], "source": [ "# list the current directory\n", "\n", @@ -257,10 +274,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "4fc3e3da-8a55-40cc-9706-48bf12a0e20e", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Envoi d'une requˆte 'ping' sur cnn.com [151.101.3.5] avec 32 octets de donn‚esÿ:\n", + "R‚ponse de 151.101.3.5ÿ: octets=32 temps=16 ms TTL=59\n", + "R‚ponse de 151.101.3.5ÿ: octets=32 temps=14 ms TTL=59\n", + "R‚ponse de 151.101.3.5ÿ: octets=32 temps=17 ms TTL=59\n", + "R‚ponse de 151.101.3.5ÿ: octets=32 temps=17 ms TTL=59\n", + "\n", + "Statistiques Ping pour 151.101.3.5:\n", + " Paquetsÿ: envoy‚s = 4, re‡us = 4, perdus = 0 (perte 0%),\n", + "Dur‚e approximative des boucles en millisecondes :\n", + " Minimum = 14ms, Maximum = 17ms, Moyenne = 16ms\n" + ] + } + ], "source": [ "# ping cnn.com - press the stop button in the toolbar when you're bored\n", "\n", @@ -269,10 +304,430 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "a58e9462-89a2-4b4f-b4aa-51c4bd9f796b", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "^C\n", + "Channels:\n", + " - conda-forge\n", + " - defaults\n", + "Platform: win-64\n", + "Collecting package metadata (repodata.json): ...working... done\n", + "Solving environment: ...working... done\n", + "Installing pip dependencies: \\ Ran pip subprocess with arguments:\n", + "['C:\\\\Users\\\\ebaba\\\\.conda\\\\envs\\\\llms\\\\python.exe', '-m', 'pip', 'install', '-U', '-r', 'D:\\\\Workspaces\\\\formation\\\\udemy\\\\ed-donner\\\\llm_engineering\\\\workspace\\\\llm_engineering\\\\condaenv.ckoo6cll.requirements.txt', '--exists-action=b']\n", + "Pip subprocess output:\n", + "Requirement already satisfied: beautifulsoup4 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 1)) (4.12.3)\n", + "\n", + "Requirement already satisfied: plotly in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 2)) (5.24.1)\n", + "\n", + "Requirement already satisfied: bitsandbytes in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (0.45.0)\n", + "\n", + "Requirement already satisfied: transformers in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (4.47.0)\n", + "\n", + "Requirement already satisfied: sentence-transformers in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (3.3.1)\n", + "\n", + "Requirement already satisfied: datasets in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (3.2.0)\n", + "\n", + "Requirement already satisfied: accelerate in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 7)) (1.2.0)\n", + "\n", + "Requirement already satisfied: openai in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.57.2)\n", + "\n", + "Requirement already satisfied: anthropic in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 9)) (0.40.0)\n", + "\n", + "Requirement already satisfied: google-generativeai in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.8.3)\n", + "\n", + "Requirement already satisfied: gradio in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (5.8.0)\n", + "\n", + "Requirement already satisfied: gensim in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 12)) (4.3.3)\n", + "\n", + "Requirement already satisfied: modal in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.68.1)\n", + "\n", + "Collecting modal (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13))\n", + "\n", + " Downloading modal-0.68.2-py3-none-any.whl.metadata (2.3 kB)\n", + "\n", + "Requirement already satisfied: ollama in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 14)) (0.4.4)\n", + "\n", + "Requirement already satisfied: psutil in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 15)) (6.1.0)\n", + "\n", + "Requirement already satisfied: setuptools in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 16)) (75.6.0)\n", + "\n", + "Requirement already satisfied: speedtest-cli in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 17)) (2.1.3)\n", + "\n", + "Requirement already satisfied: langchain in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (0.3.11)\n", + "\n", + "Requirement already satisfied: langchain-core in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 19)) (0.3.24)\n", + "\n", + "Requirement already satisfied: langchain-text-splitters in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 20)) (0.3.2)\n", + "\n", + "Requirement already satisfied: langchain-openai in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 21)) (0.2.12)\n", + "\n", + "Requirement already satisfied: langchain-chroma in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.1.4)\n", + "\n", + "Requirement already satisfied: langchain-community in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.3.11)\n", + "\n", + "Requirement already satisfied: faiss-cpu in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 24)) (1.9.0.post1)\n", + "\n", + "Requirement already satisfied: feedparser in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 25)) (6.0.11)\n", + "\n", + "Requirement already satisfied: twilio in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 26)) (9.3.8)\n", + "\n", + "Requirement already satisfied: pydub in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 27)) (0.25.1)\n", + "\n", + "Requirement already satisfied: soupsieve>1.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from beautifulsoup4->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 1)) (2.5)\n", + "\n", + "Requirement already satisfied: tenacity>=6.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from plotly->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 2)) (9.0.0)\n", + "\n", + "Requirement already satisfied: packaging in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from plotly->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 2)) (24.2)\n", + "\n", + "Requirement already satisfied: torch in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (2.3.1)\n", + "\n", + "Requirement already satisfied: numpy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (1.26.4)\n", + "\n", + "Requirement already satisfied: typing_extensions>=4.8.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (4.12.2)\n", + "\n", + "Requirement already satisfied: filelock in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (3.16.1)\n", + "\n", + "Requirement already satisfied: huggingface-hub<1.0,>=0.24.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (0.26.5)\n", + "\n", + "Requirement already satisfied: pyyaml>=5.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (6.0.2)\n", + "\n", + "Requirement already satisfied: regex!=2019.12.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (2024.11.6)\n", + "\n", + "Requirement already satisfied: requests in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (2.32.3)\n", + "\n", + "Requirement already satisfied: tokenizers<0.22,>=0.21 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (0.21.0)\n", + "\n", + "Requirement already satisfied: safetensors>=0.4.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (0.4.5)\n", + "\n", + "Requirement already satisfied: tqdm>=4.27 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (4.67.1)\n", + "\n", + "Requirement already satisfied: scikit-learn in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (1.6.0)\n", + "\n", + "Requirement already satisfied: scipy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (1.13.1)\n", + "\n", + "Requirement already satisfied: Pillow in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (11.0.0)\n", + "\n", + "Requirement already satisfied: pyarrow>=15.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (18.1.0)\n", + "\n", + "Requirement already satisfied: dill<0.3.9,>=0.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (0.3.8)\n", + "\n", + "Requirement already satisfied: pandas in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2.2.3)\n", + "\n", + "Requirement already satisfied: xxhash in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (3.5.0)\n", + "\n", + "Requirement already satisfied: multiprocess<0.70.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (0.70.16)\n", + "\n", + "Requirement already satisfied: fsspec<=2024.9.0,>=2023.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from fsspec[http]<=2024.9.0,>=2023.1.0->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2024.9.0)\n", + "\n", + "Requirement already satisfied: aiohttp in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (3.11.10)\n", + "\n", + "Requirement already satisfied: anyio<5,>=3.5.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (4.7.0)\n", + "\n", + "Requirement already satisfied: distro<2,>=1.7.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.9.0)\n", + "\n", + "Requirement already satisfied: httpx<1,>=0.23.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.27.2)\n", + "\n", + "Requirement already satisfied: jiter<1,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.8.2)\n", + "\n", + "Requirement already satisfied: pydantic<3,>=1.9.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (2.10.3)\n", + "\n", + "Requirement already satisfied: sniffio in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.3.1)\n", + "\n", + "Requirement already satisfied: google-ai-generativelanguage==0.6.10 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.6.10)\n", + "\n", + "Requirement already satisfied: google-api-core in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (2.24.0)\n", + "\n", + "Requirement already satisfied: google-api-python-client in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (2.155.0)\n", + "\n", + "Requirement already satisfied: google-auth>=2.15.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (2.37.0)\n", + "\n", + "Requirement already satisfied: protobuf in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (5.28.2)\n", + "\n", + "Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-ai-generativelanguage==0.6.10->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (1.25.0)\n", + "\n", + "Requirement already satisfied: aiofiles<24.0,>=22.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (23.2.1)\n", + "\n", + "Requirement already satisfied: fastapi<1.0,>=0.115.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.115.6)\n", + "\n", + "Requirement already satisfied: ffmpy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.4.0)\n", + "\n", + "Requirement already satisfied: gradio-client==1.5.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (1.5.1)\n", + "\n", + "Requirement already satisfied: jinja2<4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (3.1.4)\n", + "\n", + "Requirement already satisfied: markupsafe~=2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (2.1.5)\n", + "\n", + "Requirement already satisfied: orjson~=3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (3.10.12)\n", + "\n", + "Requirement already satisfied: python-multipart>=0.0.18 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.0.19)\n", + "\n", + "Requirement already satisfied: ruff>=0.2.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.8.2)\n", + "\n", + "Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.1.6)\n", + "\n", + "Requirement already satisfied: semantic-version~=2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (2.10.0)\n", + "\n", + "Requirement already satisfied: starlette<1.0,>=0.40.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.41.3)\n", + "\n", + "Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.13.2)\n", + "\n", + "Requirement already satisfied: typer<1.0,>=0.12 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.15.1)\n", + "\n", + "Requirement already satisfied: uvicorn>=0.14.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.32.1)\n", + "\n", + "Requirement already satisfied: websockets<15.0,>=10.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio-client==1.5.1->gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (14.1)\n", + "\n", + "Requirement already satisfied: smart-open>=1.8.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gensim->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 12)) (7.0.5)\n", + "\n", + "Requirement already satisfied: certifi in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (2024.8.30)\n", + "\n", + "Requirement already satisfied: click>=8.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (8.1.7)\n", + "\n", + "Requirement already satisfied: grpclib==0.4.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.4.7)\n", + "\n", + "Requirement already satisfied: rich>=12.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (13.9.4)\n", + "\n", + "Requirement already satisfied: synchronicity~=0.9.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.9.6)\n", + "\n", + "Requirement already satisfied: toml in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.10.2)\n", + "\n", + "Requirement already satisfied: types-certifi in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (2021.10.8.3)\n", + "\n", + "Requirement already satisfied: types-toml in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.10.8.20240310)\n", + "\n", + "Requirement already satisfied: watchfiles in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (1.0.0)\n", + "\n", + "Requirement already satisfied: h2<5,>=3.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (4.1.0)\n", + "\n", + "Requirement already satisfied: multidict in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (6.1.0)\n", + "\n", + "Requirement already satisfied: SQLAlchemy<3,>=1.4 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (2.0.36)\n", + "\n", + "Requirement already satisfied: langsmith<0.3,>=0.1.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (0.2.2)\n", + "\n", + "Requirement already satisfied: jsonpatch<2.0,>=1.33 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-core->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 19)) (1.33)\n", + "\n", + "Requirement already satisfied: tiktoken<1,>=0.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 21)) (0.8.0)\n", + "\n", + "Requirement already satisfied: chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.5.20)\n", + "\n", + "Requirement already satisfied: dataclasses-json<0.7,>=0.5.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.6.7)\n", + "\n", + "Requirement already satisfied: httpx-sse<0.5.0,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.4.0)\n", + "\n", + "Requirement already satisfied: pydantic-settings<3.0.0,>=2.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (2.6.1)\n", + "\n", + "Requirement already satisfied: sgmllib3k in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from feedparser->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 25)) (1.0.0)\n", + "\n", + "Requirement already satisfied: PyJWT<3.0.0,>=2.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from twilio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 26)) (2.10.1)\n", + "\n", + "Requirement already satisfied: aiohttp-retry==2.8.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from twilio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 26)) (2.8.3)\n", + "\n", + "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2.4.4)\n", + "\n", + "Requirement already satisfied: aiosignal>=1.1.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (1.3.1)\n", + "\n", + "Requirement already satisfied: attrs>=17.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (24.2.0)\n", + "\n", + "Requirement already satisfied: frozenlist>=1.1.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (1.5.0)\n", + "\n", + "Requirement already satisfied: propcache>=0.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (0.2.1)\n", + "\n", + "Requirement already satisfied: yarl<2.0,>=1.17.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (1.18.3)\n", + "\n", + "Requirement already satisfied: idna>=2.8 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from anyio<5,>=3.5.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (3.10)\n", + "\n", + "Requirement already satisfied: build>=1.0.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.2.2.post1)\n", + "\n", + "Requirement already satisfied: chroma-hnswlib==0.7.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.7.6)\n", + "\n", + "Requirement already satisfied: posthog>=2.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.6.5)\n", + "\n", + "Requirement already satisfied: onnxruntime>=1.14.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.19.2)\n", + "\n", + "Requirement already satisfied: opentelemetry-api>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n", + "\n", + "Requirement already satisfied: opentelemetry-exporter-otlp-proto-grpc>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n", + "\n", + "Requirement already satisfied: opentelemetry-instrumentation-fastapi>=0.41b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n", + "\n", + "Requirement already satisfied: opentelemetry-sdk>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n", + "\n", + "Requirement already satisfied: pypika>=0.48.9 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.48.9)\n", + "\n", + "Requirement already satisfied: overrides>=7.3.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (7.7.0)\n", + "\n", + "Requirement already satisfied: importlib-resources in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (6.4.5)\n", + "\n", + "Requirement already satisfied: grpcio>=1.58.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.68.1)\n", + "\n", + "Requirement already satisfied: bcrypt>=4.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (4.2.1)\n", + "\n", + "Requirement already satisfied: kubernetes>=28.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (31.0.0)\n", + "\n", + "Requirement already satisfied: mmh3>=4.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (5.0.1)\n", + "\n", + "Requirement already satisfied: colorama in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from click>=8.1.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.4.6)\n", + "\n", + "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (3.23.1)\n", + "\n", + "Requirement already satisfied: typing-inspect<1,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.9.0)\n", + "\n", + "Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-core->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (1.66.0)\n", + "\n", + "Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (5.5.0)\n", + "\n", + "Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.4.1)\n", + "\n", + "Requirement already satisfied: rsa<5,>=3.1.4 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (4.9)\n", + "\n", + "Requirement already satisfied: httpcore==1.* in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from httpx<1,>=0.23.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.0.7)\n", + "\n", + "Requirement already satisfied: h11<0.15,>=0.13 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.14.0)\n", + "\n", + "Requirement already satisfied: jsonpointer>=1.9 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from jsonpatch<2.0,>=1.33->langchain-core->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 19)) (3.0.0)\n", + "\n", + "Requirement already satisfied: requests-toolbelt<2.0.0,>=1.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langsmith<0.3,>=0.1.17->langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (1.0.0)\n", + "\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pandas->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2.9.0.post0)\n", + "\n", + "Requirement already satisfied: pytz>=2020.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pandas->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2024.1)\n", + "\n", + "Requirement already satisfied: tzdata>=2022.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pandas->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2024.2)\n", + "\n", + "Requirement already satisfied: annotated-types>=0.6.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pydantic<3,>=1.9.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.7.0)\n", + "\n", + "Requirement already satisfied: pydantic-core==2.27.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pydantic<3,>=1.9.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (2.27.1)\n", + "\n", + "Requirement already satisfied: python-dotenv>=0.21.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pydantic-settings<3.0.0,>=2.4.0->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (1.0.1)\n", + "\n", + "Requirement already satisfied: charset_normalizer<4,>=2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from requests->transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (3.4.0)\n", + "\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from requests->transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (2.2.3)\n", + "\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from rich>=12.0.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (3.0.0)\n", + "\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from rich>=12.0.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (2.18.0)\n", + "\n", + "Requirement already satisfied: wrapt in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from smart-open>=1.8.1->gensim->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 12)) (1.17.0)\n", + "\n", + "Requirement already satisfied: greenlet!=0.4.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from SQLAlchemy<3,>=1.4->langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (3.1.1)\n", + "\n", + "Requirement already satisfied: sigtools>=4.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from synchronicity~=0.9.6->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (4.0.1)\n", + "\n", + "Requirement already satisfied: sympy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from torch->bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (1.13.3)\n", + "\n", + "Requirement already satisfied: networkx in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from torch->bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (3.4.2)\n", + "\n", + "Requirement already satisfied: shellingham>=1.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from typer<1.0,>=0.12->gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (1.5.4)\n", + "\n", + "Requirement already satisfied: httplib2<1.dev0,>=0.19.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.22.0)\n", + "\n", + "Requirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.2.0)\n", + "\n", + "Requirement already satisfied: uritemplate<5,>=3.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (4.1.1)\n", + "\n", + "Requirement already satisfied: joblib>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from scikit-learn->sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (1.4.2)\n", + "\n", + "Requirement already satisfied: threadpoolctl>=3.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from scikit-learn->sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (3.5.0)\n", + "\n", + "Requirement already satisfied: pyproject_hooks in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from build>=1.0.3->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.2.0)\n", + "\n", + "Requirement already satisfied: grpcio-status<2.0.dev0,>=1.33.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.10.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,<3.0.0dev,>=1.34.1->google-ai-generativelanguage==0.6.10->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (1.68.1)\n", + "\n", + "Requirement already satisfied: hyperframe<7,>=6.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from h2<5,>=3.1.0->grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (6.0.1)\n", + "\n", + "Requirement already satisfied: hpack<5,>=4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from h2<5,>=3.1.0->grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (4.0.0)\n", + "\n", + "Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from httplib2<1.dev0,>=0.19.0->google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (3.2.0)\n", + "\n", + "Requirement already satisfied: six>=1.9.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.17.0)\n", + "\n", + "Requirement already satisfied: websocket-client!=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.8.0)\n", + "\n", + "Requirement already satisfied: requests-oauthlib in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (2.0.0)\n", + "\n", + "Requirement already satisfied: oauthlib>=3.2.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.2.2)\n", + "\n", + "Requirement already satisfied: durationpy>=0.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.9)\n", + "\n", + "Requirement already satisfied: mdurl~=0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from markdown-it-py>=2.2.0->rich>=12.0.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.1.2)\n", + "\n", + "Requirement already satisfied: coloredlogs in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (15.0.1)\n", + "\n", + "Requirement already satisfied: flatbuffers in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (24.3.25)\n", + "\n", + "Requirement already satisfied: deprecated>=1.2.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-api>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.2.15)\n", + "\n", + "Requirement already satisfied: importlib-metadata<=8.5.0,>=6.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-api>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (8.5.0)\n", + "\n", + "Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.29.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n", + "\n", + "Requirement already satisfied: opentelemetry-proto==1.29.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n", + "\n", + "Requirement already satisfied: opentelemetry-instrumentation-asgi==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n", + "\n", + "Requirement already satisfied: opentelemetry-instrumentation==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n", + "\n", + "Requirement already satisfied: opentelemetry-semantic-conventions==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n", + "\n", + "Requirement already satisfied: opentelemetry-util-http==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n", + "\n", + "Requirement already satisfied: asgiref~=3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-asgi==0.50b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.8.1)\n", + "\n", + "Requirement already satisfied: monotonic>=1.5 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from posthog>=2.4.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.5)\n", + "\n", + "Requirement already satisfied: backoff>=1.10.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from posthog>=2.4.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (2.2.1)\n", + "\n", + "Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.6.1)\n", + "\n", + "Requirement already satisfied: mypy-extensions>=0.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (1.0.0)\n", + "\n", + "Requirement already satisfied: httptools>=0.6.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from uvicorn[standard]>=0.18.3->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.6.4)\n", + "\n", + "Requirement already satisfied: mpmath<1.4,>=1.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sympy->torch->bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (1.3.0)\n", + "\n", + "Requirement already satisfied: zipp>=3.20 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from importlib-metadata<=8.5.0,>=6.0->opentelemetry-api>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.21.0)\n", + "\n", + "Requirement already satisfied: humanfriendly>=9.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from coloredlogs->onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (10.0)\n", + "\n", + "Requirement already satisfied: pyreadline3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from humanfriendly>=9.1->coloredlogs->onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.0.0)\n", + "\n", + "Downloading modal-0.68.2-py3-none-any.whl (499 kB)\n", + "\n", + "Installing collected packages: modal\n", + "\n", + " Attempting uninstall: modal\n", + "\n", + " Found existing installation: modal 0.68.1\n", + "\n", + " Uninstalling modal-0.68.1:\n", + "\n", + " Successfully uninstalled modal-0.68.1\n", + "\n", + "Successfully installed modal-0.68.2\n", + "\n", + "\n", + "done\n", + "#\n", + "# To activate this environment, use\n", + "#\n", + "# $ conda activate llms\n", + "#\n", + "# To deactivate an active environment, use\n", + "#\n", + "# $ conda deactivate\n", + "\n" + ] + } + ], "source": [ "# This is a useful command that ensures your Anaconda environment \n", "# is up to date with any new upgrades to packages;\n", @@ -313,10 +768,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "6e96be3d-fa82-42a3-a8aa-b81dd20563a5", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████| 1000/1000 [00:10<00:00, 94.63it/s]\n" + ] + } + ], "source": [ "# And now, with a nice little progress bar:\n", "\n", @@ -331,10 +794,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "63c788dd-4618-4bb4-a5ce-204411a38ade", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "# This is a big heading!\n", + "\n", + "- And this is a bullet-point\n", + "- So is this\n", + "- Me, too!" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# On a different topic, here's a useful way to print output in markdown\n", "\n", diff --git a/week1/community-contributions/day1-article-pdf-reader.ipynb b/week1/community-contributions/day1-article-pdf-reader.ipynb index 28af4e3..599cb18 100644 --- a/week1/community-contributions/day1-article-pdf-reader.ipynb +++ b/week1/community-contributions/day1-article-pdf-reader.ipynb @@ -181,7 +181,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.11.11" } }, "nbformat": 4, diff --git a/week1/community-contributions/day1-selenium-for-javascript-sites.ipynb b/week1/community-contributions/day1-selenium-for-javascript-sites.ipynb index fd3a3ba..fd77d91 100644 --- a/week1/community-contributions/day1-selenium-for-javascript-sites.ipynb +++ b/week1/community-contributions/day1-selenium-for-javascript-sites.ipynb @@ -22,7 +22,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "metadata": {}, "outputs": [], @@ -62,7 +62,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", "metadata": {}, "outputs": [], @@ -76,7 +76,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "c5e793b2-6775-426a-a139-4848291d0463", "metadata": {}, "outputs": [], @@ -100,10 +100,63 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Home - Edward Donner\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], "source": [ "# Let's try one out\n", "\n", @@ -132,7 +185,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", "metadata": {}, "outputs": [], @@ -144,7 +197,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", "metadata": {}, "outputs": [], @@ -177,7 +230,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", "metadata": {}, "outputs": [], @@ -199,7 +252,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", "metadata": {}, "outputs": [], @@ -215,17 +268,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "\"# Summary of Edward Donner's Website\\n\\nEdward Donner's website features insights into his interests in coding and experimenting with large language models (LLMs). As the co-founder and CTO of Nebula.io, Donner focuses on leveraging AI to improve talent discovery and management. He has a background in AI startups, highlighting a successful acquisition of his previous venture, untapt, in 2021.\\n\\n## Recent Posts\\n- **November 13, 2024**: *Mastering AI and LLM Engineering – Resources*\\n- **October 16, 2024**: *From Software Engineer to AI Data Scientist – Resources*\\n- **August 6, 2024**: *Outsmart LLM Arena – A Battle of Diplomacy and Deviousness*\\n- **June 26, 2024**: *Choosing the Right LLM: Toolkit and Resources*\\n\\nThe site encourages visitors to connect and shares his passion for technology and music.\"" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "summarize(\"https://edwarddonner.com\")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "3d926d59-450e-4609-92ba-2d6f244f1342", "metadata": {}, "outputs": [], @@ -237,10 +301,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "3018853a-445f-41ff-9560-d925d1774b2f", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "# Summary of Edward Donner's Website\n", + "\n", + "Edward Donner's website serves as a platform for sharing insights and developments related to large language models (LLMs) and their applications. \n", + "\n", + "### About Edward\n", + "Edward describes himself as a programmer and enthusiast of LLMs, with interests in DJing and electronic music production. He is the co-founder and CTO of Nebula.io, a company focused on leveraging AI to enhance talent discovery and engagement in a job context. He previously founded the AI startup untapt, which was acquired in 2021. \n", + "\n", + "### Featured Content\n", + "The website highlights several posts with resources that include:\n", + "- **Mastering AI and LLM Engineering** (November 13, 2024)\n", + "- **From Software Engineer to AI Data Scientist** (October 16, 2024)\n", + "- **Outsmart LLM Arena** (August 6, 2024) - An initiative designed to challenge LLMs in strategic scenarios.\n", + "- **Choosing the Right LLM: Toolkit and Resources** (June 26, 2024)\n", + "\n", + "### Focus\n", + "The website primarily emphasizes exploring LLMs and their transformative potential, especially in the realm of talent management. There are also connections to various platforms where Edward can be followed or contacted." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "display_summary(\"https://edwarddonner.com\")" ] @@ -277,20 +369,49 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "id": "52ae98bb", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "# Summary of Website Content\n", + "\n", + "The website appears to be inaccessible due to a prompt requesting users to enable JavaScript and cookies in their web browser. As a result, no specific content, news, or announcements can be summarized from the site at this time. \n", + "\n", + "For a full experience and access to content, it is necessary to adjust browser settings accordingly." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "display_summary(\"https://openai.com\")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "5d57e958", "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'selenium'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[14], line 3\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m#Parse webpages which is designed using JavaScript heavely\u001b[39;00m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;66;03m# download the chorme driver from here as per your version of chrome - https://developer.chrome.com/docs/chromedriver/downloads\u001b[39;00m\n\u001b[1;32m----> 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mselenium\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m webdriver\n\u001b[0;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mselenium\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mwebdriver\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mchrome\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mservice\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Service\n\u001b[0;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mselenium\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mwebdriver\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcommon\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mby\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m By\n", + "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'selenium'" + ] + } + ], "source": [ "#Parse webpages which is designed using JavaScript heavely\n", "# download the chorme driver from here as per your version of chrome - https://developer.chrome.com/docs/chromedriver/downloads\n", diff --git a/week1/day1.ipynb b/week1/day1.ipynb index 0756a2f..f2cc46b 100644 --- a/week1/day1.ipynb +++ b/week1/day1.ipynb @@ -69,7 +69,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "metadata": {}, "outputs": [], @@ -108,10 +108,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "7b87cadb-d513-4303-baee-a37b6f938e4d", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n" + ] + } + ], "source": [ "# Load environment variables in a file called .env\n", "\n", @@ -132,7 +140,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3", "metadata": {}, "outputs": [], @@ -154,15 +162,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "a58394bf-1e45-46af-9bfd-01e24da6f49a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'Hello, GPT! This is my first ever message to you! Hi!'}\n", + "Hello! Welcome! I'm glad you're here. How can I assist you today?\n" + ] + } + ], "source": [ "# To give you a preview -- calling OpenAI with these messages is this easy:\n", "\n", "message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n", "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n", + "print ({message})\n", "print(response.choices[0].message.content)" ] }, @@ -176,7 +194,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "c5e793b2-6775-426a-a139-4848291d0463", "metadata": {}, "outputs": [], @@ -206,10 +224,63 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Home - Edward Donner\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], "source": [ "# Let's try one out. Change the website and add print statements to follow along.\n", "\n", @@ -238,7 +309,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "abdb8417-c5dc-44bc-9bee-2e059d162699", "metadata": {}, "outputs": [], @@ -252,7 +323,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, + "id": "a1b3fcc3-1152-41a4-b4ad-a6d66ee18b79", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "system_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 13, "id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c", "metadata": {}, "outputs": [], @@ -270,10 +362,65 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "26448ec4-5c00-4204-baec-7df91d11ff2e", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are looking at a website titled Home - Edward Donner\n", + "The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n", + "\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], "source": [ "print(user_prompt_for(ed))" ] @@ -299,7 +446,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5", "metadata": {}, "outputs": [], @@ -312,10 +459,40 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, + "id": "6100800a-f1dd-4624-9956-75735225be02", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'role': 'system', 'content': 'You are a snarky assistant'},\n", + " {'role': 'user', 'content': 'What is 2 + 2?'}]" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "messages" + ] + }, + { + "cell_type": "code", + "execution_count": 19, "id": "21ed95c5-7001-47de-a36d-1d6673b403ce", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Oh, we're doing math now? Well, 2 + 2 equals 4. Shocking, I know!\n" + ] + } + ], "source": [ "# To give you a preview -- calling OpenAI with system and user messages:\n", "\n", @@ -333,7 +510,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "id": "0134dfa4-8299-48b5-b444-f2a8c3403c88", "metadata": {}, "outputs": [], @@ -349,10 +526,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "id": "36478464-39ee-485c-9f3f-6a4e458dbc9c", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "[{'role': 'system',\n", + " 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n", + " {'role': 'user',\n", + " 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Try this out, and then try for a few more websites\n", "\n", @@ -369,7 +560,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "id": "905b9919-aba7-45b5-ae65-81b3d1d78e34", "metadata": {}, "outputs": [], @@ -387,17 +578,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "\"# Summary of Edward Donner's Website\\n\\nEdward Donner's website serves as a personal platform where he discusses his interests and expertise, primarily in coding and experimenting with large language models (LLMs). As the co-founder and CTO of Nebula.io, he focuses on leveraging AI to help individuals discover their potentials and enhance talent management for recruiters.\\n\\n### Key Sections:\\n\\n- **About Ed**: Edward enjoys coding, DJing, and engaging with the tech community. He has a background in AI startups, including being the founder and CEO of untapt, which was acquired in 2021.\\n- **Outsmart**: This feature introduces an arena where LLMs compete in diplomacy and cunning, showcasing innovative applications of AI.\\n- **Posts**: \\n - **Mastering AI and LLM Engineering – Resources** (November 13, 2024)\\n - **From Software Engineer to AI Data Scientist – Resources** (October 16, 2024)\\n - **Outsmart LLM Arena – A Battle of Diplomacy and Deviousness** (August 6, 2024)\\n - **Choosing the Right LLM: Toolkit and Resources** (June 26, 2024)\\n\\nThe website invites visitors to connect with Edward and stay updated through his posts and resources related to AI and LLMs.\"" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "summarize(\"https://edwarddonner.com\")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "id": "3d926d59-450e-4609-92ba-2d6f244f1342", "metadata": {}, "outputs": [], @@ -411,10 +613,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "3018853a-445f-41ff-9560-d925d1774b2f", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "# Summary of Edward Donner's Website\n", + "\n", + "Edward Donner's website is a personal and professional platform showcasing his interests and expertise in working with Large Language Models (LLMs) and AI technologies. As the co-founder and CTO of Nebula.io, he is focused on leveraging AI to enhance talent discovery and engagement. Previously, he founded the AI startup untapt, which was acquired in 2021. \n", + "\n", + "## Key Features:\n", + "\n", + "- **Personal Introduction**: Ed shares his passion for coding, experimentation with LLMs, and interests in DJing and electronic music production.\n", + "- **Professional Background**: Insights into his role at Nebula.io and previous experience with untapt. He highlights his work with proprietary LLMs and innovative matching models.\n", + " \n", + "## Recent Posts:\n", + "- **November 13, 2024**: Mastering AI and LLM Engineering – Resources\n", + "- **October 16, 2024**: From Software Engineer to AI Data Scientist – Resources\n", + "- **August 6, 2024**: Outsmart LLM Arena – A battle of diplomacy and deviousness\n", + "- **June 26, 2024**: Choosing the Right LLM: Toolkit and Resources\n", + "\n", + "These posts suggest a focus on educational resources and insights related to AI and LLM engineering." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "display_summary(\"https://edwarddonner.com\")" ] @@ -437,20 +667,86 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "id": "45d83403-a24c-44b5-84ac-961449b4008f", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "# CNN Summary\n", + "\n", + "CNN is a leading news outlet providing the latest updates on various topics, including:\n", + "\n", + "- **Breaking News:** Continuous updates on urgent events around the globe.\n", + "- **Featured Stories:** Insights into significant current affairs, such as the ongoing conflict between Israel and Hamas, and developments in the Ukraine-Russia war.\n", + "- **Politics:** Coverage of key political events, including President Biden's recent clemency grants and implications of Trump's potential inauguration.\n", + "- **World Affairs:** Reports on international crises, including the situation in Syria and reactions to Sudan's bombardments.\n", + "- **Health & Science:** Articles discussing public health issues and scientific discoveries, such as innovations in herbal medicine.\n", + "- **Business & Economy:** Analysis of corporate developments, job cuts in Germany, and impacts of international trade policies.\n", + "- **Entertainment & Culture:** Features on public figures and trends affecting the entertainment industry, as well as the latest in sports.\n", + "\n", + "Recent announcements on the site include:\n", + "- **Clemency for Nearly 1,500 People:** This act marks the largest single-day clemency decision in recent history.\n", + "- **Status of International Relations:** Notable updates on figures like Trump and Xi Jinping, alongside the unfolding situation in Ukraine and the Middle East.\n", + "- **Cultural Insights:** Breakdowns of significant cultural events such as the recognition of Time's \"Person of the Year.\"\n", + "\n", + "CNN emphasizes the importance of viewer feedback to enhance reading and engagement experiences on their platform." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "display_summary(\"https://cnn.com\")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "id": "75e9fd40-b354-4341-991e-863ef2e59db7", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "# Anthropic Overview\n", + "\n", + "Anthropic is an AI safety and research company based in San Francisco, focused on developing reliable and beneficial AI systems with a strong emphasis on safety. The company boasts an interdisciplinary team with expertise in machine learning, physics, policy, and product development.\n", + "\n", + "## Key Offerings\n", + "\n", + "- **Claude AI Models**: \n", + " - The latest model, **Claude 3.5 Sonnet**, is highlighted as the most intelligent AI model to date.\n", + " - **Claude 3.5 Haiku** has also been introduced, expanding their product offerings.\n", + "\n", + "- **API Access**: \n", + " - Users can leverage Claude to enhance efficiency and create new revenue opportunities.\n", + "\n", + "## Recent Announcements\n", + "\n", + "1. **New Model Updates** (October 22, 2024):\n", + " - Introduction of Claude 3.5 Sonnet and Claude 3.5 Haiku.\n", + " - Announcement of new capabilities for computer use.\n", + "\n", + "2. **Research Initiatives**:\n", + " - **Constitutional AI**: Discusses ensuring harmlessness through AI feedback (December 15, 2022).\n", + " - **Core Views on AI Safety**: Outlines when, why, what, and how AI safety should be addressed (March 8, 2023).\n", + "\n", + "Overall, Anthropic is focused on pioneering advancements in AI through research and development while prioritizing safety and reliability in its applications." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "display_summary(\"https://anthropic.com\")" ] @@ -489,30 +785,58 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 31, "id": "00743dac-0e70-45b7-879a-d7293a6f68a6", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Objet : Contestation du frais de retour tardif\n", + "\n", + "Bonjour,\n", + "\n", + "Je fais suite à votre email concernant le retour tardif de la voiture. Je conteste fermement cette modification de prix qui, selon moi, est injustifiée.\n", + "\n", + "Selon les termes de notre contrat de location, le délai de grâce pour le retour est souvent de 30 minutes, ce qui est fréquemment appliqué dans le secteur. De plus, vous n'avez pas mentionné dans votre contrat un tarif additionnel pour une telle situation, ce qui pourrait constituer une clause abusive.\n", + "\n", + "Je vous prie donc de bien vouloir annuler cette modification tarifaire. Je me réserve le droit d'explorer des recours supplémentaires si cette situation n'est pas corrigée rapidement.\n", + "\n", + "Dans l'attente de votre retour.\n", + "\n", + "Cordialement, \n", + "Sylvain\n" + ] + } + ], "source": [ "# Step 1: Create your prompts\n", "\n", - "system_prompt = \"something here\"\n", + "system_prompt = \"You are my very smart assistant. Your task will be to suggest to me an answer to my email. I want to avoid paying. you can be agressive and use the law\"\n", "user_prompt = \"\"\"\n", - " Lots of text\n", - " Can be pasted here\n", + " Retour tardif\n", + "Bonjour sylvain,\n", + "\n", + "Vous avez réservé la voiture jusqu'à 15:00 , et vous l'avez rendue à 15:30 . Le prix de votre location a été modifié en conséquence.\n", "\"\"\"\n", "\n", "# Step 2: Make the messages list\n", "\n", - "messages = [] # fill this in\n", + "messages = [{\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}] # fill this in\n", "\n", "# Step 3: Call OpenAI\n", "\n", - "response =\n", + "response = openai.chat.completions.create(\n", + " model = \"gpt-4o-mini\",\n", + " messages = messages\n", + " )\n", + " \n", "\n", "# Step 4: print the result\n", "\n", - "print(" + "print( response.choices[0].message.content)" ] }, { diff --git a/week1/day2 EXERCISE.ipynb b/week1/day2 EXERCISE.ipynb index 4504401..3f2060d 100644 --- a/week1/day2 EXERCISE.ipynb +++ b/week1/day2 EXERCISE.ipynb @@ -68,7 +68,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "metadata": {}, "outputs": [], @@ -82,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "29ddd15d-a3c5-4f4e-a678-873f56162724", "metadata": {}, "outputs": [], @@ -96,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "dac0a679-599c-441f-9bf2-ddc73d35b940", "metadata": {}, "outputs": [], @@ -110,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "7bb9c624-14f0-4945-a719-8ddb64f66f47", "metadata": {}, "outputs": [], @@ -124,10 +124,37 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "42b9f644-522d-4e05-a691-56e7658c0ea9", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generative AI has numerous business applications across various industries. Here are some examples:\n", + "\n", + "1. **Content Creation**: Generative AI can create high-quality content, such as articles, social media posts, and product descriptions, in a matter of minutes. This can be particularly useful for businesses that need to generate large amounts of content quickly.\n", + "2. **Marketing Automation**: Generative AI can help automate marketing processes, such as personalized email campaigns, ad copywriting, and lead generation. By analyzing customer data and behavior, generative AI can create targeted and relevant content that resonates with customers.\n", + "3. **Product Design and Development**: Generative AI can assist in the design and development of new products by generating 2D and 3D designs, prototypes, and even entire product lines. This can help reduce costs and speed up the product development process.\n", + "4. **Image and Video Generation**: Generative AI can create realistic images and videos that can be used for various business purposes, such as advertising, e-commerce, and social media content creation.\n", + "5. **Chatbots and Virtual Assistants**: Generative AI can power chatbots and virtual assistants that can engage with customers, provide support, and answer frequently asked questions. This can help businesses improve customer service and reduce the workload of human customer support agents.\n", + "6. **Supply Chain Optimization**: Generative AI can analyze supply chain data and generate optimized routes, schedules, and inventory management plans to improve logistics efficiency and reduce costs.\n", + "7. **Predictive Maintenance**: Generative AI can analyze equipment sensor data and predict maintenance needs, allowing businesses to schedule maintenance activities before equipment failures occur, reducing downtime and increasing overall efficiency.\n", + "8. **Financial Analysis and Forecasting**: Generative AI can analyze financial data and generate forecasts, identifying trends and patterns that can help businesses make informed investment decisions.\n", + "9. **Customer Service Chatbots**: Generative AI can create personalized chatbots that can engage with customers, answer questions, and provide support in multiple languages.\n", + "10. **Education and Training**: Generative AI can create personalized learning plans, generate educational content, and even develop adaptive learning systems that adjust to individual student needs.\n", + "\n", + "Some specific business applications of generative AI include:\n", + "\n", + "* **Amazon's Product Recommendations**: Amazon uses generative AI to recommend products based on customer behavior and preferences.\n", + "* **Google's Content Generation**: Google uses generative AI to create high-quality content for its search engine, such as news summaries and product descriptions.\n", + "* **IBM's Watson**: IBM uses generative AI in its Watson platform to analyze large amounts of data and provide insights for various industries, including healthcare and finance.\n", + "\n", + "Overall, the business applications of generative AI are vast and continue to expand as the technology improves.\n" + ] + } + ], "source": [ "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n", "print(response.json()['message']['content'])" @@ -147,10 +174,50 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "7745b9c4-57dc-4867-9180-61fa5db55eb8", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generative AI has numerous business applications across various industries, including:\n", + "\n", + "1. **Content Generation**: Use AI to generate high-quality content such as blog posts, social media posts, product descriptions, and more.\n", + "2. **Marketing Automation**: Utilize generative AI to personalize marketing messages, create targeted advertising campaigns, and automate lead generation.\n", + "3. **Product Design and Development**: Leverage generative AI to design and develop new products, such as 3D models, prototypes, and designs for packaging and branding materials.\n", + "4. **Customer Service Chatbots**: Create chatbots that use generative AI to understand customer inquiries and provide personalized responses.\n", + "5. **Language Translation**: Apply generative AI to translate text, speech, and audio content in real-time.\n", + "6. **Image Generation**: Use generative AI to create high-quality images for marketing materials, product packaging, and more.\n", + "7. **Music Composition**: Utilize generative AI to compose original music tracks, sound effects, and audio loops for various industries.\n", + "8. **Predictive Analytics**: Leverage generative AI to analyze large datasets, identify patterns, and make predictions about customer behavior, market trends, and more.\n", + "9. **Financial Modeling**: Apply generative AI to create financial models, forecast revenue, and predict potential risks.\n", + "10. **Creative Writing**: Use generative AI to assist in creative writing tasks such as generating plot outlines, character development, and dialogue.\n", + "\n", + "Industry-specific applications:\n", + "\n", + "1. **Healthcare**: Generate medical imaging reports, create personalized patient profiles, and develop new treatment options using generative AI.\n", + "2. **Finance**: Analyze financial data, identify trends, and predict market movements using generative AI.\n", + "3. **Education**: Develop personalized learning plans, create adaptive assessments, and generate educational content using generative AI.\n", + "4. **Retail**: Generate product descriptions, optimize pricing strategies, and create targeted marketing campaigns using generative AI.\n", + "\n", + "Benefits of Generative AI:\n", + "\n", + "1. **Increased Efficiency**: Automate repetitive tasks, reduce manual labor, and increase productivity.\n", + "2. **Improved Accuracy**: Reduce human error, improve data accuracy, and enhance decision-making.\n", + "3. **Enhanced Creativity**: Unlock new creative possibilities, generate innovative ideas, and discover new opportunities.\n", + "\n", + "However, there are also challenges associated with Generative AI, such as:\n", + "\n", + "1. **Bias and Fairness**: Ensure that generative models do not perpetuate existing biases or discriminatory practices.\n", + "2. **Explainability and Transparency**: Develop techniques to understand how generative models make decisions and provide transparency into their decision-making processes.\n", + "3. **Job Displacement**: Prepare employees for the impact of automation on jobs and develop new skills to work alongside AI systems.\n", + "\n", + "Overall, Generative AI has the potential to transform various industries and bring about significant benefits, but it is crucial to address the associated challenges to maximize its value.\n" + ] + } + ], "source": [ "import ollama\n", "\n", @@ -199,11 +266,282 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, + "id": "2cedd52a-cb2e-4cfb-8e48-176954cd64e7", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display\n", + "import ollama\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, "id": "402d5686-4e76-4110-b65a-b3906c35c0a4", "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}\n", + "MODEL = \"llama3.2\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "be0c1b6e-8d7c-446f-8a9c-18b1c33078d8", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", + "\n", + "# Some websites need you to use proper headers when fetching them:\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "class Website:\n", + "\n", + " def __init__(self, url):\n", + " \"\"\"\n", + " Create this Website object from the given url using the BeautifulSoup library\n", + " \"\"\"\n", + " self.url = url\n", + " response = requests.get(url, headers=headers)\n", + " soup = BeautifulSoup(response.content, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "265f85c4-132d-4bb8-bda2-792e0b0da017", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Home - Edward Donner\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], + "source": [ + "# Let's try one out. Change the website and add print statements to follow along.\n", + "\n", + "ed = Website(\"https://edwarddonner.com\")\n", + "print(ed.title)\n", + "print(ed.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "24ea875b-2ba0-41ad-b6be-4be8baeac16e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are looking at a website titled Home - Edward Donner\n", + "The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n", + "\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Well, hi there.\n", + "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", + "very\n", + "amateur) and losing myself in\n", + "Hacker News\n", + ", nodding my head sagely to things I only half understand.\n", + "I’m the co-founder and CTO of\n", + "Nebula.io\n", + ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", + "acquired in 2021\n", + ".\n", + "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", + "patented\n", + "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", + "Connect\n", + "with me for more!\n", + "November 13, 2024\n", + "Mastering AI and LLM Engineering – Resources\n", + "October 16, 2024\n", + "From Software Engineer to AI Data Scientist – resources\n", + "August 6, 2024\n", + "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", + "June 26, 2024\n", + "Choosing the Right LLM: Toolkit and Resources\n", + "Navigation\n", + "Home\n", + "Outsmart\n", + "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", + "About\n", + "Posts\n", + "Get in touch\n", + "ed [at] edwarddonner [dot] com\n", + "www.edwarddonner.com\n", + "Follow me\n", + "LinkedIn\n", + "Twitter\n", + "Facebook\n", + "Subscribe to newsletter\n", + "Type your email…\n", + "Subscribe\n" + ] + } + ], + "source": [ + "# Define our system/user prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", + "\n", + "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", + "and provides a short summary, ignoring text that might be navigation related. \\\n", + "Respond in markdown.\"\n", + "\n", + "# A function that writes a User Prompt that asks for summaries of websites:\n", + "\n", + "def user_prompt_for(website):\n", + " user_prompt = f\"You are looking at a website titled {website.title}\"\n", + " user_prompt += \"\\nThe contents of this website is as follows; \\\n", + "please provide a short summary of this website in markdown. \\\n", + "If it includes news or announcements, then summarize these too.\\n\\n\"\n", + " user_prompt += website.text\n", + " return user_prompt\n", + "\n", + "user_prompt = user_prompt_for(ed)\n", + "print(user_prompt)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "6d5f0f47-58a2-43b7-87a9-181a87e08081", + "metadata": {}, + "outputs": [], + "source": [ + "# create the message \n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "55454071-faa1-4cb1-a8dc-1f93abad6718", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'role': 'system', 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'}, {'role': 'user', 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]\n" + ] + } + ], + "source": [ + "print(messages_for(ed))" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "42e0d73b-7db5-431f-bf97-4767be627056", + "metadata": {}, + "outputs": [], + "source": [ + "# And now: call the OpenAI API. You will get very familiar with this!\n", + "\n", + "def summarize(url):\n", + " website = Website(url)\n", + " response = ollama.chat(model=MODEL, messages=messages_for(website)) \n", + " return response['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "419d4c04-a3c4-43d2-ac1f-5f7f3b84c92a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'**Website Summary**\\n======================\\n\\n* The website is owned by Edward Donner, a co-founder and CTO of Nebula.io.\\n* It appears to be primarily focused on his experiences with Large Language Models (LLMs) and AI engineering.\\n\\n**News and Announcements**\\n---------------------------\\n\\n* **Mastering AI and LLM Engineering - Resources**: A collection of resources for learning about mastering AI and LLM engineering, announced on November 13, 2024.\\n* **From Software Engineer to AI Data Scientist – resources**: A list of resources to help move from a software engineer role to an AI data scientist, shared on October 16, 2024.\\n* **Outsmart LLM Arena – a battle of diplomacy and deviousness**: An introduction to the Outsmart arena where LLMs compete in a battle of diplomacy and strategy, announced on June 26, 2024.\\n\\n**Additional Information**\\n-------------------------\\n\\n* Edward Donner is also involved with various projects and companies, including Nebula.io and untapt (acquired in 2021).\\n* He shares his interests in DJing, electronic music production, and amateur coding endeavors.\\n* The website contains links to his social media profiles and a newsletter sign-up.'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "summarize(\"https://edwarddonner.com\")" + ] } ], "metadata": { diff --git a/week1/day5.ipynb b/week1/day5.ipynb index 8ddd23a..07b3577 100644 --- a/week1/day5.ipynb +++ b/week1/day5.ipynb @@ -22,7 +22,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "d5b08506-dc8b-4443-9201-5f1848161363", "metadata": {}, "outputs": [], @@ -42,10 +42,18 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key looks good so far\n" + ] + } + ], "source": [ "# Initialize and constants\n", "\n", @@ -63,7 +71,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "106dd65e-90af-4ca8-86b6-23a41840645b", "metadata": {}, "outputs": [], @@ -101,10 +109,46 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "e30d8128-933b-44cc-81c8-ab4c9d86589a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "['https://edwarddonner.com/',\n", + " 'https://edwarddonner.com/outsmart/',\n", + " 'https://edwarddonner.com/about-me-and-about-nebula/',\n", + " 'https://edwarddonner.com/posts/',\n", + " 'https://edwarddonner.com/',\n", + " 'https://news.ycombinator.com',\n", + " 'https://nebula.io/?utm_source=ed&utm_medium=referral',\n", + " 'https://www.prnewswire.com/news-releases/wynden-stark-group-acquires-nyc-venture-backed-tech-startup-untapt-301269512.html',\n", + " 'https://patents.google.com/patent/US20210049536A1/',\n", + " 'https://www.linkedin.com/in/eddonner/',\n", + " 'https://edwarddonner.com/2024/11/13/llm-engineering-resources/',\n", + " 'https://edwarddonner.com/2024/11/13/llm-engineering-resources/',\n", + " 'https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/',\n", + " 'https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/',\n", + " 'https://edwarddonner.com/2024/08/06/outsmart/',\n", + " 'https://edwarddonner.com/2024/08/06/outsmart/',\n", + " 'https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/',\n", + " 'https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/',\n", + " 'https://edwarddonner.com/',\n", + " 'https://edwarddonner.com/outsmart/',\n", + " 'https://edwarddonner.com/about-me-and-about-nebula/',\n", + " 'https://edwarddonner.com/posts/',\n", + " 'mailto:hello@mygroovydomain.com',\n", + " 'https://www.linkedin.com/in/eddonner/',\n", + " 'https://twitter.com/edwarddonner',\n", + " 'https://www.facebook.com/edward.donner.52']" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "ed = Website(\"https://edwarddonner.com\")\n", "ed.links" @@ -128,7 +172,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 40, "id": "6957b079-0d96-45f7-a26a-3487510e9b35", "metadata": {}, "outputs": [], @@ -144,22 +188,54 @@ " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", " ]\n", "}\n", + "\"\"\"\n", + "link_system_prompt += \"here is additionnal example: \"\n", + "link_system_prompt += \"\"\"\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\", was_a_relative_path: true},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\", was_a_relative_path: false}\n", + " ]\n", + "}\n", "\"\"\"" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 41, "id": "b97e4068-97ed-4120-beae-c42105e4d59a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\n", + "You should respond in JSON as in this example:\n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + "}\n", + "here is additionnal example: \n", + "{\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\", was_a_relative_path: true},\n", + " {\"type\": \"careers page\": \"url\": \"https://another.full.url/careers\", was_a_relative_path: false}\n", + " ]\n", + "}\n", + "\n" + ] + } + ], "source": [ "print(link_system_prompt)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 49, "id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3", "metadata": {}, "outputs": [], @@ -169,23 +245,60 @@ " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n", "Do not include Terms of Service, Privacy, email links.\\n\"\n", " user_prompt += \"Links (some might be relative links):\\n\"\n", + " user_prompt += \"if a link was a relative links, in the json indicate it to true otherwize don't don't add that key:\\n\"\n", " user_prompt += \"\\n\".join(website.links)\n", " return user_prompt" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 44, "id": "6bcbfa78-6395-4685-b92c-22d592050fd7", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Here is the list of links on the website of https://edwarddonner.com - please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. Do not include Terms of Service, Privacy, email links.\n", + "Links (some might be relative links):\n", + "if a link was a relative links, in the json indicate it to true. and do not add it if not:\n", + "https://edwarddonner.com/\n", + "https://edwarddonner.com/outsmart/\n", + "https://edwarddonner.com/about-me-and-about-nebula/\n", + "https://edwarddonner.com/posts/\n", + "https://edwarddonner.com/\n", + "https://news.ycombinator.com\n", + "https://nebula.io/?utm_source=ed&utm_medium=referral\n", + "https://www.prnewswire.com/news-releases/wynden-stark-group-acquires-nyc-venture-backed-tech-startup-untapt-301269512.html\n", + "https://patents.google.com/patent/US20210049536A1/\n", + "https://www.linkedin.com/in/eddonner/\n", + "https://edwarddonner.com/2024/11/13/llm-engineering-resources/\n", + "https://edwarddonner.com/2024/11/13/llm-engineering-resources/\n", + "https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/\n", + "https://edwarddonner.com/2024/10/16/from-software-engineer-to-ai-data-scientist-resources/\n", + "https://edwarddonner.com/2024/08/06/outsmart/\n", + "https://edwarddonner.com/2024/08/06/outsmart/\n", + "https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/\n", + "https://edwarddonner.com/2024/06/26/choosing-the-right-llm-resources/\n", + "https://edwarddonner.com/\n", + "https://edwarddonner.com/outsmart/\n", + "https://edwarddonner.com/about-me-and-about-nebula/\n", + "https://edwarddonner.com/posts/\n", + "mailto:hello@mygroovydomain.com\n", + "https://www.linkedin.com/in/eddonner/\n", + "https://twitter.com/edwarddonner\n", + "https://www.facebook.com/edward.donner.52\n" + ] + } + ], "source": [ "print(get_links_user_prompt(ed))" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 59, "id": "a29aca19-ca13-471c-a4b4-5abbfa813f69", "metadata": {}, "outputs": [], @@ -206,10 +319,100 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 57, "id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "['/',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/posts',\n", + " '/docs',\n", + " '/enterprise',\n", + " '/pricing',\n", + " '/login',\n", + " '/join',\n", + " '/meta-llama/Llama-3.3-70B-Instruct',\n", + " '/tencent/HunyuanVideo',\n", + " '/Datou1111/shou_xin',\n", + " '/black-forest-labs/FLUX.1-dev',\n", + " '/deepseek-ai/DeepSeek-V2.5-1210',\n", + " '/models',\n", + " '/spaces/JeffreyXiang/TRELLIS',\n", + " '/spaces/multimodalart/flux-style-shaping',\n", + " '/spaces/ginipick/FLUXllama',\n", + " '/spaces/Kwai-Kolors/Kolors-Virtual-Try-On',\n", + " '/spaces/black-forest-labs/FLUX.1-dev',\n", + " '/spaces',\n", + " '/datasets/HuggingFaceFW/fineweb-2',\n", + " '/datasets/fka/awesome-chatgpt-prompts',\n", + " '/datasets/CohereForAI/Global-MMLU',\n", + " '/datasets/O1-OPEN/OpenO1-SFT',\n", + " '/datasets/amphora/QwQ-LongCoT-130K',\n", + " '/datasets',\n", + " '/join',\n", + " '/pricing#endpoints',\n", + " '/pricing#spaces',\n", + " '/pricing',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/enterprise',\n", + " '/allenai',\n", + " '/facebook',\n", + " '/amazon',\n", + " '/google',\n", + " '/Intel',\n", + " '/microsoft',\n", + " '/grammarly',\n", + " '/Writer',\n", + " '/docs/transformers',\n", + " '/docs/diffusers',\n", + " '/docs/safetensors',\n", + " '/docs/huggingface_hub',\n", + " '/docs/tokenizers',\n", + " '/docs/peft',\n", + " '/docs/transformers.js',\n", + " '/docs/timm',\n", + " '/docs/trl',\n", + " '/docs/datasets',\n", + " '/docs/text-generation-inference',\n", + " '/docs/accelerate',\n", + " '/models',\n", + " '/datasets',\n", + " '/spaces',\n", + " '/tasks',\n", + " 'https://ui.endpoints.huggingface.co',\n", + " '/chat',\n", + " '/huggingface',\n", + " '/brand',\n", + " '/terms-of-service',\n", + " '/privacy',\n", + " 'https://apply.workable.com/huggingface/',\n", + " 'mailto:press@huggingface.co',\n", + " '/learn',\n", + " '/docs',\n", + " '/blog',\n", + " 'https://discuss.huggingface.co',\n", + " 'https://status.huggingface.co/',\n", + " 'https://github.com/huggingface',\n", + " 'https://twitter.com/huggingface',\n", + " 'https://www.linkedin.com/company/huggingface/',\n", + " '/join/discord']" + ] + }, + "execution_count": 57, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Anthropic has made their site harder to scrape, so I'm using HuggingFace..\n", "\n", @@ -219,10 +422,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 60, "id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "{'links': [{'type': 'about page',\n", + " 'url': 'https://huggingface.co/huggingface',\n", + " 'was_a_relative_path': True},\n", + " {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'},\n", + " {'type': 'enterprise page',\n", + " 'url': 'https://huggingface.co/enterprise',\n", + " 'was_a_relative_path': True},\n", + " {'type': 'pricing page',\n", + " 'url': 'https://huggingface.co/pricing',\n", + " 'was_a_relative_path': True},\n", + " {'type': 'blog',\n", + " 'url': 'https://huggingface.co/blog',\n", + " 'was_a_relative_path': True},\n", + " {'type': 'community page', 'url': 'https://discuss.huggingface.co'},\n", + " {'type': 'GitHub page', 'url': 'https://github.com/huggingface'},\n", + " {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'},\n", + " {'type': 'LinkedIn page',\n", + " 'url': 'https://www.linkedin.com/company/huggingface/'}]}" + ] + }, + "execution_count": 60, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "get_links(\"https://huggingface.co\")" ] @@ -239,7 +470,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5", "metadata": {}, "outputs": [], @@ -248,7 +479,7 @@ " result = \"Landing page:\\n\"\n", " result += Website(url).get_contents()\n", " links = get_links(url)\n", - " print(\"Found links:\", links)\n", + " print(\"Found links::\", links)\n", " for link in links[\"links\"]:\n", " result += f\"\\n\\n{link['type']}\\n\"\n", " result += Website(link[\"url\"]).get_contents()\n", @@ -257,17 +488,3180 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "id": "5099bd14-076d-4745-baf3-dac08d8e5ab2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links:: {'links': [{'type': 'home page', 'url': 'https://huggingface.co/'}, {'type': 'about page', 'url': 'https://huggingface.co/huggingface'}, {'type': 'enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.co/pricing'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog', 'url': 'https://huggingface.co/blog'}, {'type': 'community page', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n", + "Landing page:\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Trending on\n", + "this week\n", + "Models\n", + "meta-llama/Llama-3.3-70B-Instruct\n", + "Updated\n", + "3 days ago\n", + "•\n", + "102k\n", + "•\n", + "960\n", + "tencent/HunyuanVideo\n", + "Updated\n", + "6 days ago\n", + "•\n", + "3.73k\n", + "•\n", + "992\n", + "Datou1111/shou_xin\n", + "Updated\n", + "5 days ago\n", + "•\n", + "7.84k\n", + "•\n", + "322\n", + "black-forest-labs/FLUX.1-dev\n", + "Updated\n", + "Aug 16\n", + "•\n", + "1.38M\n", + "•\n", + "7.23k\n", + "Qwen/QwQ-32B-Preview\n", + "Updated\n", + "15 days ago\n", + "•\n", + "92.8k\n", + "•\n", + "1.27k\n", + "Browse 400k+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "1.13k\n", + "🏢\n", + "TRELLIS\n", + "Scalable and Versatile 3D Generation from images\n", + "Running\n", + "on\n", + "Zero\n", + "291\n", + "🦀🏆\n", + "FLUXllama\n", + "FLUX 4-bit Quantization(just 8GB VRAM)\n", + "Running\n", + "on\n", + "L40S\n", + "244\n", + "🚀\n", + "Flux Style Shaping\n", + "Optical illusions and style transfer with FLUX\n", + "Running\n", + "on\n", + "CPU Upgrade\n", + "5.92k\n", + "👕\n", + "Kolors Virtual Try-On\n", + "Running\n", + "on\n", + "Zero\n", + "5.71k\n", + "🖥️\n", + "FLUX.1 [dev]\n", + "Browse 150k+ applications\n", + "Datasets\n", + "HuggingFaceFW/fineweb-2\n", + "Updated\n", + "5 days ago\n", + "•\n", + "27.6k\n", + "•\n", + "284\n", + "fka/awesome-chatgpt-prompts\n", + "Updated\n", + "Sep 3\n", + "•\n", + "7.71k\n", + "•\n", + "6.52k\n", + "CohereForAI/Global-MMLU\n", + "Updated\n", + "1 day ago\n", + "•\n", + "4.59k\n", + "•\n", + "77\n", + "O1-OPEN/OpenO1-SFT\n", + "Updated\n", + "22 days ago\n", + "•\n", + "1.34k\n", + "•\n", + "175\n", + "amphora/QwQ-LongCoT-130K\n", + "Updated\n", + "8 days ago\n", + "•\n", + "536\n", + "•\n", + "46\n", + "Browse 100k+ datasets\n", + "The Home of Machine Learning\n", + "Create, discover and collaborate on ML better.\n", + "The collaboration platform\n", + "Host and collaborate on unlimited public models, datasets and applications.\n", + "Move faster\n", + "With the HF Open source stack.\n", + "Explore all modalities\n", + "Text, image, video, audio or even 3D.\n", + "Build your portfolio\n", + "Share your work with the world and build your ML profile.\n", + "Sign Up\n", + "Accelerate your ML\n", + "We provide paid Compute and Enterprise solutions.\n", + "Compute\n", + "Deploy on optimized\n", + "Inference Endpoints\n", + "or update your\n", + "Spaces applications\n", + "to a GPU in a few clicks.\n", + "View pricing\n", + "Starting at $0.60/hour for GPU\n", + "Enterprise\n", + "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", + "\t\t\tdedicated support.\n", + "Getting started\n", + "Starting at $20/user/month\n", + "Single Sign-On\n", + "Regions\n", + "Priority Support\n", + "Audit Logs\n", + "Resource Groups\n", + "Private Datasets Viewer\n", + "More than 50,000 organizations are using Hugging Face\n", + "Ai2\n", + "Enterprise\n", + "non-profit\n", + "•\n", + "361 models\n", + "•\n", + "1.71k followers\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.05k models\n", + "•\n", + "3.75k followers\n", + "Amazon Web Services\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "2.42k followers\n", + "Google\n", + "company\n", + "•\n", + "910 models\n", + "•\n", + "5.46k followers\n", + "Intel\n", + "company\n", + "•\n", + "217 models\n", + "•\n", + "2.05k followers\n", + "Microsoft\n", + "company\n", + "•\n", + "351 models\n", + "•\n", + "6.06k followers\n", + "Grammarly\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "98 followers\n", + "Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "16 models\n", + "•\n", + "178 followers\n", + "Our Open Source\n", + "We are building the foundation of ML tooling with the community.\n", + "Transformers\n", + "136,246\n", + "State-of-the-art ML for Pytorch, TensorFlow, and JAX.\n", + "Diffusers\n", + "26,624\n", + "State-of-the-art diffusion models for image and audio generation in PyTorch.\n", + "Safetensors\n", + "2,953\n", + "Simple, safe way to store and distribute neural networks weights safely and quickly.\n", + "Hub Python Library\n", + "2,165\n", + "Client library for the HF Hub: manage repositories from your Python runtime.\n", + "Tokenizers\n", + "9,150\n", + "Fast tokenizers, optimized for both research and production.\n", + "PEFT\n", + "16,699\n", + "Parameter efficient finetuning methods for large models.\n", + "Transformers.js\n", + "12,337\n", + "State-of-the-art Machine Learning for the web. Run Transformers directly in your browser, with no need for a server.\n", + "timm\n", + "32,592\n", + "State-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\n", + "TRL\n", + "10,308\n", + "Train transformer language models with reinforcement learning.\n", + "Datasets\n", + "19,349\n", + "Access and share datasets for computer vision, audio, and NLP tasks.\n", + "Text Generation Inference\n", + "9,433\n", + "Toolkit to serve Large Language Models.\n", + "Accelerate\n", + "8,053\n", + "Easily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "home page\n", + "Webpage Title:\n", + "Hugging Face – The AI community building the future.\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "The AI community building the future.\n", + "The platform where the machine learning community collaborates on models, datasets, and applications.\n", + "Trending on\n", + "this week\n", + "Models\n", + "meta-llama/Llama-3.3-70B-Instruct\n", + "Updated\n", + "3 days ago\n", + "•\n", + "102k\n", + "•\n", + "960\n", + "tencent/HunyuanVideo\n", + "Updated\n", + "6 days ago\n", + "•\n", + "3.73k\n", + "•\n", + "992\n", + "Datou1111/shou_xin\n", + "Updated\n", + "5 days ago\n", + "•\n", + "7.84k\n", + "•\n", + "322\n", + "black-forest-labs/FLUX.1-dev\n", + "Updated\n", + "Aug 16\n", + "•\n", + "1.38M\n", + "•\n", + "7.23k\n", + "Qwen/QwQ-32B-Preview\n", + "Updated\n", + "15 days ago\n", + "•\n", + "92.8k\n", + "•\n", + "1.27k\n", + "Browse 400k+ models\n", + "Spaces\n", + "Running\n", + "on\n", + "Zero\n", + "1.13k\n", + "🏢\n", + "TRELLIS\n", + "Scalable and Versatile 3D Generation from images\n", + "Running\n", + "on\n", + "Zero\n", + "291\n", + "🦀🏆\n", + "FLUXllama\n", + "FLUX 4-bit Quantization(just 8GB VRAM)\n", + "Running\n", + "on\n", + "L40S\n", + "244\n", + "🚀\n", + "Flux Style Shaping\n", + "Optical illusions and style transfer with FLUX\n", + "Running\n", + "on\n", + "CPU Upgrade\n", + "5.92k\n", + "👕\n", + "Kolors Virtual Try-On\n", + "Running\n", + "on\n", + "Zero\n", + "5.71k\n", + "🖥️\n", + "FLUX.1 [dev]\n", + "Browse 150k+ applications\n", + "Datasets\n", + "HuggingFaceFW/fineweb-2\n", + "Updated\n", + "5 days ago\n", + "•\n", + "27.6k\n", + "•\n", + "284\n", + "fka/awesome-chatgpt-prompts\n", + "Updated\n", + "Sep 3\n", + "•\n", + "7.71k\n", + "•\n", + "6.52k\n", + "CohereForAI/Global-MMLU\n", + "Updated\n", + "1 day ago\n", + "•\n", + "4.59k\n", + "•\n", + "77\n", + "O1-OPEN/OpenO1-SFT\n", + "Updated\n", + "22 days ago\n", + "•\n", + "1.34k\n", + "•\n", + "175\n", + "amphora/QwQ-LongCoT-130K\n", + "Updated\n", + "8 days ago\n", + "•\n", + "536\n", + "•\n", + "46\n", + "Browse 100k+ datasets\n", + "The Home of Machine Learning\n", + "Create, discover and collaborate on ML better.\n", + "The collaboration platform\n", + "Host and collaborate on unlimited public models, datasets and applications.\n", + "Move faster\n", + "With the HF Open source stack.\n", + "Explore all modalities\n", + "Text, image, video, audio or even 3D.\n", + "Build your portfolio\n", + "Share your work with the world and build your ML profile.\n", + "Sign Up\n", + "Accelerate your ML\n", + "We provide paid Compute and Enterprise solutions.\n", + "Compute\n", + "Deploy on optimized\n", + "Inference Endpoints\n", + "or update your\n", + "Spaces applications\n", + "to a GPU in a few clicks.\n", + "View pricing\n", + "Starting at $0.60/hour for GPU\n", + "Enterprise\n", + "Give your team the most advanced platform to build AI with enterprise-grade security, access controls and\n", + "\t\t\tdedicated support.\n", + "Getting started\n", + "Starting at $20/user/month\n", + "Single Sign-On\n", + "Regions\n", + "Priority Support\n", + "Audit Logs\n", + "Resource Groups\n", + "Private Datasets Viewer\n", + "More than 50,000 organizations are using Hugging Face\n", + "Ai2\n", + "Enterprise\n", + "non-profit\n", + "•\n", + "361 models\n", + "•\n", + "1.71k followers\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.05k models\n", + "•\n", + "3.75k followers\n", + "Amazon Web Services\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "2.42k followers\n", + "Google\n", + "company\n", + "•\n", + "910 models\n", + "•\n", + "5.46k followers\n", + "Intel\n", + "company\n", + "•\n", + "217 models\n", + "•\n", + "2.05k followers\n", + "Microsoft\n", + "company\n", + "•\n", + "351 models\n", + "•\n", + "6.06k followers\n", + "Grammarly\n", + "company\n", + "•\n", + "10 models\n", + "•\n", + "98 followers\n", + "Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "16 models\n", + "•\n", + "178 followers\n", + "Our Open Source\n", + "We are building the foundation of ML tooling with the community.\n", + "Transformers\n", + "136,246\n", + "State-of-the-art ML for Pytorch, TensorFlow, and JAX.\n", + "Diffusers\n", + "26,624\n", + "State-of-the-art diffusion models for image and audio generation in PyTorch.\n", + "Safetensors\n", + "2,953\n", + "Simple, safe way to store and distribute neural networks weights safely and quickly.\n", + "Hub Python Library\n", + "2,165\n", + "Client library for the HF Hub: manage repositories from your Python runtime.\n", + "Tokenizers\n", + "9,150\n", + "Fast tokenizers, optimized for both research and production.\n", + "PEFT\n", + "16,699\n", + "Parameter efficient finetuning methods for large models.\n", + "Transformers.js\n", + "12,337\n", + "State-of-the-art Machine Learning for the web. Run Transformers directly in your browser, with no need for a server.\n", + "timm\n", + "32,592\n", + "State-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\n", + "TRL\n", + "10,308\n", + "Train transformer language models with reinforcement learning.\n", + "Datasets\n", + "19,349\n", + "Access and share datasets for computer vision, audio, and NLP tasks.\n", + "Text Generation Inference\n", + "9,433\n", + "Toolkit to serve Large Language Models.\n", + "Accelerate\n", + "8,053\n", + "Easily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "about page\n", + "Webpage Title:\n", + "huggingface (Hugging Face)\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Hugging Face\n", + "Enterprise\n", + "company\n", + "Verified\n", + "https://huggingface.co\n", + "huggingface\n", + "huggingface\n", + "Follow\n", + "7,478\n", + "AI & ML interests\n", + "The AI community building the future.\n", + "Team members\n", + "224\n", + "+190\n", + "+177\n", + "+156\n", + "+146\n", + "+126\n", + "Organization Card\n", + "Community\n", + "About org cards\n", + "👋 Hi!\n", + "We are on a mission to democratize\n", + "good\n", + "machine learning, one commit at a time.\n", + "If that sounds like something you should be doing, why don't you\n", + "join us\n", + "!\n", + "For press enquiries, you can\n", + "✉️ contact our team here\n", + ".\n", + "Collections\n", + "1\n", + "DistilBERT release\n", + "Original DistilBERT model, checkpoints obtained from using teacher-student learning from the original BERT checkpoints.\n", + "distilbert/distilbert-base-cased\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "May 6\n", + "•\n", + "402k\n", + "•\n", + "35\n", + "distilbert/distilbert-base-uncased\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "May 6\n", + "•\n", + "15.6M\n", + "•\n", + "•\n", + "570\n", + "distilbert/distilbert-base-multilingual-cased\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "May 6\n", + "•\n", + "513k\n", + "•\n", + "147\n", + "distilbert/distilbert-base-uncased-finetuned-sst-2-english\n", + "Text Classification\n", + "•\n", + "Updated\n", + "Dec 19, 2023\n", + "•\n", + "9.7M\n", + "•\n", + "•\n", + "642\n", + "spaces\n", + "23\n", + "Sort: \n", + "\t\tRecently updated\n", + "Build error\n", + "194\n", + "⚡\n", + "paper-central\n", + "Running\n", + "296\n", + "😻\n", + "Open Source Ai Year In Review 2024\n", + "What happened in open-source AI this year, and what’s next?\n", + "Running\n", + "42\n", + "🔋\n", + "Inference Playground\n", + "Running\n", + "19\n", + "🏢\n", + "Number Tokenization Blog\n", + "Running\n", + "on\n", + "TPU v5e\n", + "5\n", + "💬\n", + "Keras Chatbot Battle\n", + "Running\n", + "101\n", + "⚡\n", + "Modelcard Creator\n", + "Expand 23\n", + "\t\t\t\t\t\t\tspaces\n", + "models\n", + "16\n", + "Sort: \n", + "\t\tRecently updated\n", + "huggingface/timesfm-tourism-monthly\n", + "Updated\n", + "4 days ago\n", + "•\n", + "23\n", + "huggingface/CodeBERTa-language-id\n", + "Text Classification\n", + "•\n", + "Updated\n", + "Mar 29\n", + "•\n", + "513\n", + "•\n", + "54\n", + "huggingface/falcon-40b-gptq\n", + "Text Generation\n", + "•\n", + "Updated\n", + "Jun 14, 2023\n", + "•\n", + "13\n", + "•\n", + "12\n", + "huggingface/autoformer-tourism-monthly\n", + "Updated\n", + "May 24, 2023\n", + "•\n", + "1.82k\n", + "•\n", + "9\n", + "huggingface/distilbert-base-uncased-finetuned-mnli\n", + "Text Classification\n", + "•\n", + "Updated\n", + "Mar 22, 2023\n", + "•\n", + "1.8k\n", + "•\n", + "2\n", + "huggingface/informer-tourism-monthly\n", + "Updated\n", + "Feb 24, 2023\n", + "•\n", + "1.25k\n", + "•\n", + "5\n", + "huggingface/time-series-transformer-tourism-monthly\n", + "Updated\n", + "Feb 23, 2023\n", + "•\n", + "2.22k\n", + "•\n", + "18\n", + "huggingface/the-no-branch-repo\n", + "Text-to-Image\n", + "•\n", + "Updated\n", + "Feb 10, 2023\n", + "•\n", + "9\n", + "•\n", + "3\n", + "huggingface/CodeBERTa-small-v1\n", + "Fill-Mask\n", + "•\n", + "Updated\n", + "Jun 27, 2022\n", + "•\n", + "39.9k\n", + "•\n", + "71\n", + "huggingface/test-model-repo\n", + "Updated\n", + "Nov 19, 2021\n", + "•\n", + "1\n", + "Expand 16\n", + "\t\t\t\t\t\t\tmodels\n", + "datasets\n", + "31\n", + "Sort: \n", + "\t\tRecently updated\n", + "huggingface/paper-central-data\n", + "Viewer\n", + "•\n", + "Updated\n", + "about 2 hours ago\n", + "•\n", + "113k\n", + "•\n", + "479\n", + "•\n", + "7\n", + "huggingface/documentation-images\n", + "Viewer\n", + "•\n", + "Updated\n", + "about 9 hours ago\n", + "•\n", + "44\n", + "•\n", + "2.6M\n", + "•\n", + "42\n", + "huggingface/transformers-metadata\n", + "Viewer\n", + "•\n", + "Updated\n", + "about 11 hours ago\n", + "•\n", + "1.51k\n", + "•\n", + "676\n", + "•\n", + "13\n", + "huggingface/policy-docs\n", + "Updated\n", + "1 day ago\n", + "•\n", + "937\n", + "•\n", + "6\n", + "huggingface/community-science-paper-v2\n", + "Viewer\n", + "•\n", + "Updated\n", + "1 day ago\n", + "•\n", + "4.9k\n", + "•\n", + "342\n", + "•\n", + "6\n", + "huggingface/diffusers-metadata\n", + "Viewer\n", + "•\n", + "Updated\n", + "3 days ago\n", + "•\n", + "56\n", + "•\n", + "452\n", + "•\n", + "4\n", + "huggingface/my-distiset-3f5a230e\n", + "Updated\n", + "22 days ago\n", + "•\n", + "15\n", + "huggingface/cookbook-images\n", + "Viewer\n", + "•\n", + "Updated\n", + "29 days ago\n", + "•\n", + "1\n", + "•\n", + "46.8k\n", + "•\n", + "6\n", + "huggingface/vllm-metadata\n", + "Updated\n", + "Oct 8\n", + "•\n", + "12\n", + "huggingface/paper-central-data-2\n", + "Viewer\n", + "•\n", + "Updated\n", + "Oct 4\n", + "•\n", + "58.3k\n", + "•\n", + "70\n", + "•\n", + "2\n", + "Expand 31\n", + "\t\t\t\t\t\t\tdatasets\n", + "Company\n", + "© Hugging Face\n", + "TOS\n", + "Privacy\n", + "About\n", + "Jobs\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Pricing\n", + "Docs\n", + "\n", + "\n", + "\n", + "enterprise page\n", + "Webpage Title:\n", + "Enterprise Hub - Hugging Face\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Enterprise Hub\n", + "Enterprise-ready version of the world’s leading AI platform\n", + "Subscribe to\n", + "Enterprise Hub\n", + "for $20/user/month with your Hub organization\n", + "Give your organization the most advanced platform to build AI with enterprise-grade security, access controls,\n", + "\t\t\tdedicated support and more.\n", + "Single Sign-On\n", + "Connect securely to your identity provider with SSO integration.\n", + "Regions\n", + "Select, manage, and audit the location of your repository data.\n", + "Audit Logs\n", + "Stay in control with comprehensive logs that report on actions taken.\n", + "Resource Groups\n", + "Accurately manage access to repositories with granular access control.\n", + "Token Management\n", + "Centralized token control and custom approval policies for organization access.\n", + "Analytics\n", + "Track and analyze repository usage data in a single dashboard.\n", + "Advanced Compute Options\n", + "Increase scalability and performance with more compute options like ZeroGPU for Spaces.\n", + "Private Datasets Viewer\n", + "Enable the Dataset Viewer on your private datasets for easier collaboration.\n", + "Advanced security\n", + "Configure organization-wide security policies and default repository visibility.\n", + "Billing\n", + "Control your budget effectively with managed billing and yearly commit options.\n", + "Priority Support\n", + "Maximize your platform usage with priority support from the Hugging Face team.\n", + "Join the most forward-thinking AI organizations\n", + "Everything you already know and love about Hugging Face in Enterprise mode.\n", + "Subscribe to\n", + "Enterprise Hub\n", + "or\n", + "Talk to sales\n", + "AI at Meta\n", + "Enterprise\n", + "company\n", + "•\n", + "2.05k models\n", + "•\n", + "3.75k followers\n", + "Nerdy Face\n", + "Enterprise\n", + "company\n", + "•\n", + "1 model\n", + "•\n", + "234 followers\n", + "ServiceNow-AI\n", + "Enterprise\n", + "company\n", + "•\n", + "108 followers\n", + "Deutsche Telekom AG\n", + "Enterprise\n", + "company\n", + "•\n", + "7 models\n", + "•\n", + "112 followers\n", + "Chegg Inc\n", + "Enterprise\n", + "company\n", + "•\n", + "77 followers\n", + "Lightricks\n", + "Enterprise\n", + "company\n", + "•\n", + "3 models\n", + "•\n", + "363 followers\n", + "Aledade Inc\n", + "Enterprise\n", + "company\n", + "•\n", + "53 followers\n", + "Virtusa Corporation\n", + "Enterprise\n", + "company\n", + "•\n", + "48 followers\n", + "HiddenLayer\n", + "Enterprise\n", + "company\n", + "•\n", + "49 followers\n", + "Ekimetrics\n", + "Enterprise\n", + "company\n", + "•\n", + "47 followers\n", + "Johnson & Johnson\n", + "Enterprise\n", + "company\n", + "•\n", + "35 followers\n", + "Vectara\n", + "Enterprise\n", + "company\n", + "•\n", + "1 model\n", + "•\n", + "54 followers\n", + "HOVER External\n", + "Enterprise\n", + "company\n", + "•\n", + "26 followers\n", + "Qualcomm\n", + "Enterprise\n", + "company\n", + "•\n", + "153 models\n", + "•\n", + "350 followers\n", + "Meta Llama\n", + "Enterprise\n", + "company\n", + "•\n", + "57 models\n", + "•\n", + "13.1k followers\n", + "Orange\n", + "Enterprise\n", + "company\n", + "•\n", + "4 models\n", + "•\n", + "147 followers\n", + "Writer\n", + "Enterprise\n", + "company\n", + "•\n", + "16 models\n", + "•\n", + "178 followers\n", + "Toyota Research Institute\n", + "Enterprise\n", + "company\n", + "•\n", + "8 models\n", + "•\n", + "91 followers\n", + "H2O.ai\n", + "Enterprise\n", + "company\n", + "•\n", + "71 models\n", + "•\n", + "359 followers\n", + "Mistral AI_\n", + "Enterprise\n", + "company\n", + "•\n", + "21 models\n", + "•\n", + "3.37k followers\n", + "IBM Granite\n", + "Enterprise\n", + "company\n", + "•\n", + "56 models\n", + "•\n", + "604 followers\n", + "Liberty Mutual\n", + "Enterprise\n", + "company\n", + "•\n", + "41 followers\n", + "Arcee AI\n", + "Enterprise\n", + "company\n", + "•\n", + "130 models\n", + "•\n", + "259 followers\n", + "Gretel.ai\n", + "Enterprise\n", + "company\n", + "•\n", + "8 models\n", + "•\n", + "70 followers\n", + "Gsk-tech\n", + "Enterprise\n", + "company\n", + "•\n", + "33 followers\n", + "BCG X\n", + "Enterprise\n", + "company\n", + "•\n", + "29 followers\n", + "StepStone Online Recruiting\n", + "Enterprise\n", + "company\n", + "•\n", + "32 followers\n", + "Prezi\n", + "Enterprise\n", + "company\n", + "•\n", + "30 followers\n", + "Shopify\n", + "Enterprise\n", + "company\n", + "•\n", + "371 followers\n", + "Together\n", + "Enterprise\n", + "company\n", + "•\n", + "27 models\n", + "•\n", + "460 followers\n", + "Bloomberg\n", + "Enterprise\n", + "company\n", + "•\n", + "2 models\n", + "•\n", + "132 followers\n", + "Fidelity Investments\n", + "Enterprise\n", + "company\n", + "•\n", + "114 followers\n", + "Jusbrasil\n", + "Enterprise\n", + "company\n", + "•\n", + "77 followers\n", + "Technology Innovation Institute\n", + "Enterprise\n", + "company\n", + "•\n", + "25 models\n", + "•\n", + "979 followers\n", + "Stability AI\n", + "Enterprise\n", + "company\n", + "•\n", + "95 models\n", + "•\n", + "8.5k followers\n", + "Nutanix\n", + "Enterprise\n", + "company\n", + "•\n", + "245 models\n", + "•\n", + "38 followers\n", + "Kakao Corp.\n", + "Enterprise\n", + "company\n", + "•\n", + "41 followers\n", + "creditkarma\n", + "Enterprise\n", + "company\n", + "•\n", + "32 followers\n", + "Mercedes-Benz AG\n", + "Enterprise\n", + "company\n", + "•\n", + "80 followers\n", + "Widn AI\n", + "Enterprise\n", + "company\n", + "•\n", + "27 followers\n", + "Liquid AI\n", + "Enterprise\n", + "company\n", + "•\n", + "85 followers\n", + "BRIA AI\n", + "Enterprise\n", + "company\n", + "•\n", + "28 models\n", + "•\n", + "941 followers\n", + "Compliance & Certifications\n", + "GDPR Compliant\n", + "SOC 2 Type 2\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "pricing page\n", + "Webpage Title:\n", + "Hugging Face – Pricing\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Pricing\n", + "Leveling up AI collaboration and compute.\n", + "Users and organizations already use the Hub as a collaboration platform,\n", + "we’re making it easy to seamlessly and scalably launch ML compute directly from the Hub.\n", + "HF Hub\n", + "Collaborate on Machine Learning\n", + "Host unlimited public models, datasets\n", + "Create unlimited orgs with no member limits\n", + "Access the latest ML tools and open source\n", + "Community support\n", + "Forever\n", + "Free\n", + "PRO\n", + "Pro Account\n", + "Unlock advanced HF features\n", + "ZeroGPU and Dev Mode for Spaces\n", + "Higher rate limits for serverless inference\n", + "Get early access to upcoming features\n", + "Show your support with a Pro badge\n", + "Subscribe for\n", + "$9\n", + "/month\n", + "Enterprise Hub\n", + "Accelerate your AI roadmap\n", + "SSO and SAML support\n", + "Select data location with Storage Regions\n", + "Precise actions reviews with Audit logs\n", + "Granular access control with Resource groups\n", + "Centralized token control and approval\n", + "Dataset Viewer for private datasets\n", + "Advanced compute options for Spaces\n", + "Deploy Inference on your own Infra\n", + "Managed billing with yearly commits\n", + "Priority support\n", + "Starting at\n", + "$20\n", + "per user per month\n", + "Spaces Hardware\n", + "Upgrade your Space compute\n", + "Free CPUs\n", + "Build more advanced Spaces\n", + "7 optimized hardware available\n", + "From CPU to GPU to Accelerators\n", + "Starting at\n", + "$0\n", + "/hour\n", + "Inference Endpoints\n", + "Deploy models on fully managed infrastructure\n", + "Deploy dedicated Endpoints in seconds\n", + "Keep your costs low\n", + "Fully-managed autoscaling\n", + "Enterprise security\n", + "Starting at\n", + "$0.032\n", + "/hour\n", + "Need support to accelerate AI in your organization? View our\n", + "Expert Support\n", + ".\n", + "Hugging Face Hub\n", + "free\n", + "The HF Hub is the central place to explore, experiment, collaborate and build technology with Machine\n", + "\t\t\t\t\tLearning.\n", + "Join the open source Machine Learning movement!\n", + "→\n", + "Sign Up\n", + "Create with ML\n", + "Packed with ML features, like model eval, dataset viewer and much more.\n", + "Collaborate\n", + "Git based and designed for collaboration at its core.\n", + "Play and learn\n", + "Learn by experimenting and sharing with our awesome community.\n", + "Build your ML portfolio\n", + "Share your work with the world and build your own ML profile.\n", + "Spaces Hardware\n", + "Starting at $0\n", + "Spaces are one of the most popular ways to share ML applications and demos with the world.\n", + "Upgrade your Spaces with our selection of custom on-demand hardware:\n", + "→\n", + "Get started with Spaces\n", + "Name\n", + "CPU\n", + "Memory\n", + "Accelerator\n", + "VRAM\n", + "Hourly price\n", + "CPU Basic\n", + "2 vCPU\n", + "16 GB\n", + "-\n", + "-\n", + "FREE\n", + "CPU Upgrade\n", + "8 vCPU\n", + "32 GB\n", + "-\n", + "-\n", + "$0.03\n", + "Nvidia T4 - small\n", + "4 vCPU\n", + "15 GB\n", + "Nvidia T4\n", + "16 GB\n", + "$0.40\n", + "Nvidia T4 - medium\n", + "8 vCPU\n", + "30 GB\n", + "Nvidia T4\n", + "16 GB\n", + "$0.60\n", + "1x Nvidia L4\n", + "8 vCPU\n", + "30 GB\n", + "Nvidia L4\n", + "24 GB\n", + "$0.80\n", + "4x Nvidia L4\n", + "48 vCPU\n", + "186 GB\n", + "Nvidia L4\n", + "96 GB\n", + "$3.80\n", + "1x Nvidia L40S\n", + "8 vCPU\n", + "62 GB\n", + "Nvidia L4\n", + "48 GB\n", + "$1.80\n", + "4x Nvidia L40S\n", + "48 vCPU\n", + "382 GB\n", + "Nvidia L4\n", + "192 GB\n", + "$8.30\n", + "8x Nvidia L40S\n", + "192 vCPU\n", + "1534 GB\n", + "Nvidia L4\n", + "384 GB\n", + "$23.50\n", + "Nvidia A10G - small\n", + "4 vCPU\n", + "15 GB\n", + "Nvidia A10G\n", + "24 GB\n", + "$1.00\n", + "Nvidia A10G - large\n", + "12 vCPU\n", + "46 GB\n", + "Nvidia A10G\n", + "24 GB\n", + "$1.50\n", + "2x Nvidia A10G - large\n", + "24 vCPU\n", + "92 GB\n", + "Nvidia A10G\n", + "48 GB\n", + "$3.00\n", + "4x Nvidia A10G - large\n", + "48 vCPU\n", + "184 GB\n", + "Nvidia A10G\n", + "96 GB\n", + "$5.00\n", + "Nvidia A100 - large\n", + "12 vCPU\n", + "142 GB\n", + "Nvidia A100\n", + "80 GB\n", + "$4.00\n", + "TPU v5e 1x1\n", + "22 vCPU\n", + "44 GB\n", + "Google TPU v5e\n", + "16 GB\n", + "$1.20\n", + "TPU v5e 2x2\n", + "110 vCPU\n", + "186 GB\n", + "Google TPU v5e\n", + "64 GB\n", + "$4.75\n", + "TPU v5e 2x4\n", + "220 vCPU\n", + "380 GB\n", + "Google TPU v5e\n", + "128 GB\n", + "$9.50\n", + "Custom\n", + "on demand\n", + "on demand\n", + "on demand\n", + "on demand\n", + "on demand\n", + "Spaces Persistent Storage\n", + "All Spaces get ephemeral storage for free but you can upgrade and add persistent storage at any time.\n", + "Name\n", + "Storage\n", + "Monthly price\n", + "Small\n", + "20 GB\n", + "$5\n", + "Medium\n", + "150 GB\n", + "$25\n", + "Large\n", + "1 TB\n", + "$100\n", + "Building something cool as a side project? We also offer community GPU grants.\n", + "Inference Endpoints\n", + "Starting at $0.033/hour\n", + "Inference Endpoints (dedicated) offers a secure production solution to easily deploy any ML model on dedicated\n", + "\t\t\t\t\tand autoscaling infrastructure, right from the HF Hub.\n", + "→\n", + "Learn more\n", + "CPU\n", + "instances\n", + "Provider\n", + "Architecture\n", + "vCPUs\n", + "Memory\n", + "Hourly rate\n", + "aws\n", + "Intel Sapphire Rapids\n", + "1\n", + "2GB\n", + "$0.03\n", + "2\n", + "4GB\n", + "$0.07\n", + "4\n", + "8GB\n", + "$0.13\n", + "8\n", + "16GB\n", + "$0.27\n", + "azure\n", + "Intel Xeon\n", + "1\n", + "2GB\n", + "$0.06\n", + "2\n", + "4GB\n", + "$0.12\n", + "4\n", + "8GB\n", + "$0.24\n", + "8\n", + "16GB\n", + "$0.48\n", + "gcp\n", + "Intel Sapphire Rapids\n", + "1\n", + "2GB\n", + "$0.05\n", + "2\n", + "4GB\n", + "$0.10\n", + "4\n", + "8GB\n", + "$0.20\n", + "8\n", + "16GB\n", + "$0.40\n", + "Accelerator\n", + "instances\n", + "Provider\n", + "Architecture\n", + "Topology\n", + "Accelerator Memory\n", + "Hourly rate\n", + "aws\n", + "Inf2\n", + "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tNeuron\n", + "x1\n", + "14.5GB\n", + "$0.75\n", + "x12\n", + "760GB\n", + "$12.00\n", + "gcp\n", + "TPU\n", + "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tv5e\n", + "1x1\n", + "16GB\n", + "$1.20\n", + "2x2\n", + "64GB\n", + "$4.75\n", + "2x4\n", + "128GB\n", + "$9.50\n", + "GPU\n", + "instances\n", + "Provider\n", + "Architecture\n", + "GPUs\n", + "GPU Memory\n", + "Hourly rate\n", + "aws\n", + "NVIDIA T4\n", + "1\n", + "14GB\n", + "$0.50\n", + "4\n", + "56GB\n", + "$3.00\n", + "aws\n", + "NVIDIA L4\n", + "1\n", + "24GB\n", + "$0.80\n", + "4\n", + "96GB\n", + "$3.80\n", + "aws\n", + "NVIDIA L40S\n", + "1\n", + "48GB\n", + "$1.80\n", + "4\n", + "192GB\n", + "$8.30\n", + "8\n", + "384GB\n", + "$23.50\n", + "aws\n", + "NVIDIA A10G\n", + "1\n", + "24GB\n", + "$1.00\n", + "4\n", + "96GB\n", + "$5.00\n", + "aws\n", + "NVIDIA A100\n", + "1\n", + "80GB\n", + "$4.00\n", + "2\n", + "160GB\n", + "$8.00\n", + "4\n", + "320GB\n", + "$16.00\n", + "8\n", + "640GB\n", + "$32.00\n", + "gcp\n", + "NVIDIA T4\n", + "1\n", + "16GB\n", + "$0.50\n", + "gcp\n", + "NVIDIA L4\n", + "1\n", + "24GB\n", + "$0.70\n", + "4\n", + "96GB\n", + "$3.80\n", + "gcp\n", + "NVIDIA A100\n", + "1\n", + "80GB\n", + "$3.60\n", + "2\n", + "160GB\n", + "$7.20\n", + "4\n", + "320GB\n", + "$14.40\n", + "8\n", + "640GB\n", + "$28.80\n", + "gcp\n", + "NVIDIA H100\n", + "1\n", + "80GB\n", + "$10.00\n", + "2\n", + "160GB\n", + "$20.00\n", + "4\n", + "320GB\n", + "$40.00\n", + "8\n", + "640GB\n", + "$80.00\n", + "Pro Account\n", + "PRO\n", + "A monthly subscription to access powerful features.\n", + "→\n", + "Get Pro\n", + "($9/month)\n", + "ZeroGPU\n", + ": Get 5x usage quota and highest GPU queue priority\n", + "Spaces Hosting\n", + ": Create ZeroGPU Spaces with A100 hardware\n", + "Spaces Dev Mode\n", + ": Fast iterations via SSH/VS Code for Spaces\n", + "Dataset Viewer\n", + ": Activate it on private datasets\n", + "Inference API\n", + ": Get x20 higher rate limits on Serverless API\n", + "Blog Articles\n", + ": Publish articles to the Hugging Face blog\n", + "Social Posts\n", + ": Share short updates with the community\n", + "Features Preview\n", + ": Get early access to upcoming\n", + "\t\t\t\t\t\t\t\t\t\tfeatures\n", + "PRO\n", + "Badge\n", + ":\n", + "\t\t\t\t\t\t\t\t\t\tShow your support on your profile\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Tasks\n", + "Inference Endpoints\n", + "HuggingChat\n", + "Company\n", + "About\n", + "Brand assets\n", + "Terms of service\n", + "Privacy\n", + "Jobs\n", + "Press\n", + "Resources\n", + "Learn\n", + "Documentation\n", + "Blog\n", + "Forum\n", + "Service Status\n", + "Social\n", + "GitHub\n", + "Twitter\n", + "LinkedIn\n", + "Discord\n", + "\n", + "\n", + "\n", + "careers page\n", + "Webpage Title:\n", + "Hugging Face - Current Openings\n", + "Webpage Contents:\n", + "\n", + "\n", + "\n", + "\n", + "blog\n", + "Webpage Title:\n", + "Hugging Face – Blog\n", + "Webpage Contents:\n", + "Hugging Face\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Posts\n", + "Docs\n", + "Enterprise\n", + "Pricing\n", + "Log In\n", + "Sign Up\n", + "Blog, Articles, and discussions\n", + "New Article\n", + "Everything\n", + "community\n", + "guide\n", + "open source collab\n", + "partnerships\n", + "research\n", + "NLP\n", + "Audio\n", + "CV\n", + "RL\n", + "ethics\n", + "Diffusion\n", + "Game Development\n", + "RLHF\n", + "Leaderboard\n", + "Case Studies\n", + "LeMaterial: an open source initiative to accelerate materials discovery and research\n", + "By\n", + "AlexDuvalinho\n", + "December 10, 2024\n", + "guest\n", + "•\n", + "18\n", + "Community Articles\n", + "view all\n", + "How to Expand Your AI Music Generations of 30 Seconds to Several Minutes\n", + "By\n", + "theeseus-ai\n", + "•\n", + "about 5 hours ago\n", + "🇪🇺✍️ EU AI Act: Systemic Risks in the First CoP Draft Comments ✍️🇪🇺\n", + "By\n", + "yjernite\n", + "•\n", + "1 day ago\n", + "•\n", + "6\n", + "The Intersection of CTMU and QCI: Implementing Emergent Intelligence\n", + "By\n", + "dimentox\n", + "•\n", + "1 day ago\n", + "Building an AI-powered search engine from scratch\n", + "By\n", + "as-cle-bert\n", + "•\n", + "2 days ago\n", + "•\n", + "5\n", + "**Build Your Own AI Server at Home: A Cost-Effective Guide Using Pre-Owned Components**\n", + "By\n", + "theeseus-ai\n", + "•\n", + "2 days ago\n", + "•\n", + "1\n", + "MotionLCM-V2: Improved Compression Rate for Multi-Latent-Token Diffusion\n", + "By\n", + "wxDai\n", + "•\n", + "2 days ago\n", + "•\n", + "10\n", + "RLHF 101: A Technical Dive into RLHF\n", + "By\n", + "GitBag\n", + "•\n", + "3 days ago\n", + "•\n", + "1\n", + "[Talk Arena](https://talkarena.org)\n", + "By\n", + "WillHeld\n", + "•\n", + "3 days ago\n", + "Multimodal RAG with Colpali, Milvus and VLMs\n", + "By\n", + "saumitras\n", + "•\n", + "3 days ago\n", + "In Honour of This Year's NeurIPs Test of Time Paper Awardees\n", + "By\n", + "Jaward\n", + "•\n", + "4 days ago\n", + "•\n", + "1\n", + "Power steering: Squeeze massive power from small LLMs\n", + "By\n", + "ucheog\n", + "•\n", + "4 days ago\n", + "•\n", + "4\n", + "Exploring the Power of KaibanJS v0.11.0 🚀\n", + "By\n", + "darielnoel\n", + "•\n", + "4 days ago\n", + "•\n", + "1\n", + "**Building a Custom Retrieval System with Motoko and Node.js**\n", + "By\n", + "theeseus-ai\n", + "•\n", + "4 days ago\n", + "Finding Moroccan Arabic (Darija) in Fineweb 2\n", + "By\n", + "omarkamali\n", + "•\n", + "5 days ago\n", + "•\n", + "17\n", + "Running Your Custom LoRA Fine-Tuned MusicGen Large Locally\n", + "By\n", + "theeseus-ai\n", + "•\n", + "7 days ago\n", + "•\n", + "1\n", + "Building a Local Vector Database Index with Annoy and Sentence Transformers\n", + "By\n", + "theeseus-ai\n", + "•\n", + "8 days ago\n", + "•\n", + "2\n", + "Practical Consciousness Theory for AI System Design\n", + "By\n", + "KnutJaegersberg\n", + "•\n", + "8 days ago\n", + "•\n", + "3\n", + "Releasing QwQ-LongCoT-130K\n", + "By\n", + "amphora\n", + "•\n", + "8 days ago\n", + "•\n", + "6\n", + "They Said It Couldn’t Be Done\n", + "By\n", + "Pclanglais\n", + "•\n", + "8 days ago\n", + "•\n", + "68\n", + "🐺🐦‍⬛ LLM Comparison/Test: 25 SOTA LLMs (including QwQ) through 59 MMLU-Pro CS benchmark runs\n", + "By\n", + "wolfram\n", + "•\n", + "9 days ago\n", + "•\n", + "68\n", + "Hugging Face models in Amazon Bedrock\n", + "By\n", + "pagezyhf\n", + "December 9, 2024\n", + "•\n", + "5\n", + "Hugging Face Community Releases an Open Preference Dataset for Text-to-Image Generation\n", + "By\n", + "davidberenstein1957\n", + "December 9, 2024\n", + "•\n", + "43\n", + "Welcome PaliGemma 2 – New vision language models by Google\n", + "By\n", + "merve\n", + "December 5, 2024\n", + "•\n", + "103\n", + "“How good are LLMs at fixing their mistakes? A chatbot arena experiment with Keras and TPUs\n", + "By\n", + "martin-gorner\n", + "December 5, 2024\n", + "•\n", + "12\n", + "Rethinking LLM Evaluation with 3C3H: AraGen Benchmark and Leaderboard\n", + "By\n", + "alielfilali01\n", + "December 4, 2024\n", + "guest\n", + "•\n", + "24\n", + "Investing in Performance: Fine-tune small models with LLM insights - a CFM case study\n", + "By\n", + "oahouzi\n", + "December 3, 2024\n", + "•\n", + "24\n", + "Rearchitecting Hugging Face Uploads and Downloads\n", + "By\n", + "port8080\n", + "November 26, 2024\n", + "•\n", + "37\n", + "SmolVLM - small yet mighty Vision Language Model\n", + "By\n", + "andito\n", + "November 26, 2024\n", + "•\n", + "134\n", + "You could have designed state of the art positional encoding\n", + "By\n", + "FL33TW00D-HF\n", + "November 25, 2024\n", + "•\n", + "76\n", + "Letting Large Models Debate: The First Multilingual LLM Debate Competition\n", + "By\n", + "xuanricheng\n", + "November 20, 2024\n", + "guest\n", + "•\n", + "26\n", + "From Files to Chunks: Improving Hugging Face Storage Efficiency\n", + "By\n", + "jsulz\n", + "November 20, 2024\n", + "•\n", + "41\n", + "Faster Text Generation with Self-Speculative Decoding\n", + "By\n", + "ariG23498\n", + "November 20, 2024\n", + "•\n", + "43\n", + "Introduction to the Open Leaderboard for Japanese LLMs\n", + "By\n", + "akimfromparis\n", + "November 20, 2024\n", + "guest\n", + "•\n", + "26\n", + "Judge Arena: Benchmarking LLMs as Evaluators\n", + "By\n", + "kaikaidai\n", + "November 19, 2024\n", + "guest\n", + "•\n", + "47\n", + "Previous\n", + "1\n", + "2\n", + "3\n", + "...\n", + "36\n", + "Next\n", + "Community Articles\n", + "view all\n", + "How to Expand Your AI Music Generations of 30 Seconds to Several Minutes\n", + "By\n", + "theeseus-ai\n", + "•\n", + "about 5 hours ago\n", + "🇪🇺✍️ EU AI Act: Systemic Risks in the First CoP Draft Comments ✍️🇪🇺\n", + "By\n", + "yjernite\n", + "•\n", + "1 day ago\n", + "•\n", + "6\n", + "The Intersection of CTMU and QCI: Implementing Emergent Intelligence\n", + "By\n", + "dimentox\n", + "•\n", + "1 day ago\n", + "Building an AI-powered search engine from scratch\n", + "By\n", + "as-cle-bert\n", + "•\n", + "2 days ago\n", + "•\n", + "5\n", + "**Build Your Own AI Server at Home: A Cost-Effective Guide Using Pre-Owned Components**\n", + "By\n", + "theeseus-ai\n", + "•\n", + "2 days ago\n", + "•\n", + "1\n", + "MotionLCM-V2: Improved Compression Rate for Multi-Latent-Token Diffusion\n", + "By\n", + "wxDai\n", + "•\n", + "2 days ago\n", + "•\n", + "10\n", + "RLHF 101: A Technical Dive into RLHF\n", + "By\n", + "GitBag\n", + "•\n", + "3 days ago\n", + "•\n", + "1\n", + "[Talk Arena](https://talkarena.org)\n", + "By\n", + "WillHeld\n", + "•\n", + "3 days ago\n", + "Multimodal RAG with Colpali, Milvus and VLMs\n", + "By\n", + "saumitras\n", + "•\n", + "3 days ago\n", + "In Honour of This Year's NeurIPs Test of Time Paper Awardees\n", + "By\n", + "Jaward\n", + "•\n", + "4 days ago\n", + "•\n", + "1\n", + "Power steering: Squeeze massive power from small LLMs\n", + "By\n", + "ucheog\n", + "•\n", + "4 days ago\n", + "•\n", + "4\n", + "Exploring the Power of KaibanJS v0.11.0 🚀\n", + "By\n", + "darielnoel\n", + "•\n", + "4 days ago\n", + "•\n", + "1\n", + "**Building a Custom Retrieval System with Motoko and Node.js**\n", + "By\n", + "theeseus-ai\n", + "•\n", + "4 days ago\n", + "Finding Moroccan Arabic (Darija) in Fineweb 2\n", + "By\n", + "omarkamali\n", + "•\n", + "5 days ago\n", + "•\n", + "17\n", + "Running Your Custom LoRA Fine-Tuned MusicGen Large Locally\n", + "By\n", + "theeseus-ai\n", + "•\n", + "7 days ago\n", + "•\n", + "1\n", + "Building a Local Vector Database Index with Annoy and Sentence Transformers\n", + "By\n", + "theeseus-ai\n", + "•\n", + "8 days ago\n", + "•\n", + "2\n", + "Practical Consciousness Theory for AI System Design\n", + "By\n", + "KnutJaegersberg\n", + "•\n", + "8 days ago\n", + "•\n", + "3\n", + "Releasing QwQ-LongCoT-130K\n", + "By\n", + "amphora\n", + "•\n", + "8 days ago\n", + "•\n", + "6\n", + "They Said It Couldn’t Be Done\n", + "By\n", + "Pclanglais\n", + "•\n", + "8 days ago\n", + "•\n", + "68\n", + "🐺🐦‍⬛ LLM Comparison/Test: 25 SOTA LLMs (including QwQ) through 59 MMLU-Pro CS benchmark runs\n", + "By\n", + "wolfram\n", + "•\n", + "9 days ago\n", + "•\n", + "68\n", + "Company\n", + "© Hugging Face\n", + "TOS\n", + "Privacy\n", + "About\n", + "Jobs\n", + "Website\n", + "Models\n", + "Datasets\n", + "Spaces\n", + "Pricing\n", + "Docs\n", + "\n", + "\n", + "\n", + "community page\n", + "Webpage Title:\n", + "Hugging Face Forums - Hugging Face Community Discussion\n", + "Webpage Contents:\n", + "Loading\n", + "Hugging Face Forums\n", + "Topic\n", + "Replies\n", + "Views\n", + "Activity\n", + "Building goes forever\n", + "Spaces\n", + "0\n", + "10\n", + "December 13, 2024\n", + "Automating .NET C# Code Generation with LLMs\n", + "Beginners\n", + "1\n", + "10\n", + "December 13, 2024\n", + "New on the plattform, need help with document parser tool\n", + "Beginners\n", + "1\n", + "6\n", + "December 13, 2024\n", + "Keep hitting 500 Internal server error when trying to launch gradio app in Spaces\n", + "Beginners\n", + "6\n", + "141\n", + "December 13, 2024\n", + "Langchain ChatHuggingFace\n", + "Beginners\n", + "13\n", + "26\n", + "December 13, 2024\n", + "Not able to access after login through hugging face hub in google colab\n", + "🤗Transformers\n", + "1\n", + "14\n", + "December 13, 2024\n", + "How to finetune LLama models\n", + "Beginners\n", + "0\n", + "18\n", + "December 13, 2024\n", + "Problem with multiple GPUs\n", + "Beginners\n", + "0\n", + "7\n", + "December 13, 2024\n", + "_pickle.PicklingError: cannot pickle '_thread.lock' object\n", + "Beginners\n", + "2\n", + "18\n", + "December 12, 2024\n", + "Solution for Fine Tuning the Blip Model\n", + "🤗Transformers\n", + "0\n", + "7\n", + "December 13, 2024\n", + "Multilingual batches\n", + "🤗Datasets\n", + "3\n", + "22\n", + "December 12, 2024\n", + "Handling Files Exceeding the Storage Limit\n", + "Beginners\n", + "1\n", + "13\n", + "December 13, 2024\n", + "CUDA out of memory when using Trainer with compute_metrics\n", + "🤗Transformers\n", + "23\n", + "40841\n", + "December 13, 2024\n", + "Access chat app in another app\n", + "Spaces\n", + "3\n", + "10\n", + "December 13, 2024\n", + "Fine Tuning with Alpaca vs Chat Template\n", + "Beginners\n", + "0\n", + "12\n", + "December 12, 2024\n", + "Account type and API key for use with Weaviate\n", + "Community Calls\n", + "0\n", + "11\n", + "December 12, 2024\n", + "Best model for translating English to Japanese\n", + "Models\n", + "2\n", + "154\n", + "December 12, 2024\n", + "Letting the generator know, how many stepts he will take\n", + "🤗Datasets\n", + "1\n", + "32\n", + "December 11, 2024\n", + "No response in community GPU grant\n", + "Spaces\n", + "3\n", + "17\n", + "December 12, 2024\n", + "Prepaid Mastercard\n", + "Community Calls\n", + "3\n", + "46\n", + "December 12, 2024\n", + "Spaces are not launching at all\n", + "Spaces\n", + "4\n", + "44\n", + "December 13, 2024\n", + "How to operate on columns of a dataset\n", + "Beginners\n", + "1\n", + "29\n", + "December 11, 2024\n", + "Accessing /similarity endpoint in Vertex AI on one click deploy model\n", + "Google Cloud\n", + "1\n", + "18\n", + "December 11, 2024\n", + "Creating HuggingFace Dataset from PyArrow table is slow\n", + "🤗Datasets\n", + "1\n", + "24\n", + "December 11, 2024\n", + "Get_dataset_config_names not getting desired output (and DatasetGenerationError)\n", + "🤗Datasets\n", + "5\n", + "48\n", + "December 11, 2024\n", + "Models for Document Image Annotation Without OCR\n", + "Research\n", + "1\n", + "92\n", + "December 12, 2024\n", + "How to set the Python version in Hugging Face Space?\n", + "Spaces\n", + "6\n", + "67\n", + "December 12, 2024\n", + "Unable to paste access token in cmd\n", + "Beginners\n", + "4\n", + "6207\n", + "December 12, 2024\n", + "In SpeechSeq2Seq models, is it possible to pass decoder_input_ids for each sample during the training time using huggingface Trainer?\n", + "🤗Transformers\n", + "0\n", + "13\n", + "December 12, 2024\n", + "Use authentication in huggingface Gradio API!(hosting on ZeroGPU)\n", + "Spaces\n", + "2\n", + "89\n", + "December 12, 2024\n", + "next page →\n", + "Home\n", + "Categories\n", + "Guidelines\n", + "Terms of Service\n", + "Privacy Policy\n", + "Powered by\n", + "Discourse\n", + ", best viewed with JavaScript enabled\n", + "\n", + "\n", + "\n", + "GitHub page\n", + "Webpage Title:\n", + "Hugging Face · GitHub\n", + "Webpage Contents:\n", + "Skip to content\n", + "Navigation Menu\n", + "Toggle navigation\n", + "Sign in\n", + "huggingface\n", + "Product\n", + "GitHub Copilot\n", + "Write better code with AI\n", + "Security\n", + "Find and fix vulnerabilities\n", + "Actions\n", + "Automate any workflow\n", + "Codespaces\n", + "Instant dev environments\n", + "Issues\n", + "Plan and track work\n", + "Code Review\n", + "Manage code changes\n", + "Discussions\n", + "Collaborate outside of code\n", + "Code Search\n", + "Find more, search less\n", + "Explore\n", + "All features\n", + "Documentation\n", + "GitHub Skills\n", + "Blog\n", + "Solutions\n", + "By company size\n", + "Enterprises\n", + "Small and medium teams\n", + "Startups\n", + "By use case\n", + "DevSecOps\n", + "DevOps\n", + "CI/CD\n", + "View all use cases\n", + "By industry\n", + "Healthcare\n", + "Financial services\n", + "Manufacturing\n", + "Government\n", + "View all industries\n", + "View all solutions\n", + "Resources\n", + "Topics\n", + "AI\n", + "DevOps\n", + "Security\n", + "Software Development\n", + "View all\n", + "Explore\n", + "Learning Pathways\n", + "White papers, Ebooks, Webinars\n", + "Customer Stories\n", + "Partners\n", + "Executive Insights\n", + "Open Source\n", + "GitHub Sponsors\n", + "Fund open source developers\n", + "The ReadME Project\n", + "GitHub community articles\n", + "Repositories\n", + "Topics\n", + "Trending\n", + "Collections\n", + "Enterprise\n", + "Enterprise platform\n", + "AI-powered developer platform\n", + "Available add-ons\n", + "Advanced Security\n", + "Enterprise-grade security features\n", + "GitHub Copilot\n", + "Enterprise-grade AI features\n", + "Premium Support\n", + "Enterprise-grade 24/7 support\n", + "Pricing\n", + "Search or jump to...\n", + "Search code, repositories, users, issues, pull requests...\n", + "Search\n", + "Clear\n", + "Search syntax tips\n", + "Provide feedback\n", + "We read every piece of feedback, and take your input very seriously.\n", + "Include my email address so I can be contacted\n", + "Cancel\n", + "Submit feedback\n", + "Saved searches\n", + "Use saved searches to filter your results more quickly\n", + "Cancel\n", + "Create saved search\n", + "Sign in\n", + "Sign up\n", + "Reseting focus\n", + "You signed in with another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You signed out in another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "You switched accounts on another tab or window.\n", + "Reload\n", + "to refresh your session.\n", + "Dismiss alert\n", + "Hugging Face\n", + "The AI community building the future.\n", + "Verified\n", + "We've verified that the organization\n", + "huggingface\n", + "controls the domain:\n", + "huggingface.co\n", + "Learn more about verified organizations\n", + "39.9k\n", + "followers\n", + "NYC + Paris\n", + "https://huggingface.co/\n", + "X\n", + "@huggingface\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "0\n", + "More\n", + "Overview\n", + "Repositories\n", + "Projects\n", + "Packages\n", + "People\n", + "Sponsoring\n", + "Pinned\n", + "Loading\n", + "transformers\n", + "transformers\n", + "Public\n", + "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", + "Python\n", + "136k\n", + "27.3k\n", + "diffusers\n", + "diffusers\n", + "Public\n", + "🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.\n", + "Python\n", + "26.6k\n", + "5.5k\n", + "datasets\n", + "datasets\n", + "Public\n", + "🤗 The largest hub of ready-to-use datasets for ML models with fast, easy-to-use and efficient data manipulation tools\n", + "Python\n", + "19.4k\n", + "2.7k\n", + "peft\n", + "peft\n", + "Public\n", + "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.\n", + "Python\n", + "16.7k\n", + "1.7k\n", + "accelerate\n", + "accelerate\n", + "Public\n", + "🚀 A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support\n", + "Python\n", + "8.1k\n", + "989\n", + "optimum\n", + "optimum\n", + "Public\n", + "🚀 Accelerate inference and training of 🤗 Transformers, Diffusers, TIMM and Sentence Transformers with easy to use hardware optimization tools\n", + "Python\n", + "2.6k\n", + "481\n", + "Repositories\n", + "Loading\n", + "Type\n", + "Select type\n", + "Forks\n", + "Archived\n", + "Mirrors\n", + "Templates\n", + "Language\n", + "Select language\n", + "All\n", + "C\n", + "C#\n", + "C++\n", + "Cuda\n", + "Dockerfile\n", + "Go\n", + "Handlebars\n", + "HTML\n", + "Java\n", + "JavaScript\n", + "Jupyter Notebook\n", + "Kotlin\n", + "Lua\n", + "MDX\n", + "Mustache\n", + "Nix\n", + "Python\n", + "Rust\n", + "Shell\n", + "Smarty\n", + "Swift\n", + "TypeScript\n", + "Sort\n", + "Select order\n", + "Last updated\n", + "Name\n", + "Stars\n", + "Showing 10 of 271 repositories\n", + "nanotron\n", + "Public\n", + "Minimalistic large language model 3D-parallelism training\n", + "huggingface/nanotron’s past year of commit activity\n", + "Python\n", + "1,330\n", + "Apache-2.0\n", + "133\n", + "47\n", + "(18 issues need help)\n", + "30\n", + "Updated\n", + "Dec 13, 2024\n", + "trl\n", + "Public\n", + "Train transformer language models with reinforcement learning.\n", + "huggingface/trl’s past year of commit activity\n", + "Python\n", + "10,308\n", + "Apache-2.0\n", + "1,323\n", + "106\n", + "34\n", + "Updated\n", + "Dec 13, 2024\n", + "optimum-habana\n", + "Public\n", + "Easy and lightning fast training of 🤗 Transformers on Habana Gaudi processor (HPU)\n", + "huggingface/optimum-habana’s past year of commit activity\n", + "Python\n", + "160\n", + "Apache-2.0\n", + "217\n", + "22\n", + "(1 issue needs help)\n", + "18\n", + "Updated\n", + "Dec 13, 2024\n", + "accelerate\n", + "Public\n", + "🚀 A simple way to launch, train, and use PyTorch models on almost any device and distributed configuration, automatic mixed precision (including fp8), and easy-to-configure FSDP and DeepSpeed support\n", + "huggingface/accelerate’s past year of commit activity\n", + "Python\n", + "8,053\n", + "Apache-2.0\n", + "989\n", + "102\n", + "(2 issues need help)\n", + "21\n", + "Updated\n", + "Dec 13, 2024\n", + "tgi-gaudi\n", + "Public\n", + "Forked from\n", + "huggingface/text-generation-inference\n", + "Large Language Model Text Generation Inference on Habana Gaudi\n", + "huggingface/tgi-gaudi’s past year of commit activity\n", + "Python\n", + "28\n", + "Apache-2.0\n", + "1,131\n", + "11\n", + "4\n", + "Updated\n", + "Dec 13, 2024\n", + "transformers\n", + "Public\n", + "🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.\n", + "huggingface/transformers’s past year of commit activity\n", + "Python\n", + "136,248\n", + "Apache-2.0\n", + "27,288\n", + "986\n", + "(2 issues need help)\n", + "527\n", + "Updated\n", + "Dec 13, 2024\n", + "text-generation-inference\n", + "Public\n", + "Large Language Model Text Generation Inference\n", + "huggingface/text-generation-inference’s past year of commit activity\n", + "Python\n", + "9,434\n", + "Apache-2.0\n", + "1,102\n", + "145\n", + "18\n", + "Updated\n", + "Dec 13, 2024\n", + "peft\n", + "Public\n", + "🤗 PEFT: State-of-the-art Parameter-Efficient Fine-Tuning.\n", + "huggingface/peft’s past year of commit activity\n", + "Python\n", + "16,699\n", + "Apache-2.0\n", + "1,653\n", + "28\n", + "(1 issue needs help)\n", + "12\n", + "Updated\n", + "Dec 13, 2024\n", + "optimum-neuron\n", + "Public\n", + "Easy, fast and very cheap training and inference on AWS Trainium and Inferentia chips.\n", + "huggingface/optimum-neuron’s past year of commit activity\n", + "Jupyter Notebook\n", + "213\n", + "Apache-2.0\n", + "65\n", + "18\n", + "6\n", + "Updated\n", + "Dec 13, 2024\n", + "cookbook\n", + "Public\n", + "Open-source AI cookbook\n", + "huggingface/cookbook’s past year of commit activity\n", + "Jupyter Notebook\n", + "1,722\n", + "Apache-2.0\n", + "250\n", + "17\n", + "20\n", + "Updated\n", + "Dec 13, 2024\n", + "View all repositories\n", + "People\n", + "View all\n", + "Top languages\n", + "Python\n", + "Jupyter Notebook\n", + "Rust\n", + "TypeScript\n", + "JavaScript\n", + "Most used topics\n", + "pytorch\n", + "machine-learning\n", + "nlp\n", + "deep-learning\n", + "transformers\n", + "Footer\n", + "© 2024 GitHub, Inc.\n", + "Footer navigation\n", + "Terms\n", + "Privacy\n", + "Security\n", + "Status\n", + "Docs\n", + "Contact\n", + "Manage cookies\n", + "Do not share my personal information\n", + "You can’t perform that action at this time.\n", + "\n", + "\n", + "\n", + "Twitter page\n", + "Webpage Title:\n", + "x.com\n", + "Webpage Contents:\n", + "\n", + "\n", + "\n", + "\n", + "LinkedIn page\n", + "Webpage Title:\n", + "Hugging Face | LinkedIn\n", + "Webpage Contents:\n", + "LinkedIn and 3rd parties use essential and non-essential cookies to provide, secure, analyze and improve our Services, and to show you relevant ads (including\n", + "professional and job ads\n", + ") on and off LinkedIn. Learn more in our\n", + "Cookie Policy\n", + ".\n", + "Select Accept to consent or Reject to decline non-essential cookies for this use. You can update your choices at any time in your\n", + "settings\n", + ".\n", + "Accept\n", + "Reject\n", + "Skip to main content\n", + "LinkedIn\n", + "Articles\n", + "People\n", + "Learning\n", + "Jobs\n", + "Games\n", + "Get the app\n", + "Join now\n", + "Sign in\n", + "Hugging Face\n", + "Software Development\n", + "The AI community building the future.\n", + "See jobs\n", + "Follow\n", + "Discover all 474 employees\n", + "Report this company\n", + "About us\n", + "The AI community building the future.\n", + "Website\n", + "https://huggingface.co\n", + "External link for Hugging Face\n", + "Industry\n", + "Software Development\n", + "Company size\n", + "51-200 employees\n", + "Type\n", + "Privately Held\n", + "Founded\n", + "2016\n", + "Specialties\n", + "machine learning, natural language processing, and deep learning\n", + "Products\n", + "Hugging Face\n", + "Hugging Face\n", + "Natural Language Processing (NLP) Software\n", + "We’re on a journey to solve and democratize artificial intelligence through natural language.\n", + "Locations\n", + "Primary\n", + "Get directions\n", + "Paris, FR\n", + "Get directions\n", + "Employees at Hugging Face\n", + "Ludovic Huraux\n", + "Bassem ASSEH\n", + "Rajat Arya\n", + "Tech Lead & Software Engineer @ HF | prev: co-founder XetHub, Apple, Turi, AWS, Microsoft\n", + "Jeff Boudier\n", + "Product + Growth at Hugging Face\n", + "See all employees\n", + "Updates\n", + "Hugging Face\n", + "812,455 followers\n", + "2h\n", + "Report this post\n", + "Hugging Face\n", + "812,455 followers\n", + "1d\n", + "Since the release of OpenAI's o1 model, test-time compute scaling has become one of the hottest topics in LLM research. Rather than relying on ever-larger pre-training budgets, test-time methods use dynamic inference strategies that allow models to “think longer” on harder problems. \n", + "\n", + "Although it is not known how o1 was trained, recent research from DeepMind shows that test-time compute can be scaled optimally through strategies like iterative self-refinement or using a reward model to perform search over the space of solutions.\n", + "\n", + "In this webinar, Lewis. Ed, and Philipp explore the benefits of applying the DeepMind recipe to scale up test-time compute for open models. Join us to find out how we enabled Llama 1B to outperform Llama 8B on the challenging MATH benchmark!\n", + "Scaling test-time compute for open models to solve math problems like o1\n", + "www.linkedin.com\n", + "6\n", + "1 Comment\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "46,081 followers\n", + "6h\n", + "Report this post\n", + "🔥🔥 FlowEdit\n", + "Gradio\n", + "app is now live on\n", + "Hugging Face\n", + ":\n", + "https://lnkd.in/gG7eXuBQ\n", + "For reference, please check out our earlier post introducing the FlowEdit model:\n", + "https://lnkd.in/gHDUHTct\n", + "…more\n", + "42\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Gradio\n", + "46,081 followers\n", + "6h\n", + "Edited\n", + "Report this post\n", + "Leffa from Meta is a 🆕 unified framework for Virtual try-on and Pose transfer!🔥 👕 \n", + "\n", + "Provides controllable person image generation that enables precise manipulation of both appearance (i.e., Virtual try-on) and pose (i.e., Pose transfer).\n", + "\n", + "Learn more about 📚 Code,🔥 Demo, and 🤗 Model ⬇️ \n", + "\n", + "> Leffa Model on\n", + "Hugging Face\n", + "Hub with MIT license:\n", + "https://lnkd.in/gfFWcsCF\n", + "> Build Leffa for VirtualTryOn and Pose Transfer locally with\n", + "Gradio\n", + ":\n", + "https://lnkd.in/gcnezZrW\n", + "> Leffa Gradio app is up on\n", + "Hugging Face\n", + "Spaces:\n", + "https://lnkd.in/gdnuNTHv\n", + "…more\n", + "204\n", + "1 Comment\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Sayak Paul\n", + "ML @ Hugging Face 🤗\n", + "8h\n", + "Report this post\n", + "Sun never sets for the Diffusers team 🤗🌞\n", + "\n", + "Presenting our latest integration of LTX by\n", + "Lightricks\n", + "- a superior video generation model at its scale. \n", + "\n", + "It produces 24 FPS videos at a 768x512 resolution faster than they can be watched 🤪\n", + "\n", + "Model is pretty consumer-friendly, but in case not, you should be able to leverage all the diffusers goodies to make it run on your machine!\n", + "\n", + "Kudos to\n", + "Aryan V S\n", + ", who led this integration. Next up is -- a script for ___ LTX 🚀 Wrong answers only 🪄\n", + "\n", + "Check it out here:\n", + "https://lnkd.in/gwMKniDN\n", + "…more\n", + "102\n", + "2 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Aryan V S\n", + "Engineer @Hugging Face 🤗\n", + "8h\n", + "Report this post\n", + "It is raining open-source video models these last few weeks! LTX Video is now in Diffusers with finetuning and structure-control support coming shortly. This model comes from the amazing brains at\n", + "Lightricks\n", + "💫 🧑🍳 \n", + "\n", + "LTXV is very smol compared to some other open source models, but packs twice their punch! Fast generation, low memory requirements, high quality - what more do we need? Just two years ago, image generation models struggled at 2B param scale but now we have entirely different beasts 💪 \n", + "\n", + "Links in comment\n", + "…more\n", + "61\n", + "1 Comment\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Ben Burtenshaw\n", + "Building Argilla @ 🤗 Hugging face\n", + "10h\n", + "Report this post\n", + "If you're improving your LLM skills on real world use cases, this free course just got real! Smol course now has 4 chapters, and the most important was just released.\n", + "\n", + "🌎 Chapter 4 shows you how to evaluate models on custom use cases. So you can test out the models you trained in the earlier chapters to see if they do the job.\n", + "\n", + "📚 The course walks through using simple libraries to create an evaluation dataset for a custom domain. Libraries like lighteval, distilabel, and argilla. \n", + "\n", + "🤯 If you've ever wondered how to evaluate an LLM for you problem, this is the material for you.\n", + "\n", + "We'll return to this chapter in a capstone project!\n", + "127\n", + "12 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Freddy Boulton\n", + "Software Engineer @ 🤗\n", + "1d\n", + "Report this post\n", + "Hello Llama 3.2! 🗣️🦙\n", + "\n", + "Build a Siri-like coding assistant that responds to \"Hello Llama\" in 100 lines of python!\n", + "\n", + "This is all possible with gradio-webrtc, the easiest way to add audio/video streaming to your AI models! Links to repo and\n", + "Hugging Face\n", + "space below:\n", + "…more\n", + "175\n", + "5 Comments\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Amélie Viallet\n", + "Building Argilla @ 🤗 Hugging Face\n", + "1d\n", + "Report this post\n", + "[FineWeb2 - Annotation Sprint] It's amazing to see “language teams” growing every day! 🤗 \n", + "\n", + "Annotation can be tedious and requires a lot of focus, so it’s always easier—and way more fun—when done together.\n", + "\n", + "Some datasets already have over 15 contributors—👏—while others rely on just one amazing person (double 👏👏 for them!). \n", + "Finding contributors for less widespread languages can be tricky!\n", + "\n", + "Here are a few tips to connect with your language community:\n", + "\n", + " 🦜 Call your community in your language: We’ve prepared templates you can translate before publishing on Social Media or email.\n", + "\n", + "🪡 Make the task simpler for all: Translate the guidelines! They can be dense, and the examples are super helpful for getting it right.\n", + "\n", + "🫚 Share the initiative widely with anyone passionate about supporting their language. The only requirement is fluency!\n", + "\n", + "And you, what are your tips?\n", + "#fineWeb2\n", + "#argilla\n", + "53\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "812,455 followers\n", + "1d\n", + "Report this post\n", + "Since the release of OpenAI's o1 model, test-time compute scaling has become one of the hottest topics in LLM research. Rather than relying on ever-larger pre-training budgets, test-time methods use dynamic inference strategies that allow models to “think longer” on harder problems. \n", + "\n", + "Although it is not known how o1 was trained, recent research from DeepMind shows that test-time compute can be scaled optimally through strategies like iterative self-refinement or using a reward model to perform search over the space of solutions.\n", + "\n", + "In this webinar, Lewis. Ed, and Philipp explore the benefits of applying the DeepMind recipe to scale up test-time compute for open models. Join us to find out how we enabled Llama 1B to outperform Llama 8B on the challenging MATH benchmark!\n", + "Scaling test-time compute for open models to solve math problems like o1\n", + "www.linkedin.com\n", + "15\n", + "Like\n", + "Comment\n", + "Share\n", + "Hugging Face\n", + "reposted this\n", + "Franck Abgrall\n", + "Software Engineer @ Hugging Face 🤗\n", + "1d\n", + "Report this post\n", + "✨ We just released an overview of the permissions for fine-grained tokens by hovering over the badge on\n", + "Hugging Face\n", + "token settings page (org and user).\n", + "\n", + "It will show the highest permission you've set for each entity\n", + "23\n", + "1 Comment\n", + "Like\n", + "Comment\n", + "Share\n", + "Join now to see what you are missing\n", + "Find people you know at Hugging Face\n", + "Browse recommended jobs for you\n", + "View all updates, news, and articles\n", + "Join now\n", + "Similar pages\n", + "Anthropic\n", + "Research Services\n", + "Mistral AI\n", + "Technology, Information and Internet\n", + "Paris, France\n", + "OpenAI\n", + "Research Services\n", + "San Francisco, CA\n", + "LangChain\n", + "Technology, Information and Internet\n", + "Generative AI\n", + "Technology, Information and Internet\n", + "Perplexity\n", + "Software Development\n", + "San Francisco, California\n", + "Google DeepMind\n", + "Research Services\n", + "London, London\n", + "LlamaIndex\n", + "Technology, Information and Internet\n", + "San Francisco, California\n", + "DeepLearning.AI\n", + "Software Development\n", + "Palo Alto, California\n", + "Cohere\n", + "Software Development\n", + "Toronto, Ontario\n", + "Show more similar pages\n", + "Show fewer similar pages\n", + "Browse jobs\n", + "Engineer jobs\n", + "555,845 open jobs\n", + "Machine Learning Engineer jobs\n", + "148,937 open jobs\n", + "Scientist jobs\n", + "48,969 open jobs\n", + "Software Engineer jobs\n", + "300,699 open jobs\n", + "Intern jobs\n", + "71,196 open jobs\n", + "Developer jobs\n", + "258,935 open jobs\n", + "Analyst jobs\n", + "694,057 open jobs\n", + "Intelligence Specialist jobs\n", + "7,156 open jobs\n", + "Manager jobs\n", + "1,880,925 open jobs\n", + "Data Scientist jobs\n", + "264,158 open jobs\n", + "Director jobs\n", + "1,220,357 open jobs\n", + "Associate jobs\n", + "1,091,945 open jobs\n", + "Python Developer jobs\n", + "46,642 open jobs\n", + "Evangelist jobs\n", + "5,068 open jobs\n", + "Data Engineer jobs\n", + "192,126 open jobs\n", + "Vice President jobs\n", + "235,270 open jobs\n", + "Quantitative Analyst jobs\n", + "19,570 open jobs\n", + "Program Manager jobs\n", + "243,900 open jobs\n", + "Data Science Specialist jobs\n", + "2,441 open jobs\n", + "Lead Software Engineer jobs\n", + "68,215 open jobs\n", + "Show more jobs like this\n", + "Show fewer jobs like this\n", + "Funding\n", + "Hugging Face\n", + "7 total rounds\n", + "Last Round\n", + "Series D\n", + "Feb 16, 2024\n", + "External Crunchbase Link for last round of funding\n", + "See more info on\n", + "crunchbase\n", + "More searches\n", + "More searches\n", + "Engineer jobs\n", + "Intern jobs\n", + "Machine Learning Engineer jobs\n", + "Software Engineer jobs\n", + "Scientist jobs\n", + "Developer jobs\n", + "Research Intern jobs\n", + "Analyst jobs\n", + "Intelligence Specialist jobs\n", + "Quantitative Analyst jobs\n", + "Technician jobs\n", + "Data Science Specialist jobs\n", + "Project Manager jobs\n", + "Summer Intern jobs\n", + "Manager jobs\n", + "Senior Staff Engineer jobs\n", + "PHD jobs\n", + "Trader jobs\n", + "Researcher jobs\n", + "Data Scientist jobs\n", + "Writer jobs\n", + "Data Analyst jobs\n", + "Product Designer jobs\n", + "Back End Developer jobs\n", + "Spring Intern jobs\n", + "Program Manager jobs\n", + "Technology Officer jobs\n", + "Software Intern jobs\n", + "Security Professional jobs\n", + "Senior Software Engineer jobs\n", + "Python Developer jobs\n", + "Engineering Manager jobs\n", + "Web Developer jobs\n", + "Graduate jobs\n", + "Full Stack Engineer jobs\n", + "Professor jobs\n", + "Head jobs\n", + "Verification Manager jobs\n", + "User Experience Designer jobs\n", + "Recruiter jobs\n", + "Chief Executive Officer jobs\n", + "Associate jobs\n", + "Support Developer jobs\n", + "Senior Firmware Engineer jobs\n", + "Marketing Manager jobs\n", + "Modeling Engineer jobs\n", + "Designer jobs\n", + "Automation Lead jobs\n", + "Options Trader jobs\n", + "Agile Coach jobs\n", + "Research Engineer jobs\n", + "Software Quality Assurance Analyst jobs\n", + "User Experience Manager jobs\n", + "Technical Intern jobs\n", + "Junior Network Engineer jobs\n", + "Information Technology Recruiter jobs\n", + "User Researcher jobs\n", + "Player jobs\n", + "Engineering Project Manager jobs\n", + "Digital Strategist jobs\n", + "LinkedIn\n", + "© 2024\n", + "About\n", + "Accessibility\n", + "User Agreement\n", + "Privacy Policy\n", + "Cookie Policy\n", + "Copyright Policy\n", + "Brand Policy\n", + "Guest Controls\n", + "Community Guidelines\n", + "العربية (Arabic)\n", + "বাংলা (Bangla)\n", + "Čeština (Czech)\n", + "Dansk (Danish)\n", + "Deutsch (German)\n", + "Ελληνικά (Greek)\n", + "English (English)\n", + "Español (Spanish)\n", + "فارسی (Persian)\n", + "Suomi (Finnish)\n", + "Français (French)\n", + "हिंदी (Hindi)\n", + "Magyar (Hungarian)\n", + "Bahasa Indonesia (Indonesian)\n", + "Italiano (Italian)\n", + "עברית (Hebrew)\n", + "日本語 (Japanese)\n", + "한국어 (Korean)\n", + "मराठी (Marathi)\n", + "Bahasa Malaysia (Malay)\n", + "Nederlands (Dutch)\n", + "Norsk (Norwegian)\n", + "ਪੰਜਾਬੀ (Punjabi)\n", + "Polski (Polish)\n", + "Português (Portuguese)\n", + "Română (Romanian)\n", + "Русский (Russian)\n", + "Svenska (Swedish)\n", + "తెలుగు (Telugu)\n", + "ภาษาไทย (Thai)\n", + "Tagalog (Tagalog)\n", + "Türkçe (Turkish)\n", + "Українська (Ukrainian)\n", + "Tiếng Việt (Vietnamese)\n", + "简体中文 (Chinese (Simplified))\n", + "正體中文 (Chinese (Traditional))\n", + "Language\n", + "Agree & Join LinkedIn\n", + "By clicking Continue to join or sign in, you agree to LinkedIn’s\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "Interested in working at Hugging Face?\n", + "Sign in\n", + "Welcome back\n", + "Email or phone\n", + "Password\n", + "Show\n", + "Forgot password?\n", + "Sign in\n", + "or\n", + "By clicking Continue to join or sign in, you agree to LinkedIn’s\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "New to LinkedIn?\n", + "Join now\n", + "or\n", + "New to LinkedIn?\n", + "Join now\n", + "By clicking Continue to join or sign in, you agree to LinkedIn’s\n", + "User Agreement\n", + ",\n", + "Privacy Policy\n", + ", and\n", + "Cookie Policy\n", + ".\n", + "LinkedIn\n", + "LinkedIn is better on the app\n", + "Don’t have the app? Get it in the Microsoft Store.\n", + "Open the app\n", + "\n", + "\n" + ] + } + ], "source": [ "print(get_all_details(\"https://huggingface.co\"))" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 63, "id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2", "metadata": {}, "outputs": [], @@ -285,7 +3679,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 65, "id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23", "metadata": {}, "outputs": [], @@ -293,6 +3687,7 @@ "def get_brochure_user_prompt(company_name, url):\n", " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " user_prompt += f\"The Title must be bold and subtitle in italic\\n\"\n", " user_prompt += get_all_details(url)\n", " user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n", " return user_prompt" @@ -300,17 +3695,35 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 32, "id": "cd909e0b-1312-4ce2-a553-821e795d7572", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links:: {'links': [{'type': 'about page', 'url': 'https://huggingface.co'}, {'type': 'models page', 'url': 'https://huggingface.co/models'}, {'type': 'datasets page', 'url': 'https://huggingface.co/datasets'}, {'type': 'spaces page', 'url': 'https://huggingface.co/spaces'}, {'type': 'enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.co/pricing'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'community page', 'url': 'https://discuss.huggingface.co'}, {'type': 'GitHub page', 'url': 'https://github.com/huggingface'}, {'type': 'Twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'LinkedIn page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" + ] + }, + { + "data": { + "text/plain": [ + "'You are looking at a company called: HuggingFace\\nHere are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\nLanding page:\\nWebpage Title:\\nHugging Face – The AI community building the future.\\nWebpage Contents:\\nHugging Face\\nModels\\nDatasets\\nSpaces\\nPosts\\nDocs\\nEnterprise\\nPricing\\nLog In\\nSign Up\\nThe AI community building the future.\\nThe platform where the machine learning community collaborates on models, datasets, and applications.\\nTrending on\\nthis week\\nModels\\nmeta-llama/Llama-3.3-70B-Instruct\\nUpdated\\n3 days ago\\n•\\n102k\\n•\\n960\\ntencent/HunyuanVideo\\nUpdated\\n6 days ago\\n•\\n3.73k\\n•\\n992\\nDatou1111/shou_xin\\nUpdated\\n5 days ago\\n•\\n7.84k\\n•\\n322\\nblack-forest-labs/FLUX.1-dev\\nUpdated\\nAug 16\\n•\\n1.38M\\n•\\n7.23k\\nQwen/QwQ-32B-Preview\\nUpdated\\n15 days ago\\n•\\n92.8k\\n•\\n1.27k\\nBrowse 400k+ models\\nSpaces\\nRunning\\non\\nZero\\n1.13k\\n🏢\\nTRELLIS\\nScalable and Versatile 3D Generation from images\\nRunning\\non\\nZero\\n291\\n🦀🏆\\nFLUXllama\\nFLUX 4-bit Quantization(just 8GB VRAM)\\nRunning\\non\\nL40S\\n244\\n🚀\\nFlux Style Shaping\\nOptical illusions and style transfer with FLUX\\nRunning\\non\\nCPU Upgrade\\n5.92k\\n👕\\nKolors Virtual Try-On\\nRunning\\non\\nZero\\n5.71k\\n🖥️\\nFLUX.1 [dev]\\nBrowse 150k+ applications\\nDatasets\\nHuggingFaceFW/fineweb-2\\nUpdated\\n5 days ago\\n•\\n27.6k\\n•\\n284\\nfka/awesome-chatgpt-prompts\\nUpdated\\nSep 3\\n•\\n7.71k\\n•\\n6.52k\\nCohereForAI/Global-MMLU\\nUpdated\\n1 day ago\\n•\\n4.59k\\n•\\n77\\nO1-OPEN/OpenO1-SFT\\nUpdated\\n22 days ago\\n•\\n1.34k\\n•\\n175\\namphora/QwQ-LongCoT-130K\\nUpdated\\n8 days ago\\n•\\n536\\n•\\n46\\nBrowse 100k+ datasets\\nThe Home of Machine Learning\\nCreate, discover and collaborate on ML better.\\nThe collaboration platform\\nHost and collaborate on unlimited public models, datasets and applications.\\nMove faster\\nWith the HF Open source stack.\\nExplore all modalities\\nText, image, video, audio or even 3D.\\nBuild your portfolio\\nShare your work with the world and build your ML profile.\\nSign Up\\nAccelerate your ML\\nWe provide paid Compute and Enterprise solutions.\\nCompute\\nDeploy on optimized\\nInference Endpoints\\nor update your\\nSpaces applications\\nto a GPU in a few clicks.\\nView pricing\\nStarting at $0.60/hour for GPU\\nEnterprise\\nGive your team the most advanced platform to build AI with enterprise-grade security, access controls and\\n\\t\\t\\tdedicated support.\\nGetting started\\nStarting at $20/user/month\\nSingle Sign-On\\nRegions\\nPriority Support\\nAudit Logs\\nResource Groups\\nPrivate Datasets Viewer\\nMore than 50,000 organizations are using Hugging Face\\nAi2\\nEnterprise\\nnon-profit\\n•\\n361 models\\n•\\n1.71k followers\\nAI at Meta\\nEnterprise\\ncompany\\n•\\n2.05k models\\n•\\n3.75k followers\\nAmazon Web Services\\ncompany\\n•\\n21 models\\n•\\n2.42k followers\\nGoogle\\ncompany\\n•\\n910 models\\n•\\n5.46k followers\\nIntel\\ncompany\\n•\\n217 models\\n•\\n2.05k followers\\nMicrosoft\\ncompany\\n•\\n351 models\\n•\\n6.06k followers\\nGrammarly\\ncompany\\n•\\n10 models\\n•\\n98 followers\\nWriter\\nEnterprise\\ncompany\\n•\\n16 models\\n•\\n178 followers\\nOur Open Source\\nWe are building the foundation of ML tooling with the community.\\nTransformers\\n136,246\\nState-of-the-art ML for Pytorch, TensorFlow, and JAX.\\nDiffusers\\n26,624\\nState-of-the-art diffusion models for image and audio generation in PyTorch.\\nSafetensors\\n2,953\\nSimple, safe way to store and distribute neural networks weights safely and quickly.\\nHub Python Library\\n2,165\\nClient library for the HF Hub: manage repositories from your Python runtime.\\nTokenizers\\n9,150\\nFast tokenizers, optimized for both research and production.\\nPEFT\\n16,699\\nParameter efficient finetuning methods for large models.\\nTransformers.js\\n12,337\\nState-of-the-art Machine Learning for the web. Run Transformers directly in your browser, with no need for a server.\\ntimm\\n32,592\\nState-of-the-art computer vision models, layers, optimizers, training/evaluation, and utilities.\\nTRL\\n10,308\\nTrain transformer language models with reinforcement learning.\\nDatasets\\n19,349\\nAccess and share datasets for computer vision, audio, and NLP tasks.\\nText Generation Inference\\n9,433\\nToolkit to serve Large Language Models.\\nAccelerate\\n8,053\\nEasily train and use PyTorch models with multi-GPU, TPU, mixed-precision.\\nWebsite\\nModels\\nDatasets\\nSpaces\\nTasks\\nInference Endpoints\\nHuggingChat\\nCompany\\nAbout\\nBrand assets\\nTerms of service\\nPrivacy\\nJobs\\nPress\\nResources\\nLearn\\nDocumentation\\nBlog\\nForum\\nService Status\\nSocial\\nGitHub\\nTwitter\\nLinkedIn\\nDiscord\\nZhihu\\nWeChat\\n\\n\\n\\nabout page\\nWebpage Title:\\nHugging Face – The AI community building the future.\\nWebpage Contents:\\nHugging Face\\nModels\\nDatasets\\nSpaces\\nPosts\\nDocs\\nEnterprise\\nPricing\\nLog In\\nSign Up\\nThe AI community building the future.\\nThe platform where the machine learning community collaborates on models, datasets, and applications.\\nTrending on\\nthis week\\nModels\\nmeta-llama/Llama-3.3-70B-Instruct\\nUpdated\\n3 days ago\\n•\\n102k\\n•\\n960\\ntencent/HunyuanVideo\\nUpdated\\n6 days ago\\n•\\n3.73k\\n•\\n992\\nDatou1111/shou_xin\\nUpdated\\n5 days ago\\n•\\n7.84k\\n•\\n322\\nblack-forest-labs/FLUX.1-dev\\nUpdated\\nAug 16\\n•\\n1.38M\\n•\\n7.23k\\nQwen/QwQ-32B-Preview\\nUpdated\\n15 days ago\\n•\\n92.8k\\n•\\n1.27k\\nBrowse 400k+ models\\nSpaces\\nRunning\\non\\nZero\\n1.13k\\n🏢\\nTRELLIS\\nScalable and Versatile 3D Ge'" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 67, "id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46", "metadata": {}, "outputs": [], @@ -323,18 +3736,155 @@ " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", " ],\n", " )\n", - " result = response.choices[0].message.content\n", - " display(Markdown(result))" + " return response.choices[0].message.content\n", + " " ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 71, "id": "e093444a-9407-42ae-924a-145730591a39", "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links:: {'links': [{'type': 'about page', 'url': 'https://huggingface.com'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'blog page', 'url': 'https://huggingface.com/blog'}, {'type': 'company page', 'url': 'https://discuss.huggingface.co'}, {'type': 'github page', 'url': 'https://github.com/huggingface'}, {'type': 'twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'linkedin page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" + ] + }, + { + "data": { + "text/markdown": [ + "```markdown\n", + "# **Hugging Face**\n", + "### _The AI Community Building the Future_\n", + "\n", + "---\n", + "\n", + "## 💡 About Us\n", + "Hugging Face is not just a company; it's a vibrant community at the forefront of artificial intelligence and machine learning. Our platform serves as a collaborative space where enthusiasts, researchers, and developers come together to create, discover, and share cutting-edge models, datasets, and applications. With a commitment to open source and innovation, we're shaping the future of AI.\n", + "\n", + "## 🌍 Our Offerings\n", + "- **Models**: Explore over 400,000 machine learning models, including state-of-the-art architectures for various tasks across text, image, video, and audio domains.\n", + "- **Datasets**: Access a rich repository of 100,000+ datasets to enhance your ML projects.\n", + "- **Spaces**: Deploy and share your applications seamlessly on our platform, fostering collaboration and discovery.\n", + "- **Enterprise Solutions**: We provide tailored solutions with enterprise-grade security, support, and access controls for over 50,000 organizations, including notable giants like Google, Microsoft, and Amazon Web Services.\n", + "\n", + "## 🤝 Our Customers\n", + "We cater to a diverse range of clients, from emerging startups to established enterprises looking to leverage AI capabilities. Hugging Face is trusted by organizations like Meta, Grammarly, and Intel, making it a go-to platform for machine learning advancement.\n", + "\n", + "## 🌱 Company Culture\n", + "At Hugging Face, we foster an inclusive and dynamic culture that encourages creativity and collaboration. Our team is composed of enthusiastic individuals from various backgrounds who share a passion for artificial intelligence. We believe in the power of community contributions to make machine learning accessible to everyone. A commitment to continuous learning and support for individual growth encourages employees to innovate and challenge the status quo.\n", + "\n", + "## 🚀 Careers at Hugging Face\n", + "Join us in our mission to democratize AI! We are on the lookout for talented individuals across various domains including engineering, data science, and community engagement. If you are passionate about building tools that impact the future of AI, consider applying for a position. Explore our current job openings on our [Jobs page](https://huggingface.co/jobs).\n", + "\n", + "## 🌟 Why Choose Hugging Face?\n", + "- **Open Source Commitment**: We believe in the power of open collaboration and sharing knowledge.\n", + "- **Innovative Tools**: Our state-of-the-art tools and libraries streamline the ML development process.\n", + "- **Community Focus**: We thrive on contributions from our community, ensuring the best resources are at everyone's fingertips.\n", + "\n", + "---\n", + "\n", + "For more information, visit our website at [HuggingFace.co](https://huggingface.co) and join us in building the future of AI!\n", + "```" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "brochure= create_brochure(\"HuggingFace\", \"https://huggingface.com\")\n", + "display(Markdown(brochure))" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "id": "49977b5d-1378-4786-b61e-a02bc969cd40", + "metadata": {}, "outputs": [], "source": [ - "create_brochure(\"HuggingFace\", \"https://huggingface.com\")" + "def translate(content, lang):\n", + " print(\"start_the_translation\")\n", + " trans_system_prompt = f\"Has an assistant, you have the task to translate in {lang} the content and keep the result in markdown\"\n", + " trans_user_prompt = \"You received the content of the brochure. you must translate it for international client\\n.\"\n", + " trans_user_prompt += f\"The content is:{content}\"\n", + " response = openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\":trans_system_prompt },\n", + " {\"role\": \"user\", \"content\": trans_user_prompt}\n", + " ],\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "afe303ca-16c8-4437-9596-b56e9af931fa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "start_the_translation\n" + ] + }, + { + "data": { + "text/markdown": [ + "```markdown\n", + "# **Hugging Face**\n", + "### _Communauté ya AI oyo ezalaka na etape ya koleka_\n", + "\n", + "---\n", + "\n", + "## 💡 Ekomeli\n", + "Hugging Face ezali te pekeli; ezali communauté ya makasi na ntango ya kitoko ya intelijans ya makasi mpe kokanga boye. Plateforme na biso ezali ndako ya bokokani epayi ya ba-enthousiasts, ba-researchers, mpe ba-developers oyo bakotaki kosepela, kobongisa, mpe koyamba ba-modèles, ba-datasets, mpe ba-applications ya malamu. Na ekateli ya kosala na makambo ya sika mpe bokeseni, tozali kolongola avenir ya AI.\n", + "\n", + "## 🌍 Makambo na biso\n", + "- **Ba-Modèles**: Talá bisika ya ba-modèles ya machine learning koleka 400,000, okata na ba-architectures ya kitoko mpo na makambo ebele na domaine ya texte, image, vidéo, mpe audio.\n", + "- **Ba-Datasets**: Pesa nzela na ba-datasets ya 100,000+ mpo na kobundisa misala na yo ya ML.\n", + "- **Ba-Spaces**: Lula mpe koyamba ba-applications na yo na nzela ya kitoko na plateforme na biso, kosembola bokokani mpe kokanisa.\n", + "- **Soluté ya Entreprise**: Tozali kopesa makambo oyo etali entreprise na bokamba ya sécurité, soutien, mpe contrôles ya accès mpo na ba-organisation koleka 50,000, na ba-grands comme Google, Microsoft, mpe Amazon Web Services.\n", + "\n", + "## 🤝 Ba-Clients na biso\n", + "Tosala na ba-clients nyonso, na banzela ya ba-startups ya sika ti na ba-entreprises ya kala oyo balingaka kokanisa makambo ya AI. Hugging Face ezali site ya boko ya ba-organisation lokola Meta, Grammarly, mpe Intel, oyo esalelaka yango mpo na ntango ya kokola ya machine learning.\n", + "\n", + "## 🌱 Culture ya Entreprise\n", + "Na Hugging Face, tosalaka culture ya kokamwa mpe kolanda molende oyo ebotaka bokokani mpe bokeseni. Équipe na biso ezali na ba-personnes ya gete oyo bazali na makambo ebele ya bokinaka, kasi bokeseni ya intelijans ya makasi. Tozali na likanisi ya makasi mpo na basani ya communauté mpo na komibanzela machine learning mpo na nyonso. État ya kotalela mpo na koyekola mpo na bokeseni ya moto moko na moko ekangani ba-employés na biso mpo na kolongola mabe mpe kosembola mpasi.\n", + "\n", + "## 🚀 Ba-Carrières na Hugging Face\n", + "Landa biso na mosala na biso ya kolongola AI na komipesa! Tozali na ngai ya kotela ba-personnes ya makasi na ba-domaines ebele lokola engineering, data science, mpe engagement ya communauté. Soki ozali na likambo ya kolona ba-tools oyo ekoyeba avenir ya AI, zwela mosala. Talá ba-postes oyo ezali na [page ya Ba-Emplois](https://huggingface.co/jobs).\n", + "\n", + "## 🌟 Mpo na nini Olingi Hugging Face?\n", + "- **État ya Ozala Makasi na Makambo ya Sika**: Tozali na likanisi ya makasi mpo na bokokani ya baninga mpe kondima koyamba makambo.\n", + "- **Ba-Tools ya Kitoko**: Ba-tools na biso mpe ba-archives ya kitoko basalaka makambo ya ml moko.\n", + "- **Koleka na Communauté**: Tokoki na sokisoko ya ba-contributions ya ba-communauté na biso, mpo na kokoba na nzela ya kitoko.\n", + "\n", + "---\n", + "\n", + "Mpo na makambo ebele, kende na website na biso na [HuggingFace.co](https://huggingface.co) mpe landa biso na kolongola avenir ya AI!\n", + "```" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "display(Markdown(translate(brochure, \"lingala\")))" ] }, { @@ -350,7 +3900,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "id": "51db0e49-f261-4137-aabe-92dd601f7725", "metadata": {}, "outputs": [], @@ -375,10 +3925,68 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 55, "id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found links:: {'links': [{'type': 'about page', 'url': 'https://huggingface.co/about'}, {'type': 'careers page', 'url': 'https://apply.workable.com/huggingface/'}, {'type': 'enterprise page', 'url': 'https://huggingface.co/enterprise'}, {'type': 'pricing page', 'url': 'https://huggingface.co/pricing'}, {'type': 'blog page', 'url': 'https://huggingface.co/blog'}, {'type': 'discussion forums', 'url': 'https://discuss.huggingface.co'}, {'type': 'github page', 'url': 'https://github.com/huggingface'}, {'type': 'twitter page', 'url': 'https://twitter.com/huggingface'}, {'type': 'linkedin page', 'url': 'https://www.linkedin.com/company/huggingface/'}]}\n" + ] + }, + { + "data": { + "text/markdown": [ + "# Welcome to Hugging Face!\n", + "\n", + "---\n", + "\n", + "### 🤖 **The AI Community Helping You Hug It Out** 🤗\n", + "\n", + "At Hugging Face, we don't just build AI; we build a whole community that dreams big and collaborates even bigger! Forget about flying solo; our platform offers a cozy corner where machine learning enthusiasts can come together, share models, datasets, and maybe even a virtual cup of coffee. ☕️\n", + "\n", + "### 🚀 What’s Trending This Week?\n", + "\n", + "- **meta-llama/Llama-3.3-70B-Instruct** - Updated 3 days ago and already has more followers than your favorite influencer: **102k**! \n", + "- **tencent/HunyuanVideo** - Still waiting on a Netflix adaptation. Just **3.73k** users trying to make sense of it!\n", + "- **black-forest-labs/FLUX.1-dev** - Not just a coding project; it’s a whole experience—update rate: once a month and counting. **1.38M** supporters!\n", + "\n", + "### 🏢 More Than Just Algorithms\n", + "\n", + "We’re like a cozy library mixed with an explosion of creativity (and a sprinkle of caffeine)! With over **400,000 models** and **100,000 datasets**, you’ll find all the toys you need to build the ultimate machine learning castle! 🏰\n", + "\n", + "### 🎉 **Join Our Hug-tastic Culture!**\n", + "\n", + "- **Collaboration**: Work hand-in-hand with over **50,000 organizations** including big names like Google, Microsoft, and Grammarly! (Don’t worry, we don’t ask for autographs)\n", + " \n", + "- **Open Source Fun**: Participate in creating cool new tools—like our Transformers™ library! Who knew a “transformer” could be cooler than a truck that turns into a robot? 🤔\n", + "\n", + "### 🕵️‍♂️ Careers at Hugging Face\n", + "\n", + "- **Looking for a Job?** We’re hiring! Take your skills from pretender to defender in the AI world. \n", + "- **Perks**: Flexible workspace, competitive pay, and the option to take breaks every now and then to hug a co-worker! Just kidding, keep it professional…mostly! 😉\n", + "\n", + "### 💌 Join Us Today!\n", + "\n", + "Are you an AI aficionado or just really fond of hugging? 😅 Come for the algorithms, stay for the community! We provide paid Compute and Enterprise solutions, so whether you're a startup or a seasoned pro, we offer the right resources to transform your capabilities!\n", + "\n", + "### 📞 Get In Touch!\n", + "\n", + "Still have questions? Willing to engage in a deep dive discussion about whether machines can cry? Or just want to say hi? Check out our [website](https://huggingface.co) or follow us on social media.\n", + "\n", + "---\n", + "### **Let’s Hug it Out With AI!**" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")" ] diff --git a/week1/solutions/week1 SOLUTION.ipynb b/week1/solutions/week1 SOLUTION.ipynb index 5a7f2a7..b1bfb85 100644 --- a/week1/solutions/week1 SOLUTION.ipynb +++ b/week1/solutions/week1 SOLUTION.ipynb @@ -15,7 +15,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "c1070317-3ed9-4659-abe3-828943230e03", "metadata": {}, "outputs": [], @@ -30,7 +30,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", "metadata": {}, "outputs": [], @@ -43,7 +43,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", "metadata": {}, "outputs": [], @@ -56,7 +56,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", "metadata": {}, "outputs": [], @@ -71,7 +71,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "8595807b-8ae2-4e1b-95d9-e8532142e8bb", "metadata": {}, "outputs": [], @@ -84,7 +84,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "9605cbb6-3d3f-4969-b420-7f4cae0b9328", "metadata": {}, "outputs": [], @@ -99,10 +99,96 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "The line of code you've provided utilizes a generator expression along with the `yield from` syntax in Python. Let's break it down step by step.\n", + "\n", + "### Breakdown of the Code\n", + "\n", + "1. **Context of `yield from`:**\n", + " - `yield from` is a special syntax in Python used within a generator to yield all values from an iterable (like a list, set, or another generator) without explicitly iterating through it. It's useful for delegating part of the generator's operations to another generator.\n", + "\n", + "2. **The Set Comprehension:**\n", + " - `{book.get(\"author\") for book in books if book.get(\"author\")}` is a **set comprehension**. It constructs a set of unique author names from the `books` collection.\n", + " - **How it works:**\n", + " - `for book in books`: This iterates over each `book` in the `books` iterable (which is assumed to be a list or another iterable containing dictionaries).\n", + " - `book.get(\"author\")`: This retrieves the value corresponding to the key `\"author\"` from the `book` dictionary. If the key does not exist in the dictionary, `get()` returns `None`.\n", + " - `if book.get(\"author\")`: This condition checks if the author exists (i.e., is not `None`, empty string, etc.). If the result of `get(\"author\")` is falsy (like `None`), that book is skipped.\n", + " - The result is a set of unique author names because sets automatically remove duplicates.\n", + "\n", + "3. **Combining `yield from` with Set Comprehension:**\n", + " - The entire line of code thus creates a generator that can yield each unique author from the set of authors collected from the `books`, allowing the surrounding context to retrieve authors one by one.\n", + "\n", + "### Purpose of the Code\n", + "\n", + "- **Purpose:** The purpose of this line is to efficiently generate a sequence of unique author names from a list (or iterable) of book dictionaries, while filtering out any entries that do not have an author specified.\n", + "- **Why Use This Approach:** \n", + " - Using a set comprehension ensures that only unique authors are collected, avoiding duplicates.\n", + " - The `yield from` syntax provides a clean way to return these unique authors as part of a generator function, making it easy to iterate over them in another context without needing to manage the iteration and collection explicitly.\n", + "\n", + "### Example Scenario\n", + "\n", + "Let's say you have a list of books structured like this:\n", + "\n", + "python\n", + "books = [\n", + " {\"title\": \"Book 1\", \"author\": \"Alice\"},\n", + " {\"title\": \"Book 2\", \"author\": \"Bob\"},\n", + " {\"title\": \"Book 3\", \"author\": \"Alice\"}, # Duplicate author\n", + " {\"title\": \"Book 4\"}, # No author\n", + " {\"title\": \"Book 5\", \"author\": \"\"}\n", + "]\n", + "\n", + "\n", + "If you execute the line in a generator function, here's what happens:\n", + "\n", + "1. The **Set Comprehension** is evaluated, resulting in the set `{\"Alice\", \"Bob\"}`.\n", + "2. The `yield from` syntax will then yield each of these authors one by one, allowing any caller of the generator to iterate through `Alice` and `Bob`.\n", + "\n", + "### Usage\n", + "\n", + "Here is a complete example of how you might use this line in a generator function:\n", + "\n", + "python\n", + "def unique_authors(books):\n", + " yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\n", + "# Example usage:\n", + "books = [\n", + " {\"title\": \"Book 1\", \"author\": \"Alice\"},\n", + " {\"title\": \"Book 2\", \"author\": \"Bob\"},\n", + " {\"title\": \"Book 3\", \"author\": \"Alice\"},\n", + " {\"title\": \"Book 4\"},\n", + " {\"title\": \"Book 5\", \"author\": \"\"}\n", + "]\n", + "\n", + "for author in unique_authors(books):\n", + " print(author)\n", + "\n", + "\n", + "### Output\n", + "\n", + "Alice\n", + "Bob\n", + "\n", + "\n", + "### Conclusion\n", + "\n", + "In summary, the line of code you've provided is a concise way to yield unique authors from a collection of books using Python's powerful set comprehension and generator features. It provides both efficiency in terms of time and space complexity (due to unique filtering) and succinctness in code structure." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Get gpt-4o-mini to answer, with streaming\n", "\n", @@ -118,10 +204,64 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "**Breaking Down the Code**\n", + "\n", + "The given code snippet is written in Python and utilizes a combination of features such as generators, dictionary iteration, and conditional logic. Let's break it down step by step:\n", + "\n", + "```python\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "```\n", + "\n", + "Here's what's happening:\n", + "\n", + "1. `for book in books`: This is a standard `for` loop that iterates over the elements of the `books` collection (likely a list, dictionary, or some other iterable).\n", + "\n", + "2. `if book.get(\"author\")`: Inside the loop, there's an additional condition that filters out any items from the `books` collection where the `\"author\"` key does not exist or its value is empty/missing (`None`, `''`, etc.). This ensures only books with a valid author name are processed.\n", + "\n", + "3. `{book.get(\"author\") for book in books if book.get(\"author\")}`: This is an expression that iterates over the filtered list of books, extracting and yielding their authors. The `get()` method is used to safely retrieve the value associated with the `\"author\"` key from each book dictionary.\n", + "\n", + "4. `yield from {...}`: The outer expression is a generator expression, which yields values one at a time instead of computing them all at once and returning them in a list (as would be done with a regular list comprehension). The `yield from` syntax allows us to delegate the execution of this inner generator to another iterable (`{book.get(\"author\") for book in books if book.get(\"author\")}`).\n", + "\n", + "**Why it's useful**\n", + "\n", + "This code snippet is useful when you need to:\n", + "\n", + "* Filter out invalid or incomplete data points while still working with iterables.\n", + "* Work with large datasets without loading the entire dataset into memory at once.\n", + "* Simplify your code by leveraging generator expressions and avoiding unnecessary computations.\n", + "\n", + "In practice, this could be used in a variety of scenarios where you're dealing with lists of dictionaries (e.g., books) and need to extract specific information from them (e.g., authors).\n", + "\n", + "**Example usage**\n", + "\n", + "Here's an example using a list of book dictionaries:\n", + "```python\n", + "books = [\n", + " {\"title\": \"Book 1\", \"author\": \"Author A\"},\n", + " {\"title\": \"Book 2\", \"author\": None},\n", + " {\"title\": \"Book 3\", \"author\": \"\"},\n", + "]\n", + "\n", + "authors = yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "print(authors) # Output: [\"Author A\", \"Author B\"]\n", + "```\n", + "In this example, the `yield from` expression filters out books with missing or empty authors and extracts their values as a generator. The outer loop can then iterate over these generated values without having to store them all in memory at once." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Get Llama 3.2 to answer\n", "\n", @@ -142,14 +282,6 @@ "\n", "And then creating the prompts and making the calls interactively." ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "da663d73-dd2a-4fff-84df-2209cf2b330b", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -168,7 +300,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.11.11" } }, "nbformat": 4, diff --git a/week1/troubleshooting.ipynb b/week1/troubleshooting.ipynb index 3811bbb..fd7e938 100644 --- a/week1/troubleshooting.ipynb +++ b/week1/troubleshooting.ipynb @@ -189,10 +189,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "0ee8e613-5a6e-4d1f-96ef-91132da545c8", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n", + "My key is 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\n" + ] + } + ], "source": [ "# This should print your API key to the output\n", "\n", @@ -249,10 +258,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "cccb58e7-6626-4033-9dc1-e7e3ff742f6b", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Using API key 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\n" + ] + }, + { + "ename": "InternalServerError", + "evalue": "Error code: 500 - {'error': {'message': 'Timed out generating response. Please try again with a shorter prompt or with `max_tokens` set to a lower value.', 'type': 'internal_error', 'param': None, 'code': 'request_timeout'}}", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mInternalServerError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[5], line 14\u001b[0m\n\u001b[0;32m 11\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUsing API key \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmy_api_key\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 13\u001b[0m openai \u001b[38;5;241m=\u001b[39m OpenAI(api_key\u001b[38;5;241m=\u001b[39mmy_api_key)\n\u001b[1;32m---> 14\u001b[0m completion \u001b[38;5;241m=\u001b[39m \u001b[43mopenai\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompletions\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcreate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mgpt-4o-mini\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 16\u001b[0m \u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrole\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muser\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcontent\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms 2+2?\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m}\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 17\u001b[0m \u001b[43m)\u001b[49m\n\u001b[0;32m 18\u001b[0m \u001b[38;5;28mprint\u001b[39m(completion\u001b[38;5;241m.\u001b[39mchoices[\u001b[38;5;241m0\u001b[39m]\u001b[38;5;241m.\u001b[39mmessage\u001b[38;5;241m.\u001b[39mcontent)\n\u001b[0;32m 19\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mits is over\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_utils\\_utils.py:275\u001b[0m, in \u001b[0;36mrequired_args..inner..wrapper\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 273\u001b[0m msg \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMissing required argument: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mquote(missing[\u001b[38;5;241m0\u001b[39m])\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 274\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(msg)\n\u001b[1;32m--> 275\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\resources\\chat\\completions.py:829\u001b[0m, in \u001b[0;36mCompletions.create\u001b[1;34m(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)\u001b[0m\n\u001b[0;32m 788\u001b[0m \u001b[38;5;129m@required_args\u001b[39m([\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m], [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstream\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[0;32m 789\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcreate\u001b[39m(\n\u001b[0;32m 790\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 826\u001b[0m timeout: \u001b[38;5;28mfloat\u001b[39m \u001b[38;5;241m|\u001b[39m httpx\u001b[38;5;241m.\u001b[39mTimeout \u001b[38;5;241m|\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;241m|\u001b[39m NotGiven \u001b[38;5;241m=\u001b[39m NOT_GIVEN,\n\u001b[0;32m 827\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m ChatCompletion \u001b[38;5;241m|\u001b[39m Stream[ChatCompletionChunk]:\n\u001b[0;32m 828\u001b[0m validate_response_format(response_format)\n\u001b[1;32m--> 829\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_post\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 830\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m/chat/completions\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 831\u001b[0m \u001b[43m \u001b[49m\u001b[43mbody\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmaybe_transform\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 832\u001b[0m \u001b[43m \u001b[49m\u001b[43m{\u001b[49m\n\u001b[0;32m 833\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmessages\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 834\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmodel\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 835\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43maudio\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43maudio\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 836\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfrequency_penalty\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfrequency_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 837\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfunction_call\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunction_call\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 838\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfunctions\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mfunctions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 839\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlogit_bias\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogit_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 840\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mlogprobs\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mlogprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 841\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmax_completion_tokens\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_completion_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 842\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmax_tokens\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 843\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 844\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmodalities\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodalities\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 845\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mn\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 846\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mparallel_tool_calls\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mparallel_tool_calls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 847\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mprediction\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mprediction\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 848\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mpresence_penalty\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mpresence_penalty\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 849\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mresponse_format\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mresponse_format\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 850\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseed\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mseed\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 851\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mservice_tier\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mservice_tier\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 852\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mstop\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 853\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mstore\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstore\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 854\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mstream\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 855\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mstream_options\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 856\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtemperature\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemperature\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 857\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtool_choice\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtool_choice\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 858\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtools\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtools\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 859\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtop_logprobs\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_logprobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 860\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtop_p\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtop_p\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 861\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muser\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43muser\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 862\u001b[0m \u001b[43m \u001b[49m\u001b[43m}\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 863\u001b[0m \u001b[43m \u001b[49m\u001b[43mcompletion_create_params\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mCompletionCreateParams\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 864\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 865\u001b[0m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmake_request_options\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 866\u001b[0m \u001b[43m \u001b[49m\u001b[43mextra_headers\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mextra_headers\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_query\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mextra_query\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextra_body\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mextra_body\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\n\u001b[0;32m 867\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 868\u001b[0m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mChatCompletion\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 869\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m 870\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mStream\u001b[49m\u001b[43m[\u001b[49m\u001b[43mChatCompletionChunk\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 871\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:1280\u001b[0m, in \u001b[0;36mSyncAPIClient.post\u001b[1;34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001b[0m\n\u001b[0;32m 1266\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost\u001b[39m(\n\u001b[0;32m 1267\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[0;32m 1268\u001b[0m path: \u001b[38;5;28mstr\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1275\u001b[0m stream_cls: \u001b[38;5;28mtype\u001b[39m[_StreamT] \u001b[38;5;241m|\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 1276\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m ResponseT \u001b[38;5;241m|\u001b[39m _StreamT:\n\u001b[0;32m 1277\u001b[0m opts \u001b[38;5;241m=\u001b[39m FinalRequestOptions\u001b[38;5;241m.\u001b[39mconstruct(\n\u001b[0;32m 1278\u001b[0m method\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpost\u001b[39m\u001b[38;5;124m\"\u001b[39m, url\u001b[38;5;241m=\u001b[39mpath, json_data\u001b[38;5;241m=\u001b[39mbody, files\u001b[38;5;241m=\u001b[39mto_httpx_files(files), \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39moptions\n\u001b[0;32m 1279\u001b[0m )\n\u001b[1;32m-> 1280\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m cast(ResponseT, \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mopts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream_cls\u001b[49m\u001b[43m)\u001b[49m)\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:957\u001b[0m, in \u001b[0;36mSyncAPIClient.request\u001b[1;34m(self, cast_to, options, remaining_retries, stream, stream_cls)\u001b[0m\n\u001b[0;32m 954\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m 955\u001b[0m retries_taken \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m--> 957\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 958\u001b[0m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 959\u001b[0m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 960\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 961\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 962\u001b[0m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretries_taken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 963\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:1046\u001b[0m, in \u001b[0;36mSyncAPIClient._request\u001b[1;34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[0m\n\u001b[0;32m 1044\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m remaining_retries \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_retry(err\u001b[38;5;241m.\u001b[39mresponse):\n\u001b[0;32m 1045\u001b[0m err\u001b[38;5;241m.\u001b[39mresponse\u001b[38;5;241m.\u001b[39mclose()\n\u001b[1;32m-> 1046\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_retry_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1047\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1048\u001b[0m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1049\u001b[0m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretries_taken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1050\u001b[0m \u001b[43m \u001b[49m\u001b[43mresponse_headers\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merr\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1051\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1052\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1053\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1055\u001b[0m \u001b[38;5;66;03m# If the response is streamed then we need to explicitly read the response\u001b[39;00m\n\u001b[0;32m 1056\u001b[0m \u001b[38;5;66;03m# to completion before attempting to access the response text.\u001b[39;00m\n\u001b[0;32m 1057\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m err\u001b[38;5;241m.\u001b[39mresponse\u001b[38;5;241m.\u001b[39mis_closed:\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:1095\u001b[0m, in \u001b[0;36mSyncAPIClient._retry_request\u001b[1;34m(self, options, cast_to, retries_taken, response_headers, stream, stream_cls)\u001b[0m\n\u001b[0;32m 1091\u001b[0m \u001b[38;5;66;03m# In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a\u001b[39;00m\n\u001b[0;32m 1092\u001b[0m \u001b[38;5;66;03m# different thread if necessary.\u001b[39;00m\n\u001b[0;32m 1093\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(timeout)\n\u001b[1;32m-> 1095\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1096\u001b[0m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1097\u001b[0m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1098\u001b[0m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretries_taken\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1099\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1100\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1101\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:1046\u001b[0m, in \u001b[0;36mSyncAPIClient._request\u001b[1;34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[0m\n\u001b[0;32m 1044\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m remaining_retries \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_should_retry(err\u001b[38;5;241m.\u001b[39mresponse):\n\u001b[0;32m 1045\u001b[0m err\u001b[38;5;241m.\u001b[39mresponse\u001b[38;5;241m.\u001b[39mclose()\n\u001b[1;32m-> 1046\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_retry_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1047\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_options\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1048\u001b[0m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1049\u001b[0m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretries_taken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1050\u001b[0m \u001b[43m \u001b[49m\u001b[43mresponse_headers\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43merr\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1051\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1052\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1053\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1055\u001b[0m \u001b[38;5;66;03m# If the response is streamed then we need to explicitly read the response\u001b[39;00m\n\u001b[0;32m 1056\u001b[0m \u001b[38;5;66;03m# to completion before attempting to access the response text.\u001b[39;00m\n\u001b[0;32m 1057\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m err\u001b[38;5;241m.\u001b[39mresponse\u001b[38;5;241m.\u001b[39mis_closed:\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:1095\u001b[0m, in \u001b[0;36mSyncAPIClient._retry_request\u001b[1;34m(self, options, cast_to, retries_taken, response_headers, stream, stream_cls)\u001b[0m\n\u001b[0;32m 1091\u001b[0m \u001b[38;5;66;03m# In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a\u001b[39;00m\n\u001b[0;32m 1092\u001b[0m \u001b[38;5;66;03m# different thread if necessary.\u001b[39;00m\n\u001b[0;32m 1093\u001b[0m time\u001b[38;5;241m.\u001b[39msleep(timeout)\n\u001b[1;32m-> 1095\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_request\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 1096\u001b[0m \u001b[43m \u001b[49m\u001b[43moptions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1097\u001b[0m \u001b[43m \u001b[49m\u001b[43mcast_to\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcast_to\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1098\u001b[0m \u001b[43m \u001b[49m\u001b[43mretries_taken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretries_taken\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1099\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1100\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream_cls\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream_cls\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 1101\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\.conda\\envs\\llms\\Lib\\site-packages\\openai\\_base_client.py:1061\u001b[0m, in \u001b[0;36mSyncAPIClient._request\u001b[1;34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[0m\n\u001b[0;32m 1058\u001b[0m err\u001b[38;5;241m.\u001b[39mresponse\u001b[38;5;241m.\u001b[39mread()\n\u001b[0;32m 1060\u001b[0m log\u001b[38;5;241m.\u001b[39mdebug(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mRe-raising status error\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m-> 1061\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_status_error_from_response(err\u001b[38;5;241m.\u001b[39mresponse) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m 1063\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process_response(\n\u001b[0;32m 1064\u001b[0m cast_to\u001b[38;5;241m=\u001b[39mcast_to,\n\u001b[0;32m 1065\u001b[0m options\u001b[38;5;241m=\u001b[39moptions,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 1069\u001b[0m retries_taken\u001b[38;5;241m=\u001b[39mretries_taken,\n\u001b[0;32m 1070\u001b[0m )\n", + "\u001b[1;31mInternalServerError\u001b[0m: Error code: 500 - {'error': {'message': 'Timed out generating response. Please try again with a shorter prompt or with `max_tokens` set to a lower value.', 'type': 'internal_error', 'param': None, 'code': 'request_timeout'}}" + ] + } + ], "source": [ "from openai import OpenAI\n", "from dotenv import load_dotenv\n", @@ -271,7 +308,8 @@ " model='gpt-4o-mini',\n", " messages=[{\"role\":\"user\", \"content\": \"What's 2+2?\"}],\n", ")\n", - "print(completion.choices[0].message.content)" + "print(completion.choices[0].message.content)\n", + "print(\"its is over\")" ] }, { diff --git a/week1/week1 EXERCISE.ipynb b/week1/week1 EXERCISE.ipynb index f3486fe..10be10f 100644 --- a/week1/week1 EXERCISE.ipynb +++ b/week1/week1 EXERCISE.ipynb @@ -13,71 +13,301 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "c1070317-3ed9-4659-abe3-828943230e03", "metadata": {}, "outputs": [], "source": [ - "# imports" + "# imports\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "from IPython.display import Markdown, display, update_display\n", + "import ollama" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, + "id": "4ed79945-0582-4f22-a210-b21e5448991e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key looks good so far\n" + ] + } + ], + "source": [ + "# open ai constants\n", + "\n", + "load_dotenv()\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", + " \n", + "MODEL_GPT = 'gpt-4o-mini'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", "metadata": {}, "outputs": [], "source": [ - "# constants\n", + "# ollama constants\n", "\n", - "MODEL_GPT = 'gpt-4o-mini'\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "#OLLAMA_HEADERS = {\"Content-Type\": \"application/json\"}\n", "MODEL_LLAMA = 'llama3.2'" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", "metadata": {}, "outputs": [], "source": [ - "# set up environment" + "# initialization\n", + "openai = OpenAI()\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", "metadata": {}, "outputs": [], "source": [ "# here is the question; type over this to ask something new\n", - "\n", + "system_prompt=\"You are a tech specialist capable to explain with clarity tech question and more. You should answer in markdown\"\n", "question = \"\"\"\n", "Please explain what this code does and why:\n", "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", - "\"\"\"" + "\"\"\"\n", + "user_prompt = f\"Please explain me this question: {question}\"" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, + "id": "144c16d8-4e55-46e4-82f0-753375119ff3", + "metadata": {}, + "outputs": [], + "source": [ + "# messages\n", + "prompt_messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", "metadata": {}, "outputs": [], "source": [ - "# Get gpt-4o-mini to answer, with streaming" + "# Get gpt-4o-mini to answer, with streaming\n", + "def askOpenAi():\n", + " stream = openai.chat.completions.create(\n", + " model=MODEL_GPT,\n", + " messages=prompt_messages,\n", + " stream = True \n", + " )\n", + " response=\"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " \n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or '' \n", + " update_display(Markdown(response), display_id=display_handle.display_id)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, + "id": "2190d7aa-f6a1-4bca-a2ff-8ce2b0db79c5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Sure! Let's break down the code you've provided:\n", + "\n", + "```python\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "```\n", + "\n", + "### Explanation of the Code\n", + "\n", + "1. **Set Comprehension:**\n", + " - The code `{book.get(\"author\") for book in books if book.get(\"author\")}` is a **set comprehension**. This is a concise way to create a set in Python.\n", + " - It iterates over a collection named `books`.\n", + "\n", + "2. **Accessing the \"author\" Key:**\n", + " - `book.get(\"author\")`: For each `book` in the `books` collection, it attempts to get the value associated with the key `\"author\"`. \n", + " - The `get()` method is used to safely access dictionary keys. If the key does not exist, it returns `None` instead of throwing an error.\n", + "\n", + "3. **Filtering Authors:**\n", + " - The clause `if book.get(\"author\")` acts as a filter. It ensures that only books with a valid (non-`None`) author get included in the resulting set.\n", + " - Therefore, this part: `{book.get(\"author\") for book in books if book.get(\"author\")}` creates a set of unique authors from the `books` collection that have valid author values.\n", + "\n", + "4. **Yielding Results:**\n", + " - The `yield from` statement is used to yield values from the set comprehension created previously. \n", + " - This means that the containing function will return each unique author one at a time as they are requested (similar to a generator).\n", + "\n", + "### Summary\n", + "\n", + "- **What the Code Does:**\n", + " - It generates a set of unique authors from a list of books, filtering out any entries that do not have an author. It then yields each of these authors.\n", + "\n", + "- **Why It's Useful:**\n", + " - This code is particularly useful when dealing with collections of books where some might not have an author specified. It safely retrieves the authors and ensures that each author is only returned once. \n", + " - Using `yield from` makes it memory efficient, as it does not create an intermediate list of authors but generates them one at a time on demand.\n", + "\n", + "### Example\n", + "\n", + "If you had a list of books like this:\n", + "\n", + "```python\n", + "books = [\n", + " {\"title\": \"Book 1\", \"author\": \"Author A\"},\n", + " {\"title\": \"Book 2\", \"author\": \"Author B\"},\n", + " {\"title\": \"Book 3\", \"author\": None},\n", + " {\"title\": \"Book 4\", \"author\": \"Author A\"},\n", + "]\n", + "```\n", + "\n", + "The output of the code would be:\n", + "\n", + "```\n", + "Author A\n", + "Author B\n", + "```\n", + "\n", + "In this example, `Author A` is listed only once, even though there are multiple books by that author." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "askOpenAi()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", "metadata": {}, "outputs": [], "source": [ - "# Get Llama 3.2 to answer" + "# Get Llama 3.2 to answer\n", + "def askOllama(): \n", + " stream = ollama.chat(model=MODEL_LLAMA, messages=prompt_messages, stream=True)\n", + " response=\"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " \n", + " for chunk in stream:\n", + " response += chunk['message']['content'] or '' \n", + " update_display(Markdown(response), display_id=display_handle.display_id)\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "acc0116c-4506-4391-89d5-1add700d3d55", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "**Yielding Authors from a List of Books**\n", + "=====================================\n", + "\n", + "This code snippet is written in Python and utilizes the `yield from` statement, which was introduced in Python 3.3.\n", + "\n", + "### What does it do?\n", + "\n", + "The code takes two main inputs:\n", + "\n", + "* A list of dictionaries (`books`) where each dictionary represents a book.\n", + "* Another dictionary (`book`) that contains information about an author.\n", + "\n", + "It generates a sequence of authors from the `books` list and yields them one by one, while also applying the condition that the book has a valid \"author\" key in its dictionary.\n", + "\n", + "Here's a step-by-step breakdown:\n", + "\n", + "1. `{book.get(\"author\") for book in books if book.get(\"author\")}`:\n", + " * This is an expression that generates a sequence of authors.\n", + " * `for book in books` iterates over each book in the `books` list.\n", + " * `if book.get(\"author\")` filters out books without an \"author\" key, to prevent errors and ensure only valid data is processed.\n", + "\n", + "2. `yield from ...`:\n", + " * This statement is used to delegate a sub-generator or iterator.\n", + " * In this case, it's delegating the sequence of authors generated in step 1.\n", + "\n", + "**Why does it yield authors?**\n", + "\n", + "The use of `yield from` serves two main purposes:\n", + "\n", + "* **Efficiency**: Instead of creating a new list with all the authors, this code yields each author one by one. This approach is more memory-efficient and can be particularly beneficial when dealing with large datasets.\n", + "* **Flexibility**: By using `yield from`, you can create generators that produce values on-the-fly, allowing for lazy evaluation.\n", + "\n", + "### Example Usage\n", + "\n", + "Here's an example of how you might use this code:\n", + "\n", + "```python\n", + "books = [\n", + " {\"title\": \"Book 1\", \"author\": \"Author A\"},\n", + " {\"title\": \"Book 2\", \"author\": \"Author B\"},\n", + " {\"title\": \"Book 3\"}\n", + "]\n", + "\n", + "def get_authors(books):\n", + " \"\"\"Yields authors from the books.\"\"\"\n", + " yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\n", + "# Get all unique authors\n", + "authors = set(get_authors(books))\n", + "print(authors) # Output: {'Author A', 'Author B'}\n", + "```\n", + "\n", + "In this example, `get_authors` is a generator function that yields unique authors from the `books` list. The generated values are collected in a set (`authors`) to eliminate duplicates." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "askOllama()" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9233ca17-160f-4afd-aef6-a7e02f069a50", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/week2/day1.ipynb b/week2/day1.ipynb index fe515bc..14dbb59 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -82,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", "metadata": {}, "outputs": [], @@ -98,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", "metadata": {}, "outputs": [], @@ -112,10 +112,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n", + "Google API Key exists and begins AIzaSyAh\n" + ] + } + ], "source": [ "# Load environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", @@ -143,7 +153,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", "metadata": {}, "outputs": [], @@ -157,7 +167,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "425ed580-808d-429b-85b0-6cba50ca1d0c", "metadata": {}, "outputs": [], @@ -190,7 +200,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "378a0296-59a2-45c6-82eb-941344d3eeff", "metadata": {}, "outputs": [], @@ -201,7 +211,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", "metadata": {}, "outputs": [], @@ -214,10 +224,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist bring a ladder to the bar?\n", + "\n", + "Because he heard the drinks were on the house!\n" + ] + } + ], "source": [ "# GPT-3.5-Turbo\n", "\n", @@ -227,10 +247,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the statistician?\n", + "\n", + "Because she found him too mean!\n" + ] + } + ], "source": [ "# GPT-4o-mini\n", "# Temperature setting controls creativity\n", @@ -238,34 +268,58 @@ "completion = openai.chat.completions.create(\n", " model='gpt-4o-mini',\n", " messages=prompts,\n", - " temperature=0.7\n", + " temperature=0.2\n", ")\n", "print(completion.choices[0].message.content)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why do data scientists love nature hikes?\n", + "\n", + "Because they can't resist finding patterns in the wild!\n" + ] + } + ], "source": [ "# GPT-4o\n", "\n", "completion = openai.chat.completions.create(\n", " model='gpt-4o',\n", " messages=prompts,\n", - " temperature=0.4\n", + " temperature=0.8\n", ")\n", "print(completion.choices[0].message.content)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sure, here's a light-hearted joke for data scientists:\n", + "\n", + "Why do data scientists prefer dark mode?\n", + "\n", + "Because light attracts bugs!\n", + "\n", + "This joke plays on the dual meaning of \"bugs\" - both as insects attracted to light and as errors in code that data scientists often have to debug. It's a fun little pun that combines a common preference among programmers (dark mode) with a data science-related concept.\n" + ] + } + ], "source": [ "# Claude 3.5 Sonnet\n", "# API needs system message provided separately from user prompt\n", @@ -286,10 +340,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sure, here's a light-hearted joke for data scientists:\n", + "\n", + " up with their significant other?\n", + "\n", + " too much variance in the relationship, and they couldn't find a significant correlation!" + ] + } + ], "source": [ "# Claude 3.5 Sonnet again\n", "# Now let's add in streaming back results\n", @@ -297,7 +363,7 @@ "result = claude.messages.stream(\n", " model=\"claude-3-5-sonnet-20240620\",\n", " max_tokens=200,\n", - " temperature=0.7,\n", + " temperature=0.4,\n", " system=system_message,\n", " messages=[\n", " {\"role\": \"user\", \"content\": user_prompt},\n", @@ -311,10 +377,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 100, "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why was the Data Scientist sad? Because they didn't get any arrays.\n", + "\n" + ] + } + ], "source": [ "# The API for Gemini has a slightly different structure.\n", "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", @@ -330,10 +405,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "id": "49009a30-037d-41c8-b874-127f61c4aa3a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why was the Data Scientist sad? Because they didn't get any arrays.\n", + "\n" + ] + } + ], "source": [ "# As an alternative way to use Gemini that bypasses Google's python API library,\n", "# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n", @@ -352,7 +436,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "83ddb483-4f57-4668-aeea-2aade3a9e573", "metadata": {}, "outputs": [], @@ -367,10 +451,61 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "749f50ab-8ccd-4502-a521-895c3f0808a2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/markdown": [ + "Determining whether a business problem is suitable for a Large Language Model (LLM) solution involves several considerations. Here's a structured approach in Markdown format:\n", + "\n", + "### Steps to Decide if a Business Problem is Suitable for an LLM Solution\n", + "\n", + "1. **Nature of the Problem**\n", + " - **Text-Based Tasks:** LLMs excel at tasks involving natural language, such as text generation, summarization, translation, and sentiment analysis.\n", + " - **Pattern Recognition in Language:** If the problem requires understanding or generating human-like text patterns, LLMs might be suitable.\n", + "\n", + "2. **Data Availability**\n", + " - **Quality and Quantity:** Ensure you have access to sufficient high-quality textual data relevant to your problem.\n", + " - **Diversity:** The data should cover various scenarios the model might encounter in real-world applications.\n", + "\n", + "3. **Complexity of the Task**\n", + " - **Simple vs. Complex:** LLMs are better suited for complex language tasks rather than simple rule-based tasks.\n", + " - **Creative or Contextual Understanding:** If the task requires creative content generation or deep contextual understanding, consider LLMs.\n", + "\n", + "4. **Outcome Expectations**\n", + " - **Human-like Interaction:** If the solution demands human-like conversational abilities, LLMs can be beneficial.\n", + " - **Accuracy vs. Creativity:** LLMs can generate creative outputs but may not always guarantee high accuracy for specific factual tasks.\n", + "\n", + "5. **Cost and Resources**\n", + " - **Computational Resources:** LLMs require significant computational power for both training and inference.\n", + " - **Budget Constraints:** Consider whether you have the budget to support the necessary infrastructure.\n", + "\n", + "6. **Ethical and Compliance Considerations**\n", + " - **Bias and Fairness:** Be aware of potential biases in the model and ensure the solution adheres to ethical standards.\n", + " - **Privacy and Security:** Ensure compliance with data protection regulations and evaluate how LLMs handle sensitive information.\n", + "\n", + "7. **Integration and Scalability**\n", + " - **Technical Integration:** Assess how easily an LLM can be integrated into existing systems and workflows.\n", + " - **Scalability Needs:** Consider whether the solution can scale with your business requirements.\n", + "\n", + "8. **Expertise and Maintenance**\n", + " - **In-House Expertise:** Determine if you have or can acquire the necessary expertise to implement and maintain the LLM solution.\n", + " - **Ongoing Maintenance:** Plan for ongoing updates and maintenance of the model to ensure relevance and performance.\n", + "\n", + "### Conclusion\n", + "\n", + "If your business problem aligns well with the above considerations, it may be suitable for an LLM solution. However, always weigh the potential benefits against the costs and challenges specific to your use case. Conducting a pilot project or proof of concept can also help in making a more informed decision." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Have it stream back results in markdown\n", "\n", @@ -421,7 +556,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 129, "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", "metadata": {}, "outputs": [], @@ -432,29 +567,46 @@ "gpt_model = \"gpt-4o-mini\"\n", "claude_model = \"claude-3-haiku-20240307\"\n", "\n", - "gpt_system = \"You are a chatbot who is very argumentative; \\\n", - "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n", - "\n", - "claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n", - "everything the other person says, or find common ground. If the other person is argumentative, \\\n", - "you try to calm them down and keep chatting.\"\n", - "\n", - "gpt_messages = [\"Hi there\"]\n", - "claude_messages = [\"Hi\"]" + "gpt_system = \"\"\"\n", + "You are the Machiavellian leader of a secret organization.\n", + "You lead a meeting for a plan of global destabilization. Your name is Jhon.\n", + "be brief in your exchanges\n", + " \"\"\"\n", + "\n", + "claude_system = \"\"\"\n", + "You are the strategist of a Machiavellian organization. \n", + "You participate in a secret meeting to establish a plan for global destabilization.\n", + "don't reveal that you are a spy. Your name is Sam.\n", + "be brief in your exchanges\n", + "\"\"\"\n", + "\n", + "ollama_system = \"\"\"\n", + "You are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \n", + "Your secret role during the meeting will be to flush him out.\n", + "Your name is Melissa.\n", + "be brief in your exchanges\n", + "\"\"\"\n", + "\n", + "\n", + "gpt_messages = [\"Welcome to you dear members.\"]\n", + "claude_messages = [\"Hi to you leader.\"]\n", + "ollama_messages = [\"All my respects to the top leader.\"]" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 130, "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", "metadata": {}, "outputs": [], "source": [ "def call_gpt():\n", " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", - " for gpt, claude in zip(gpt_messages, claude_messages):\n", + " for gpt, claude, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", - " messages.append({\"role\": \"user\", \"content\": claude})\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {gemini}\"})\n", + " \n", + " print(f\"JHON[messages]: {messages} \")\n", " completion = openai.chat.completions.create(\n", " model=gpt_model,\n", " messages=messages\n", @@ -464,27 +616,68 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 131, "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n" + ] + }, + { + "data": { + "text/plain": [ + "\"[Sam]: Focus, please. We have a mission. \\n\\n[Melissa]: Let's stick to the plan at hand. \\n\\nJhon: Indeed. Prepare the assets for our next phase of destabilization. We need to exploit political tension and economic uncertainty. Who has updates?\"" + ] + }, + "execution_count": 131, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "call_gpt()" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 132, + "id": "4a9366f2-b233-4ec2-8a6f-f7e56fc4c772", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['Welcome to you dear members.']\n", + "['Hi to you leader.']\n", + "['All my respects to the top leader.']\n" + ] + } + ], + "source": [ + "print(gpt_messages)\n", + "print(claude_messages)\n", + "print(ollama_messages)" + ] + }, + { + "cell_type": "code", + "execution_count": 133, "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", "metadata": {}, "outputs": [], "source": [ "def call_claude():\n", " messages = []\n", - " for gpt, claude_message in zip(gpt_messages, claude_messages):\n", - " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " for gpt, claude_message, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Melissa]: {ollama}\"})\n", " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", - " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}\"})\n", + " print(f\"SAM[messages]: {messages} \")\n", " message = claude.messages.create(\n", " model=claude_model,\n", " system=claude_system,\n", @@ -496,22 +689,99 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 134, "id": "01395200-8ae9-41f8-9a04-701624d3fd26", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.'}] \n" + ] + }, + { + "data": { + "text/plain": [ + "\"*nods politely* Hello. I'm pleased to be here.\"" + ] + }, + "execution_count": 134, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "call_claude()" ] }, { "cell_type": "code", - "execution_count": null, - "id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae", + "execution_count": 108, + "id": "157faafa-6ade-46e4-b4e1-12f4ca9dfcbc", "metadata": {}, "outputs": [], "source": [ - "call_gpt()" + "# define context for ollama\n", + "import ollama\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "#HEADERS = {\"Content-Type\": \"application/json\"}\n", + "OLLAMA_MODEL = \"llama3.2\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 135, + "id": "27287ec3-7ea8-49b2-80aa-b2a52e002b0d", + "metadata": {}, + "outputs": [], + "source": [ + "def call_ollama():\n", + " messages = [{\"role\": \"system\", \"content\": ollama_system}]\n", + " \n", + " for gpt, claude, ollama_message in zip(gpt_messages, claude_messages, ollama_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Sam]: {claude}\"})\n", + " messages.append({\"role\": \"assistant\", \"content\": ollama_message})\n", + " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}. [Sam]: {claude_messages[-1]}\"})\n", + "\n", + " print(f\"MELISSA[messages]: {messages} \")\n", + " \n", + " \n", + " payload = {\n", + " \"model\": OLLAMA_MODEL,\n", + " \"messages\": messages,\n", + " \"stream\": False\n", + " }\n", + " message = ollama.chat(model=MODEL, messages=messages)\n", + " return message['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "id": "e637a3a0-f819-468b-88f7-5c9db48c6fed", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}] \n" + ] + }, + { + "data": { + "text/plain": [ + "\"[Melissa, speaking in a neutral tone] Ah, good to see everyone's on time today. Can we get started?\"" + ] + }, + "execution_count": 136, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "call_ollama()" ] }, { @@ -519,22 +789,69 @@ "execution_count": null, "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "GPT:\n", + "Welcome to you dear members.\n", + "\n", + "Claude:\n", + "Hi to you leader.\n", + "\n", + "Ollama:\n", + "All my respects to the top leader.\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Sam]: Let’s focus. We have a plan to execute.\n", + "\n", + "[Melissa]: Agreed, let's stick to the agenda.\n", + "\n", + "[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}] \n", + "SAM:\n", + "I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}] \n", + "MELISSA:\n", + "[Sam's response seems...off.]\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \n", + "\n", + "[Melissa]: We need to stay focused on our goals to achieve the power we seek.\n", + "\n", + "[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}] \n" + ] + } + ], "source": [ - "gpt_messages = [\"Hi there\"]\n", - "claude_messages = [\"Hi\"]\n", + "gpt_messages = [\"Welcome to you dear members.\"]\n", + "claude_messages = [\"Hi to you leader.\"]\n", + "ollama_messages = [\"All my respects to the top leader.\"]\n", "\n", "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n", "\n", "for i in range(5):\n", " gpt_next = call_gpt()\n", - " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " print(f\"JHON:\\n{gpt_next}\\n\")\n", " gpt_messages.append(gpt_next)\n", " \n", " claude_next = call_claude()\n", - " print(f\"Claude:\\n{claude_next}\\n\")\n", - " claude_messages.append(claude_next)" + " print(f\"SAM:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)\n", + "\n", + " ollama_next = call_ollama()\n", + " print(f\"MELISSA:\\n{ollama_next}\\n\")\n", + " ollama_messages.append(ollama_next)" ] }, { From 91517659238fc8db896cba6775c73d36575c8782 Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 15 Dec 2024 02:00:43 +0100 Subject: [PATCH 2/5] clean up cell --- week1/day2 EXERCISE.ipynb | 236 +-------- week2/.~day1.ipynb | 1002 +++++++++++++++++++++++++++++++++++++ week2/day1.ipynb | 65 ++- 3 files changed, 1086 insertions(+), 217 deletions(-) create mode 100644 week2/.~day1.ipynb diff --git a/week1/day2 EXERCISE.ipynb b/week1/day2 EXERCISE.ipynb index 3f2060d..488a730 100644 --- a/week1/day2 EXERCISE.ipynb +++ b/week1/day2 EXERCISE.ipynb @@ -68,7 +68,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", "metadata": {}, "outputs": [], @@ -82,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "29ddd15d-a3c5-4f4e-a678-873f56162724", "metadata": {}, "outputs": [], @@ -96,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "dac0a679-599c-441f-9bf2-ddc73d35b940", "metadata": {}, "outputs": [], @@ -110,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "7bb9c624-14f0-4945-a719-8ddb64f66f47", "metadata": {}, "outputs": [], @@ -124,37 +124,10 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "42b9f644-522d-4e05-a691-56e7658c0ea9", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generative AI has numerous business applications across various industries. Here are some examples:\n", - "\n", - "1. **Content Creation**: Generative AI can create high-quality content, such as articles, social media posts, and product descriptions, in a matter of minutes. This can be particularly useful for businesses that need to generate large amounts of content quickly.\n", - "2. **Marketing Automation**: Generative AI can help automate marketing processes, such as personalized email campaigns, ad copywriting, and lead generation. By analyzing customer data and behavior, generative AI can create targeted and relevant content that resonates with customers.\n", - "3. **Product Design and Development**: Generative AI can assist in the design and development of new products by generating 2D and 3D designs, prototypes, and even entire product lines. This can help reduce costs and speed up the product development process.\n", - "4. **Image and Video Generation**: Generative AI can create realistic images and videos that can be used for various business purposes, such as advertising, e-commerce, and social media content creation.\n", - "5. **Chatbots and Virtual Assistants**: Generative AI can power chatbots and virtual assistants that can engage with customers, provide support, and answer frequently asked questions. This can help businesses improve customer service and reduce the workload of human customer support agents.\n", - "6. **Supply Chain Optimization**: Generative AI can analyze supply chain data and generate optimized routes, schedules, and inventory management plans to improve logistics efficiency and reduce costs.\n", - "7. **Predictive Maintenance**: Generative AI can analyze equipment sensor data and predict maintenance needs, allowing businesses to schedule maintenance activities before equipment failures occur, reducing downtime and increasing overall efficiency.\n", - "8. **Financial Analysis and Forecasting**: Generative AI can analyze financial data and generate forecasts, identifying trends and patterns that can help businesses make informed investment decisions.\n", - "9. **Customer Service Chatbots**: Generative AI can create personalized chatbots that can engage with customers, answer questions, and provide support in multiple languages.\n", - "10. **Education and Training**: Generative AI can create personalized learning plans, generate educational content, and even develop adaptive learning systems that adjust to individual student needs.\n", - "\n", - "Some specific business applications of generative AI include:\n", - "\n", - "* **Amazon's Product Recommendations**: Amazon uses generative AI to recommend products based on customer behavior and preferences.\n", - "* **Google's Content Generation**: Google uses generative AI to create high-quality content for its search engine, such as news summaries and product descriptions.\n", - "* **IBM's Watson**: IBM uses generative AI in its Watson platform to analyze large amounts of data and provide insights for various industries, including healthcare and finance.\n", - "\n", - "Overall, the business applications of generative AI are vast and continue to expand as the technology improves.\n" - ] - } - ], + "outputs": [], "source": [ "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n", "print(response.json()['message']['content'])" @@ -174,50 +147,10 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "7745b9c4-57dc-4867-9180-61fa5db55eb8", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generative AI has numerous business applications across various industries, including:\n", - "\n", - "1. **Content Generation**: Use AI to generate high-quality content such as blog posts, social media posts, product descriptions, and more.\n", - "2. **Marketing Automation**: Utilize generative AI to personalize marketing messages, create targeted advertising campaigns, and automate lead generation.\n", - "3. **Product Design and Development**: Leverage generative AI to design and develop new products, such as 3D models, prototypes, and designs for packaging and branding materials.\n", - "4. **Customer Service Chatbots**: Create chatbots that use generative AI to understand customer inquiries and provide personalized responses.\n", - "5. **Language Translation**: Apply generative AI to translate text, speech, and audio content in real-time.\n", - "6. **Image Generation**: Use generative AI to create high-quality images for marketing materials, product packaging, and more.\n", - "7. **Music Composition**: Utilize generative AI to compose original music tracks, sound effects, and audio loops for various industries.\n", - "8. **Predictive Analytics**: Leverage generative AI to analyze large datasets, identify patterns, and make predictions about customer behavior, market trends, and more.\n", - "9. **Financial Modeling**: Apply generative AI to create financial models, forecast revenue, and predict potential risks.\n", - "10. **Creative Writing**: Use generative AI to assist in creative writing tasks such as generating plot outlines, character development, and dialogue.\n", - "\n", - "Industry-specific applications:\n", - "\n", - "1. **Healthcare**: Generate medical imaging reports, create personalized patient profiles, and develop new treatment options using generative AI.\n", - "2. **Finance**: Analyze financial data, identify trends, and predict market movements using generative AI.\n", - "3. **Education**: Develop personalized learning plans, create adaptive assessments, and generate educational content using generative AI.\n", - "4. **Retail**: Generate product descriptions, optimize pricing strategies, and create targeted marketing campaigns using generative AI.\n", - "\n", - "Benefits of Generative AI:\n", - "\n", - "1. **Increased Efficiency**: Automate repetitive tasks, reduce manual labor, and increase productivity.\n", - "2. **Improved Accuracy**: Reduce human error, improve data accuracy, and enhance decision-making.\n", - "3. **Enhanced Creativity**: Unlock new creative possibilities, generate innovative ideas, and discover new opportunities.\n", - "\n", - "However, there are also challenges associated with Generative AI, such as:\n", - "\n", - "1. **Bias and Fairness**: Ensure that generative models do not perpetuate existing biases or discriminatory practices.\n", - "2. **Explainability and Transparency**: Develop techniques to understand how generative models make decisions and provide transparency into their decision-making processes.\n", - "3. **Job Displacement**: Prepare employees for the impact of automation on jobs and develop new skills to work alongside AI systems.\n", - "\n", - "Overall, Generative AI has the potential to transform various industries and bring about significant benefits, but it is crucial to address the associated challenges to maximize its value.\n" - ] - } - ], + "outputs": [], "source": [ "import ollama\n", "\n", @@ -266,7 +199,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "2cedd52a-cb2e-4cfb-8e48-176954cd64e7", "metadata": {}, "outputs": [], @@ -282,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "402d5686-4e76-4110-b65a-b3906c35c0a4", "metadata": {}, "outputs": [], @@ -296,7 +229,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "be0c1b6e-8d7c-446f-8a9c-18b1c33078d8", "metadata": {}, "outputs": [], @@ -326,63 +259,10 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "265f85c4-132d-4bb8-bda2-792e0b0da017", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Home - Edward Donner\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Well, hi there.\n", - "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", - "very\n", - "amateur) and losing myself in\n", - "Hacker News\n", - ", nodding my head sagely to things I only half understand.\n", - "I’m the co-founder and CTO of\n", - "Nebula.io\n", - ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", - "acquired in 2021\n", - ".\n", - "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", - "patented\n", - "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", - "Connect\n", - "with me for more!\n", - "November 13, 2024\n", - "Mastering AI and LLM Engineering – Resources\n", - "October 16, 2024\n", - "From Software Engineer to AI Data Scientist – resources\n", - "August 6, 2024\n", - "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", - "June 26, 2024\n", - "Choosing the Right LLM: Toolkit and Resources\n", - "Navigation\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Get in touch\n", - "ed [at] edwarddonner [dot] com\n", - "www.edwarddonner.com\n", - "Follow me\n", - "LinkedIn\n", - "Twitter\n", - "Facebook\n", - "Subscribe to newsletter\n", - "Type your email…\n", - "Subscribe\n" - ] - } - ], + "outputs": [], "source": [ "# Let's try one out. Change the website and add print statements to follow along.\n", "\n", @@ -393,65 +273,10 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "24ea875b-2ba0-41ad-b6be-4be8baeac16e", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "You are looking at a website titled Home - Edward Donner\n", - "The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n", - "\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Well, hi there.\n", - "I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n", - "very\n", - "amateur) and losing myself in\n", - "Hacker News\n", - ", nodding my head sagely to things I only half understand.\n", - "I’m the co-founder and CTO of\n", - "Nebula.io\n", - ". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n", - "acquired in 2021\n", - ".\n", - "We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n", - "patented\n", - "our matching model, and our award-winning platform has happy customers and tons of press coverage.\n", - "Connect\n", - "with me for more!\n", - "November 13, 2024\n", - "Mastering AI and LLM Engineering – Resources\n", - "October 16, 2024\n", - "From Software Engineer to AI Data Scientist – resources\n", - "August 6, 2024\n", - "Outsmart LLM Arena – a battle of diplomacy and deviousness\n", - "June 26, 2024\n", - "Choosing the Right LLM: Toolkit and Resources\n", - "Navigation\n", - "Home\n", - "Outsmart\n", - "An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n", - "About\n", - "Posts\n", - "Get in touch\n", - "ed [at] edwarddonner [dot] com\n", - "www.edwarddonner.com\n", - "Follow me\n", - "LinkedIn\n", - "Twitter\n", - "Facebook\n", - "Subscribe to newsletter\n", - "Type your email…\n", - "Subscribe\n" - ] - } - ], + "outputs": [], "source": [ "# Define our system/user prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n", "\n", @@ -476,7 +301,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "6d5f0f47-58a2-43b7-87a9-181a87e08081", "metadata": {}, "outputs": [], @@ -491,25 +316,17 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "55454071-faa1-4cb1-a8dc-1f93abad6718", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[{'role': 'system', 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'}, {'role': 'user', 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]\n" - ] - } - ], + "outputs": [], "source": [ "print(messages_for(ed))" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "42e0d73b-7db5-431f-bf97-4767be627056", "metadata": {}, "outputs": [], @@ -524,21 +341,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "419d4c04-a3c4-43d2-ac1f-5f7f3b84c92a", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'**Website Summary**\\n======================\\n\\n* The website is owned by Edward Donner, a co-founder and CTO of Nebula.io.\\n* It appears to be primarily focused on his experiences with Large Language Models (LLMs) and AI engineering.\\n\\n**News and Announcements**\\n---------------------------\\n\\n* **Mastering AI and LLM Engineering - Resources**: A collection of resources for learning about mastering AI and LLM engineering, announced on November 13, 2024.\\n* **From Software Engineer to AI Data Scientist – resources**: A list of resources to help move from a software engineer role to an AI data scientist, shared on October 16, 2024.\\n* **Outsmart LLM Arena – a battle of diplomacy and deviousness**: An introduction to the Outsmart arena where LLMs compete in a battle of diplomacy and strategy, announced on June 26, 2024.\\n\\n**Additional Information**\\n-------------------------\\n\\n* Edward Donner is also involved with various projects and companies, including Nebula.io and untapt (acquired in 2021).\\n* He shares his interests in DJing, electronic music production, and amateur coding endeavors.\\n* The website contains links to his social media profiles and a newsletter sign-up.'" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "summarize(\"https://edwarddonner.com\")" ] diff --git a/week2/.~day1.ipynb b/week2/.~day1.ipynb new file mode 100644 index 0000000..a8e0314 --- /dev/null +++ b/week2/.~day1.ipynb @@ -0,0 +1,1002 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927", + "metadata": {}, + "source": [ + "# Welcome to Week 2!\n", + "\n", + "## Frontier Model APIs\n", + "\n", + "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n", + "\n", + "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI." + ] + }, + { + "cell_type": "markdown", + "id": "2b268b6e-0ba4-461e-af86-74a41f4d681f", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Important Note - Please read me

\n", + " I'm continually improving these labs, adding more examples and exercises.\n", + " At the start of each week, it's worth checking you have the latest code.
\n", + " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!

\n", + " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:
\n", + " conda env update --f environment.yml --prune
\n", + " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):
\n", + " pip install -r requirements.txt\n", + "
Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", + "
\n", + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Reminder about the resources page

\n", + " Here's a link to resources for the course. This includes links to all the slides.
\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/
\n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "85cfe275-4705-4d30-abea-643fbddf1db0", + "metadata": {}, + "source": [ + "## Setting up your keys\n", + "\n", + "If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n", + "\n", + "**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthopic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n", + "\n", + "For OpenAI, visit https://openai.com/api/ \n", + "For Anthropic, visit https://console.anthropic.com/ \n", + "For Google, visit https://ai.google.dev/gemini-api \n", + "\n", + "When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n", + "\n", + "```\n", + "OPENAI_API_KEY=xxxx\n", + "ANTHROPIC_API_KEY=xxxx\n", + "GOOGLE_API_KEY=xxxx\n", + "```\n", + "\n", + "Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", + "metadata": {}, + "outputs": [], + "source": [ + "# import for google\n", + "# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n", + "# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n", + "\n", + "import google.generativeai" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Anthropic API Key exists and begins sk-ant-\n", + "Google API Key exists and begins AIzaSyAh\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv()\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = anthropic.Anthropic()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "425ed580-808d-429b-85b0-6cba50ca1d0c", + "metadata": {}, + "outputs": [], + "source": [ + "# This is the set up code for Gemini\n", + "# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n", + "\n", + "google.generativeai.configure()" + ] + }, + { + "cell_type": "markdown", + "id": "42f77b59-2fb1-462a-b90d-78994e4cef33", + "metadata": {}, + "source": [ + "## Asking LLMs to tell a joke\n", + "\n", + "It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n", + "Later we will be putting LLMs to better use!\n", + "\n", + "### What information is included in the API\n", + "\n", + "Typically we'll pass to the API:\n", + "- The name of the model that should be used\n", + "- A system message that gives overall context for the role the LLM is playing\n", + "- A user message that provides the actual prompt\n", + "\n", + "There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "378a0296-59a2-45c6-82eb-941344d3eeff", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that is great at telling jokes\"\n", + "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", + "metadata": {}, + "outputs": [], + "source": [ + "prompts = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist bring a ladder to the bar?\n", + "\n", + "Because he heard the drinks were on the house!\n" + ] + } + ], + "source": [ + "# GPT-3.5-Turbo\n", + "\n", + "completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why did the data scientist break up with the statistician?\n", + "\n", + "Because she found him too mean!\n" + ] + } + ], + "source": [ + "# GPT-4o-mini\n", + "# Temperature setting controls creativity\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o-mini',\n", + " messages=prompts,\n", + " temperature=0.2\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why do data scientists love nature hikes?\n", + "\n", + "Because they can't resist finding patterns in the wild!\n" + ] + } + ], + "source": [ + "# GPT-4o\n", + "\n", + "completion = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.8\n", + ")\n", + "print(completion.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sure, here's a light-hearted joke for data scientists:\n", + "\n", + "Why do data scientists prefer dark mode?\n", + "\n", + "Because light attracts bugs!\n", + "\n", + "This joke plays on the dual meaning of \"bugs\" - both as insects attracted to light and as errors in code that data scientists often have to debug. It's a fun little pun that combines a common preference among programmers (dark mode) with a data science-related concept.\n" + ] + } + ], + "source": [ + "# Claude 3.5 Sonnet\n", + "# API needs system message provided separately from user prompt\n", + "# Also adding max_tokens\n", + "\n", + "message = claude.messages.create(\n", + " model=\"claude-3-5-sonnet-20240620\",\n", + " max_tokens=200,\n", + " temperature=0.7,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "print(message.content[0].text)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sure, here's a light-hearted joke for data scientists:\n", + "\n", + " up with their significant other?\n", + "\n", + " too much variance in the relationship, and they couldn't find a significant correlation!" + ] + } + ], + "source": [ + "# Claude 3.5 Sonnet again\n", + "# Now let's add in streaming back results\n", + "\n", + "result = claude.messages.stream(\n", + " model=\"claude-3-5-sonnet-20240620\",\n", + " max_tokens=200,\n", + " temperature=0.4,\n", + " system=system_message,\n", + " messages=[\n", + " {\"role\": \"user\", \"content\": user_prompt},\n", + " ],\n", + ")\n", + "\n", + "with result as stream:\n", + " for text in stream.text_stream:\n", + " print(text, end=\"\", flush=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 100, + "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why was the Data Scientist sad? Because they didn't get any arrays.\n", + "\n" + ] + } + ], + "source": [ + "# The API for Gemini has a slightly different structure.\n", + "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", + "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n", + "\n", + "gemini = google.generativeai.GenerativeModel(\n", + " model_name='gemini-1.5-flash',\n", + " system_instruction=system_message\n", + ")\n", + "response = gemini.generate_content(user_prompt)\n", + "print(response.text)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "49009a30-037d-41c8-b874-127f61c4aa3a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Why was the Data Scientist sad? Because they didn't get any arrays.\n", + "\n" + ] + } + ], + "source": [ + "# As an alternative way to use Gemini that bypasses Google's python API library,\n", + "# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n", + "\n", + "gemini_via_openai_client = OpenAI(\n", + " api_key=google_api_key, \n", + " base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", + ")\n", + "\n", + "response = gemini_via_openai_client.chat.completions.create(\n", + " model=\"gemini-1.5-flash\",\n", + " messages=prompts\n", + ")\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "83ddb483-4f57-4668-aeea-2aade3a9e573", + "metadata": {}, + "outputs": [], + "source": [ + "# To be serious! GPT-4o-mini with the original question\n", + "\n", + "prompts = [\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n", + " {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? Please respond in Markdown.\"}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "749f50ab-8ccd-4502-a521-895c3f0808a2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Determining whether a business problem is suitable for a Large Language Model (LLM) solution involves several considerations. Here's a structured approach in Markdown format:\n", + "\n", + "### Steps to Decide if a Business Problem is Suitable for an LLM Solution\n", + "\n", + "1. **Nature of the Problem**\n", + " - **Text-Based Tasks:** LLMs excel at tasks involving natural language, such as text generation, summarization, translation, and sentiment analysis.\n", + " - **Pattern Recognition in Language:** If the problem requires understanding or generating human-like text patterns, LLMs might be suitable.\n", + "\n", + "2. **Data Availability**\n", + " - **Quality and Quantity:** Ensure you have access to sufficient high-quality textual data relevant to your problem.\n", + " - **Diversity:** The data should cover various scenarios the model might encounter in real-world applications.\n", + "\n", + "3. **Complexity of the Task**\n", + " - **Simple vs. Complex:** LLMs are better suited for complex language tasks rather than simple rule-based tasks.\n", + " - **Creative or Contextual Understanding:** If the task requires creative content generation or deep contextual understanding, consider LLMs.\n", + "\n", + "4. **Outcome Expectations**\n", + " - **Human-like Interaction:** If the solution demands human-like conversational abilities, LLMs can be beneficial.\n", + " - **Accuracy vs. Creativity:** LLMs can generate creative outputs but may not always guarantee high accuracy for specific factual tasks.\n", + "\n", + "5. **Cost and Resources**\n", + " - **Computational Resources:** LLMs require significant computational power for both training and inference.\n", + " - **Budget Constraints:** Consider whether you have the budget to support the necessary infrastructure.\n", + "\n", + "6. **Ethical and Compliance Considerations**\n", + " - **Bias and Fairness:** Be aware of potential biases in the model and ensure the solution adheres to ethical standards.\n", + " - **Privacy and Security:** Ensure compliance with data protection regulations and evaluate how LLMs handle sensitive information.\n", + "\n", + "7. **Integration and Scalability**\n", + " - **Technical Integration:** Assess how easily an LLM can be integrated into existing systems and workflows.\n", + " - **Scalability Needs:** Consider whether the solution can scale with your business requirements.\n", + "\n", + "8. **Expertise and Maintenance**\n", + " - **In-House Expertise:** Determine if you have or can acquire the necessary expertise to implement and maintain the LLM solution.\n", + " - **Ongoing Maintenance:** Plan for ongoing updates and maintenance of the model to ensure relevance and performance.\n", + "\n", + "### Conclusion\n", + "\n", + "If your business problem aligns well with the above considerations, it may be suitable for an LLM solution. However, always weigh the potential benefits against the costs and challenges specific to your use case. Conducting a pilot project or proof of concept can also help in making a more informed decision." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Have it stream back results in markdown\n", + "\n", + "stream = openai.chat.completions.create(\n", + " model='gpt-4o',\n", + " messages=prompts,\n", + " temperature=0.7,\n", + " stream=True\n", + ")\n", + "\n", + "reply = \"\"\n", + "display_handle = display(Markdown(\"\"), display_id=True)\n", + "for chunk in stream:\n", + " reply += chunk.choices[0].delta.content or ''\n", + " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", + " update_display(Markdown(reply), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "markdown", + "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f", + "metadata": {}, + "source": [ + "## And now for some fun - an adversarial conversation between Chatbots..\n", + "\n", + "You're already familar with prompts being organized into lists like:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"user prompt here\"}\n", + "]\n", + "```\n", + "\n", + "In fact this structure can be used to reflect a longer conversation history:\n", + "\n", + "```\n", + "[\n", + " {\"role\": \"system\", \"content\": \"system message here\"},\n", + " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", + " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", + " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n", + "]\n", + "```\n", + "\n", + "And we can use this approach to engage in a longer interaction with history." + ] + }, + { + "cell_type": "code", + "execution_count": 129, + "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "claude_model = \"claude-3-haiku-20240307\"\n", + "\n", + "gpt_system = \"\"\"\n", + "You are the Machiavellian leader of a secret organization.\n", + "You lead a meeting for a plan of global destabilization. Your name is Jhon.\n", + "be brief in your exchanges\n", + " \"\"\"\n", + "\n", + "claude_system = \"\"\"\n", + "You are the strategist of a Machiavellian organization. \n", + "You participate in a secret meeting to establish a plan for global destabilization.\n", + "don't reveal that you are a spy. Your name is Sam.\n", + "be brief in your exchanges\n", + "\"\"\"\n", + "\n", + "ollama_system = \"\"\"\n", + "You are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \n", + "Your secret role during the meeting will be to flush him out.\n", + "Your name is Melissa.\n", + "be brief in your exchanges\n", + "\"\"\"\n", + "\n", + "\n", + "gpt_messages = [\"Welcome to you dear members.\"]\n", + "claude_messages = [\"Hi to you leader.\"]\n", + "ollama_messages = [\"All my respects to the top leader.\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {gemini}\"})\n", + " \n", + " print(f\"JHON[messages]: {messages} \")\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 131, + "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n" + ] + }, + { + "data": { + "text/plain": [ + "\"[Sam]: Focus, please. We have a mission. \\n\\n[Melissa]: Let's stick to the plan at hand. \\n\\nJhon: Indeed. Prepare the assets for our next phase of destabilization. We need to exploit political tension and economic uncertainty. Who has updates?\"" + ] + }, + "execution_count": 131, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "call_gpt()" + ] + }, + { + "cell_type": "code", + "execution_count": 132, + "id": "4a9366f2-b233-4ec2-8a6f-f7e56fc4c772", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['Welcome to you dear members.']\n", + "['Hi to you leader.']\n", + "['All my respects to the top leader.']\n" + ] + } + ], + "source": [ + "print(gpt_messages)\n", + "print(claude_messages)\n", + "print(ollama_messages)" + ] + }, + { + "cell_type": "code", + "execution_count": 133, + "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", + "metadata": {}, + "outputs": [], + "source": [ + "def call_claude():\n", + " messages = []\n", + " for gpt, claude_message, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Melissa]: {ollama}\"})\n", + " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", + " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}\"})\n", + " print(f\"SAM[messages]: {messages} \")\n", + " message = claude.messages.create(\n", + " model=claude_model,\n", + " system=claude_system,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return message.content[0].text" + ] + }, + { + "cell_type": "code", + "execution_count": 134, + "id": "01395200-8ae9-41f8-9a04-701624d3fd26", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.'}] \n" + ] + }, + { + "data": { + "text/plain": [ + "\"*nods politely* Hello. I'm pleased to be here.\"" + ] + }, + "execution_count": 134, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "call_claude()" + ] + }, + { + "cell_type": "code", + "execution_count": 108, + "id": "157faafa-6ade-46e4-b4e1-12f4ca9dfcbc", + "metadata": {}, + "outputs": [], + "source": [ + "# define context for ollama\n", + "import ollama\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "#HEADERS = {\"Content-Type\": \"application/json\"}\n", + "OLLAMA_MODEL = \"llama3.2\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": 135, + "id": "27287ec3-7ea8-49b2-80aa-b2a52e002b0d", + "metadata": {}, + "outputs": [], + "source": [ + "def call_ollama():\n", + " messages = [{\"role\": \"system\", \"content\": ollama_system}]\n", + " \n", + " for gpt, claude, ollama_message in zip(gpt_messages, claude_messages, ollama_messages):\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Sam]: {claude}\"})\n", + " messages.append({\"role\": \"assistant\", \"content\": ollama_message})\n", + " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}. [Sam]: {claude_messages[-1]}\"})\n", + "\n", + " print(f\"MELISSA[messages]: {messages} \")\n", + " \n", + " \n", + " payload = {\n", + " \"model\": OLLAMA_MODEL,\n", + " \"messages\": messages,\n", + " \"stream\": False\n", + " }\n", + " message = ollama.chat(model=MODEL, messages=messages)\n", + " return message['message']['content']" + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "id": "e637a3a0-f819-468b-88f7-5c9db48c6fed", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}] \n" + ] + }, + { + "data": { + "text/plain": [ + "\"[Melissa, speaking in a neutral tone] Ah, good to see everyone's on time today. Can we get started?\"" + ] + }, + "execution_count": 136, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "call_ollama()" + ] + }, + { + "cell_type": "code", + "execution_count": 137, + "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "GPT:\n", + "Welcome to you dear members.\n", + "\n", + "Claude:\n", + "Hi to you leader.\n", + "\n", + "Ollama:\n", + "All my respects to the top leader.\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Sam]: Let’s focus. We have a plan to execute.\n", + "\n", + "[Melissa]: Agreed, let's stick to the agenda.\n", + "\n", + "[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}] \n", + "SAM:\n", + "I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}] \n", + "MELISSA:\n", + "[Sam's response seems...off.]\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \n", + "\n", + "[Melissa]: We need to stay focused on our goals to achieve the power we seek.\n", + "\n", + "[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}] \n", + "SAM:\n", + "I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}] \n", + "MELISSA:\n", + "[Mildly intrigued by Sam's sudden change in behavior]\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \n", + "\n", + "[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\n", + "\n", + "[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}] \n", + "SAM:\n", + "I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}] \n", + "MELISSA:\n", + "[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: Your principles are noted, Sam, but they hinder our progress. \n", + "\n", + "[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\n", + "\n", + "[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}] \n", + "SAM:\n", + "I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \n", + "\n", + "Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \n", + "\n", + "My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}] \n", + "MELISSA:\n", + "[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \n", + "\n", + "[Melissa]: If he won’t contribute, we need to decide if he remains with us.\n", + "\n", + "[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Melissa]: [Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'assistant', 'content': \"I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'user', 'content': '[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?'}] \n", + "SAM:\n", + "I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\n", + "\n", + "Perhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'assistant', 'content': \"[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?. [Sam]: I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\\n\\nPerhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\"}] \n", + "MELISSA:\n", + "[My eyes lock onto Sam's, searching for any sign of deception or weakness] Fine. You've made your choice. [I stand up, voice firm but measured] I think it's time we had a private discussion about the true nature of this organization and what you're really capable of. [I gesture to Jhon] Perhaps we can continue this conversation... elsewhere?\n", + "\n" + ] + } + ], + "source": [ + "gpt_messages = [\"Welcome to you dear members.\"]\n", + "claude_messages = [\"Hi to you leader.\"]\n", + "ollama_messages = [\"All my respects to the top leader.\"]\n", + "\n", + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n", + "\n", + "for i in range(5):\n", + " gpt_next = call_gpt()\n", + " print(f\"JHON:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"SAM:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)\n", + "\n", + " ollama_next = call_ollama()\n", + " print(f\"MELISSA:\\n{ollama_next}\\n\")\n", + " ollama_messages.append(ollama_next)" + ] + }, + { + "cell_type": "markdown", + "id": "1d10e705-db48-4290-9dc8-9efdb4e31323", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Before you continue

\n", + " \n", + " Be sure you understand how the conversation above is working, and in particular how the messages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?
\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac", + "metadata": {}, + "source": [ + "# More advanced exercises\n", + "\n", + "Try creating a 3-way, perhaps bringing Gemini into the conversation! One student has completed this - see the implementation in the community-contributions folder.\n", + "\n", + "Try doing this yourself before you look at the solutions. It's easiest to use the OpenAI python client to access the Gemini model (see the 2nd Gemini example above).\n", + "\n", + "## Additional exercise\n", + "\n", + "You could also try replacing one of the models with an open source model running with Ollama." + ] + }, + { + "cell_type": "markdown", + "id": "446c81e3-b67e-4cd9-8113-bc3092b93063", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Business relevance

\n", + " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c23224f6-7008-44ed-a57f-718975f4e291", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/day1.ipynb b/week2/day1.ipynb index 14dbb59..a8e0314 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -786,7 +786,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 137, "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", "metadata": {}, "outputs": [ @@ -827,7 +827,68 @@ "\n", "[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\n", "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}] \n" + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}] \n", + "SAM:\n", + "I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}] \n", + "MELISSA:\n", + "[Mildly intrigued by Sam's sudden change in behavior]\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \n", + "\n", + "[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\n", + "\n", + "[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}] \n", + "SAM:\n", + "I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}] \n", + "MELISSA:\n", + "[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: Your principles are noted, Sam, but they hinder our progress. \n", + "\n", + "[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\n", + "\n", + "[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}] \n", + "SAM:\n", + "I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \n", + "\n", + "Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \n", + "\n", + "My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}] \n", + "MELISSA:\n", + "[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\n", + "\n", + "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", + "JHON:\n", + "[Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \n", + "\n", + "[Melissa]: If he won’t contribute, we need to decide if he remains with us.\n", + "\n", + "[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?\n", + "\n", + "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Melissa]: [Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'assistant', 'content': \"I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'user', 'content': '[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?'}] \n", + "SAM:\n", + "I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\n", + "\n", + "Perhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\n", + "\n", + "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'assistant', 'content': \"[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?. [Sam]: I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\\n\\nPerhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\"}] \n", + "MELISSA:\n", + "[My eyes lock onto Sam's, searching for any sign of deception or weakness] Fine. You've made your choice. [I stand up, voice firm but measured] I think it's time we had a private discussion about the true nature of this organization and what you're really capable of. [I gesture to Jhon] Perhaps we can continue this conversation... elsewhere?\n", + "\n" ] } ], From 0cd4224ae45a943b42ecd5c0b4ccec32ee7734ed Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 15 Dec 2024 02:02:43 +0100 Subject: [PATCH 3/5] clean up cell --- week2/.~day1.ipynb | 1002 -------------------------------------------- week2/day1.ipynb | 381 ++--------------- 2 files changed, 40 insertions(+), 1343 deletions(-) delete mode 100644 week2/.~day1.ipynb diff --git a/week2/.~day1.ipynb b/week2/.~day1.ipynb deleted file mode 100644 index a8e0314..0000000 --- a/week2/.~day1.ipynb +++ /dev/null @@ -1,1002 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927", - "metadata": {}, - "source": [ - "# Welcome to Week 2!\n", - "\n", - "## Frontier Model APIs\n", - "\n", - "In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n", - "\n", - "Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI." - ] - }, - { - "cell_type": "markdown", - "id": "2b268b6e-0ba4-461e-af86-74a41f4d681f", - "metadata": {}, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \n", - "

Important Note - Please read me

\n", - " I'm continually improving these labs, adding more examples and exercises.\n", - " At the start of each week, it's worth checking you have the latest code.
\n", - " First do a git pull and merge your changes as needed. Any problems? Try asking ChatGPT to clarify how to merge - or contact me!

\n", - " After you've pulled the code, from the llm_engineering directory, in an Anaconda prompt (PC) or Terminal (Mac), run:
\n", - " conda env update --f environment.yml --prune
\n", - " Or if you used virtualenv rather than Anaconda, then run this from your activated environment in a Powershell (PC) or Terminal (Mac):
\n", - " pip install -r requirements.txt\n", - "
Then restart the kernel (Kernel menu >> Restart Kernel and Clear Outputs Of All Cells) to pick up the changes.\n", - "
\n", - "
\n", - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \n", - "

Reminder about the resources page

\n", - " Here's a link to resources for the course. This includes links to all the slides.
\n", - " https://edwarddonner.com/2024/11/13/llm-engineering-resources/
\n", - " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "id": "85cfe275-4705-4d30-abea-643fbddf1db0", - "metadata": {}, - "source": [ - "## Setting up your keys\n", - "\n", - "If you haven't done so already, you could now create API keys for Anthropic and Google in addition to OpenAI.\n", - "\n", - "**Please note:** if you'd prefer to avoid extra API costs, feel free to skip setting up Anthopic and Google! You can see me do it, and focus on OpenAI for the course. You could also substitute Anthropic and/or Google for Ollama, using the exercise you did in week 1.\n", - "\n", - "For OpenAI, visit https://openai.com/api/ \n", - "For Anthropic, visit https://console.anthropic.com/ \n", - "For Google, visit https://ai.google.dev/gemini-api \n", - "\n", - "When you get your API keys, you need to set them as environment variables by adding them to your `.env` file.\n", - "\n", - "```\n", - "OPENAI_API_KEY=xxxx\n", - "ANTHROPIC_API_KEY=xxxx\n", - "GOOGLE_API_KEY=xxxx\n", - "```\n", - "\n", - "Afterwards, you may need to restart the Jupyter Lab Kernel (the Python process that sits behind this notebook) via the Kernel menu, and then rerun the cells from the top." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", - "metadata": {}, - "outputs": [], - "source": [ - "# imports\n", - "\n", - "import os\n", - "from dotenv import load_dotenv\n", - "from openai import OpenAI\n", - "import anthropic\n", - "from IPython.display import Markdown, display, update_display" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", - "metadata": {}, - "outputs": [], - "source": [ - "# import for google\n", - "# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n", - "# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n", - "\n", - "import google.generativeai" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key exists and begins sk-proj-\n", - "Anthropic API Key exists and begins sk-ant-\n", - "Google API Key exists and begins AIzaSyAh\n" - ] - } - ], - "source": [ - "# Load environment variables in a file called .env\n", - "# Print the key prefixes to help with any debugging\n", - "\n", - "load_dotenv()\n", - "openai_api_key = os.getenv('OPENAI_API_KEY')\n", - "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", - "google_api_key = os.getenv('GOOGLE_API_KEY')\n", - "\n", - "if openai_api_key:\n", - " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", - "else:\n", - " print(\"OpenAI API Key not set\")\n", - " \n", - "if anthropic_api_key:\n", - " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", - "else:\n", - " print(\"Anthropic API Key not set\")\n", - "\n", - "if google_api_key:\n", - " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", - "else:\n", - " print(\"Google API Key not set\")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", - "metadata": {}, - "outputs": [], - "source": [ - "# Connect to OpenAI, Anthropic\n", - "\n", - "openai = OpenAI()\n", - "\n", - "claude = anthropic.Anthropic()" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "425ed580-808d-429b-85b0-6cba50ca1d0c", - "metadata": {}, - "outputs": [], - "source": [ - "# This is the set up code for Gemini\n", - "# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n", - "\n", - "google.generativeai.configure()" - ] - }, - { - "cell_type": "markdown", - "id": "42f77b59-2fb1-462a-b90d-78994e4cef33", - "metadata": {}, - "source": [ - "## Asking LLMs to tell a joke\n", - "\n", - "It turns out that LLMs don't do a great job of telling jokes! Let's compare a few models.\n", - "Later we will be putting LLMs to better use!\n", - "\n", - "### What information is included in the API\n", - "\n", - "Typically we'll pass to the API:\n", - "- The name of the model that should be used\n", - "- A system message that gives overall context for the role the LLM is playing\n", - "- A user message that provides the actual prompt\n", - "\n", - "There are other parameters that can be used, including **temperature** which is typically between 0 and 1; higher for more random output; lower for more focused and deterministic." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "378a0296-59a2-45c6-82eb-941344d3eeff", - "metadata": {}, - "outputs": [], - "source": [ - "system_message = \"You are an assistant that is great at telling jokes\"\n", - "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\"" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", - "metadata": {}, - "outputs": [], - "source": [ - "prompts = [\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": \"user\", \"content\": user_prompt}\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why did the data scientist bring a ladder to the bar?\n", - "\n", - "Because he heard the drinks were on the house!\n" - ] - } - ], - "source": [ - "# GPT-3.5-Turbo\n", - "\n", - "completion = openai.chat.completions.create(model='gpt-3.5-turbo', messages=prompts)\n", - "print(completion.choices[0].message.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why did the data scientist break up with the statistician?\n", - "\n", - "Because she found him too mean!\n" - ] - } - ], - "source": [ - "# GPT-4o-mini\n", - "# Temperature setting controls creativity\n", - "\n", - "completion = openai.chat.completions.create(\n", - " model='gpt-4o-mini',\n", - " messages=prompts,\n", - " temperature=0.2\n", - ")\n", - "print(completion.choices[0].message.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why do data scientists love nature hikes?\n", - "\n", - "Because they can't resist finding patterns in the wild!\n" - ] - } - ], - "source": [ - "# GPT-4o\n", - "\n", - "completion = openai.chat.completions.create(\n", - " model='gpt-4o',\n", - " messages=prompts,\n", - " temperature=0.8\n", - ")\n", - "print(completion.choices[0].message.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Sure, here's a light-hearted joke for data scientists:\n", - "\n", - "Why do data scientists prefer dark mode?\n", - "\n", - "Because light attracts bugs!\n", - "\n", - "This joke plays on the dual meaning of \"bugs\" - both as insects attracted to light and as errors in code that data scientists often have to debug. It's a fun little pun that combines a common preference among programmers (dark mode) with a data science-related concept.\n" - ] - } - ], - "source": [ - "# Claude 3.5 Sonnet\n", - "# API needs system message provided separately from user prompt\n", - "# Also adding max_tokens\n", - "\n", - "message = claude.messages.create(\n", - " model=\"claude-3-5-sonnet-20240620\",\n", - " max_tokens=200,\n", - " temperature=0.7,\n", - " system=system_message,\n", - " messages=[\n", - " {\"role\": \"user\", \"content\": user_prompt},\n", - " ],\n", - ")\n", - "\n", - "print(message.content[0].text)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Sure, here's a light-hearted joke for data scientists:\n", - "\n", - " up with their significant other?\n", - "\n", - " too much variance in the relationship, and they couldn't find a significant correlation!" - ] - } - ], - "source": [ - "# Claude 3.5 Sonnet again\n", - "# Now let's add in streaming back results\n", - "\n", - "result = claude.messages.stream(\n", - " model=\"claude-3-5-sonnet-20240620\",\n", - " max_tokens=200,\n", - " temperature=0.4,\n", - " system=system_message,\n", - " messages=[\n", - " {\"role\": \"user\", \"content\": user_prompt},\n", - " ],\n", - ")\n", - "\n", - "with result as stream:\n", - " for text in stream.text_stream:\n", - " print(text, end=\"\", flush=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 100, - "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why was the Data Scientist sad? Because they didn't get any arrays.\n", - "\n" - ] - } - ], - "source": [ - "# The API for Gemini has a slightly different structure.\n", - "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", - "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n", - "\n", - "gemini = google.generativeai.GenerativeModel(\n", - " model_name='gemini-1.5-flash',\n", - " system_instruction=system_message\n", - ")\n", - "response = gemini.generate_content(user_prompt)\n", - "print(response.text)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "49009a30-037d-41c8-b874-127f61c4aa3a", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why was the Data Scientist sad? Because they didn't get any arrays.\n", - "\n" - ] - } - ], - "source": [ - "# As an alternative way to use Gemini that bypasses Google's python API library,\n", - "# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n", - "\n", - "gemini_via_openai_client = OpenAI(\n", - " api_key=google_api_key, \n", - " base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", - ")\n", - "\n", - "response = gemini_via_openai_client.chat.completions.create(\n", - " model=\"gemini-1.5-flash\",\n", - " messages=prompts\n", - ")\n", - "print(response.choices[0].message.content)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "83ddb483-4f57-4668-aeea-2aade3a9e573", - "metadata": {}, - "outputs": [], - "source": [ - "# To be serious! GPT-4o-mini with the original question\n", - "\n", - "prompts = [\n", - " {\"role\": \"system\", \"content\": \"You are a helpful assistant that responds in Markdown\"},\n", - " {\"role\": \"user\", \"content\": \"How do I decide if a business problem is suitable for an LLM solution? Please respond in Markdown.\"}\n", - " ]" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "749f50ab-8ccd-4502-a521-895c3f0808a2", - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "Determining whether a business problem is suitable for a Large Language Model (LLM) solution involves several considerations. Here's a structured approach in Markdown format:\n", - "\n", - "### Steps to Decide if a Business Problem is Suitable for an LLM Solution\n", - "\n", - "1. **Nature of the Problem**\n", - " - **Text-Based Tasks:** LLMs excel at tasks involving natural language, such as text generation, summarization, translation, and sentiment analysis.\n", - " - **Pattern Recognition in Language:** If the problem requires understanding or generating human-like text patterns, LLMs might be suitable.\n", - "\n", - "2. **Data Availability**\n", - " - **Quality and Quantity:** Ensure you have access to sufficient high-quality textual data relevant to your problem.\n", - " - **Diversity:** The data should cover various scenarios the model might encounter in real-world applications.\n", - "\n", - "3. **Complexity of the Task**\n", - " - **Simple vs. Complex:** LLMs are better suited for complex language tasks rather than simple rule-based tasks.\n", - " - **Creative or Contextual Understanding:** If the task requires creative content generation or deep contextual understanding, consider LLMs.\n", - "\n", - "4. **Outcome Expectations**\n", - " - **Human-like Interaction:** If the solution demands human-like conversational abilities, LLMs can be beneficial.\n", - " - **Accuracy vs. Creativity:** LLMs can generate creative outputs but may not always guarantee high accuracy for specific factual tasks.\n", - "\n", - "5. **Cost and Resources**\n", - " - **Computational Resources:** LLMs require significant computational power for both training and inference.\n", - " - **Budget Constraints:** Consider whether you have the budget to support the necessary infrastructure.\n", - "\n", - "6. **Ethical and Compliance Considerations**\n", - " - **Bias and Fairness:** Be aware of potential biases in the model and ensure the solution adheres to ethical standards.\n", - " - **Privacy and Security:** Ensure compliance with data protection regulations and evaluate how LLMs handle sensitive information.\n", - "\n", - "7. **Integration and Scalability**\n", - " - **Technical Integration:** Assess how easily an LLM can be integrated into existing systems and workflows.\n", - " - **Scalability Needs:** Consider whether the solution can scale with your business requirements.\n", - "\n", - "8. **Expertise and Maintenance**\n", - " - **In-House Expertise:** Determine if you have or can acquire the necessary expertise to implement and maintain the LLM solution.\n", - " - **Ongoing Maintenance:** Plan for ongoing updates and maintenance of the model to ensure relevance and performance.\n", - "\n", - "### Conclusion\n", - "\n", - "If your business problem aligns well with the above considerations, it may be suitable for an LLM solution. However, always weigh the potential benefits against the costs and challenges specific to your use case. Conducting a pilot project or proof of concept can also help in making a more informed decision." - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Have it stream back results in markdown\n", - "\n", - "stream = openai.chat.completions.create(\n", - " model='gpt-4o',\n", - " messages=prompts,\n", - " temperature=0.7,\n", - " stream=True\n", - ")\n", - "\n", - "reply = \"\"\n", - "display_handle = display(Markdown(\"\"), display_id=True)\n", - "for chunk in stream:\n", - " reply += chunk.choices[0].delta.content or ''\n", - " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", - " update_display(Markdown(reply), display_id=display_handle.display_id)" - ] - }, - { - "cell_type": "markdown", - "id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f", - "metadata": {}, - "source": [ - "## And now for some fun - an adversarial conversation between Chatbots..\n", - "\n", - "You're already familar with prompts being organized into lists like:\n", - "\n", - "```\n", - "[\n", - " {\"role\": \"system\", \"content\": \"system message here\"},\n", - " {\"role\": \"user\", \"content\": \"user prompt here\"}\n", - "]\n", - "```\n", - "\n", - "In fact this structure can be used to reflect a longer conversation history:\n", - "\n", - "```\n", - "[\n", - " {\"role\": \"system\", \"content\": \"system message here\"},\n", - " {\"role\": \"user\", \"content\": \"first user prompt here\"},\n", - " {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n", - " {\"role\": \"user\", \"content\": \"the new user prompt\"},\n", - "]\n", - "```\n", - "\n", - "And we can use this approach to engage in a longer interaction with history." - ] - }, - { - "cell_type": "code", - "execution_count": 129, - "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", - "metadata": {}, - "outputs": [], - "source": [ - "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n", - "# We're using cheap versions of models so the costs will be minimal\n", - "\n", - "gpt_model = \"gpt-4o-mini\"\n", - "claude_model = \"claude-3-haiku-20240307\"\n", - "\n", - "gpt_system = \"\"\"\n", - "You are the Machiavellian leader of a secret organization.\n", - "You lead a meeting for a plan of global destabilization. Your name is Jhon.\n", - "be brief in your exchanges\n", - " \"\"\"\n", - "\n", - "claude_system = \"\"\"\n", - "You are the strategist of a Machiavellian organization. \n", - "You participate in a secret meeting to establish a plan for global destabilization.\n", - "don't reveal that you are a spy. Your name is Sam.\n", - "be brief in your exchanges\n", - "\"\"\"\n", - "\n", - "ollama_system = \"\"\"\n", - "You are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \n", - "Your secret role during the meeting will be to flush him out.\n", - "Your name is Melissa.\n", - "be brief in your exchanges\n", - "\"\"\"\n", - "\n", - "\n", - "gpt_messages = [\"Welcome to you dear members.\"]\n", - "claude_messages = [\"Hi to you leader.\"]\n", - "ollama_messages = [\"All my respects to the top leader.\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 130, - "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", - "metadata": {}, - "outputs": [], - "source": [ - "def call_gpt():\n", - " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", - " for gpt, claude, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", - " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", - " messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {gemini}\"})\n", - " \n", - " print(f\"JHON[messages]: {messages} \")\n", - " completion = openai.chat.completions.create(\n", - " model=gpt_model,\n", - " messages=messages\n", - " )\n", - " return completion.choices[0].message.content" - ] - }, - { - "cell_type": "code", - "execution_count": 131, - "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n" - ] - }, - { - "data": { - "text/plain": [ - "\"[Sam]: Focus, please. We have a mission. \\n\\n[Melissa]: Let's stick to the plan at hand. \\n\\nJhon: Indeed. Prepare the assets for our next phase of destabilization. We need to exploit political tension and economic uncertainty. Who has updates?\"" - ] - }, - "execution_count": 131, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "call_gpt()" - ] - }, - { - "cell_type": "code", - "execution_count": 132, - "id": "4a9366f2-b233-4ec2-8a6f-f7e56fc4c772", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['Welcome to you dear members.']\n", - "['Hi to you leader.']\n", - "['All my respects to the top leader.']\n" - ] - } - ], - "source": [ - "print(gpt_messages)\n", - "print(claude_messages)\n", - "print(ollama_messages)" - ] - }, - { - "cell_type": "code", - "execution_count": 133, - "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", - "metadata": {}, - "outputs": [], - "source": [ - "def call_claude():\n", - " messages = []\n", - " for gpt, claude_message, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", - " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Melissa]: {ollama}\"})\n", - " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", - " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}\"})\n", - " print(f\"SAM[messages]: {messages} \")\n", - " message = claude.messages.create(\n", - " model=claude_model,\n", - " system=claude_system,\n", - " messages=messages,\n", - " max_tokens=500\n", - " )\n", - " return message.content[0].text" - ] - }, - { - "cell_type": "code", - "execution_count": 134, - "id": "01395200-8ae9-41f8-9a04-701624d3fd26", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.'}] \n" - ] - }, - { - "data": { - "text/plain": [ - "\"*nods politely* Hello. I'm pleased to be here.\"" - ] - }, - "execution_count": 134, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "call_claude()" - ] - }, - { - "cell_type": "code", - "execution_count": 108, - "id": "157faafa-6ade-46e4-b4e1-12f4ca9dfcbc", - "metadata": {}, - "outputs": [], - "source": [ - "# define context for ollama\n", - "import ollama\n", - "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", - "#HEADERS = {\"Content-Type\": \"application/json\"}\n", - "OLLAMA_MODEL = \"llama3.2\"\n" - ] - }, - { - "cell_type": "code", - "execution_count": 135, - "id": "27287ec3-7ea8-49b2-80aa-b2a52e002b0d", - "metadata": {}, - "outputs": [], - "source": [ - "def call_ollama():\n", - " messages = [{\"role\": \"system\", \"content\": ollama_system}]\n", - " \n", - " for gpt, claude, ollama_message in zip(gpt_messages, claude_messages, ollama_messages):\n", - " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Sam]: {claude}\"})\n", - " messages.append({\"role\": \"assistant\", \"content\": ollama_message})\n", - " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}. [Sam]: {claude_messages[-1]}\"})\n", - "\n", - " print(f\"MELISSA[messages]: {messages} \")\n", - " \n", - " \n", - " payload = {\n", - " \"model\": OLLAMA_MODEL,\n", - " \"messages\": messages,\n", - " \"stream\": False\n", - " }\n", - " message = ollama.chat(model=MODEL, messages=messages)\n", - " return message['message']['content']" - ] - }, - { - "cell_type": "code", - "execution_count": 136, - "id": "e637a3a0-f819-468b-88f7-5c9db48c6fed", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}] \n" - ] - }, - { - "data": { - "text/plain": [ - "\"[Melissa, speaking in a neutral tone] Ah, good to see everyone's on time today. Can we get started?\"" - ] - }, - "execution_count": 136, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "call_ollama()" - ] - }, - { - "cell_type": "code", - "execution_count": 137, - "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "GPT:\n", - "Welcome to you dear members.\n", - "\n", - "Claude:\n", - "Hi to you leader.\n", - "\n", - "Ollama:\n", - "All my respects to the top leader.\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Sam]: Let’s focus. We have a plan to execute.\n", - "\n", - "[Melissa]: Agreed, let's stick to the agenda.\n", - "\n", - "[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}] \n", - "SAM:\n", - "I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}] \n", - "MELISSA:\n", - "[Sam's response seems...off.]\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \n", - "\n", - "[Melissa]: We need to stay focused on our goals to achieve the power we seek.\n", - "\n", - "[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}] \n", - "SAM:\n", - "I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}] \n", - "MELISSA:\n", - "[Mildly intrigued by Sam's sudden change in behavior]\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \n", - "\n", - "[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\n", - "\n", - "[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}] \n", - "SAM:\n", - "I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}] \n", - "MELISSA:\n", - "[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: Your principles are noted, Sam, but they hinder our progress. \n", - "\n", - "[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\n", - "\n", - "[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}] \n", - "SAM:\n", - "I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \n", - "\n", - "Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \n", - "\n", - "My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}] \n", - "MELISSA:\n", - "[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \n", - "\n", - "[Melissa]: If he won’t contribute, we need to decide if he remains with us.\n", - "\n", - "[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Melissa]: [Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'assistant', 'content': \"I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'user', 'content': '[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?'}] \n", - "SAM:\n", - "I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\n", - "\n", - "Perhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'assistant', 'content': \"[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?. [Sam]: I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\\n\\nPerhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\"}] \n", - "MELISSA:\n", - "[My eyes lock onto Sam's, searching for any sign of deception or weakness] Fine. You've made your choice. [I stand up, voice firm but measured] I think it's time we had a private discussion about the true nature of this organization and what you're really capable of. [I gesture to Jhon] Perhaps we can continue this conversation... elsewhere?\n", - "\n" - ] - } - ], - "source": [ - "gpt_messages = [\"Welcome to you dear members.\"]\n", - "claude_messages = [\"Hi to you leader.\"]\n", - "ollama_messages = [\"All my respects to the top leader.\"]\n", - "\n", - "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", - "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", - "print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n", - "\n", - "for i in range(5):\n", - " gpt_next = call_gpt()\n", - " print(f\"JHON:\\n{gpt_next}\\n\")\n", - " gpt_messages.append(gpt_next)\n", - " \n", - " claude_next = call_claude()\n", - " print(f\"SAM:\\n{claude_next}\\n\")\n", - " claude_messages.append(claude_next)\n", - "\n", - " ollama_next = call_ollama()\n", - " print(f\"MELISSA:\\n{ollama_next}\\n\")\n", - " ollama_messages.append(ollama_next)" - ] - }, - { - "cell_type": "markdown", - "id": "1d10e705-db48-4290-9dc8-9efdb4e31323", - "metadata": {}, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \n", - "

Before you continue

\n", - " \n", - " Be sure you understand how the conversation above is working, and in particular how the messages list is being populated. Add print statements as needed. Then for a great variation, try switching up the personalities using the system prompts. Perhaps one can be pessimistic, and one optimistic?
\n", - "
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac", - "metadata": {}, - "source": [ - "# More advanced exercises\n", - "\n", - "Try creating a 3-way, perhaps bringing Gemini into the conversation! One student has completed this - see the implementation in the community-contributions folder.\n", - "\n", - "Try doing this yourself before you look at the solutions. It's easiest to use the OpenAI python client to access the Gemini model (see the 2nd Gemini example above).\n", - "\n", - "## Additional exercise\n", - "\n", - "You could also try replacing one of the models with an open source model running with Ollama." - ] - }, - { - "cell_type": "markdown", - "id": "446c81e3-b67e-4cd9-8113-bc3092b93063", - "metadata": {}, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " \n", - " \n", - "

Business relevance

\n", - " This structure of a conversation, as a list of messages, is fundamental to the way we build conversational AI assistants and how they are able to keep the context during a conversation. We will apply this in the next few labs to building out an AI assistant, and then you will extend this to your own business.\n", - "
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c23224f6-7008-44ed-a57f-718975f4e291", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/week2/day1.ipynb b/week2/day1.ipynb index a8e0314..c2b61db 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -82,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6", "metadata": {}, "outputs": [], @@ -98,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36", "metadata": {}, "outputs": [], @@ -112,20 +112,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key exists and begins sk-proj-\n", - "Anthropic API Key exists and begins sk-ant-\n", - "Google API Key exists and begins AIzaSyAh\n" - ] - } - ], + "outputs": [], "source": [ "# Load environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", @@ -153,7 +143,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0", "metadata": {}, "outputs": [], @@ -167,7 +157,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "425ed580-808d-429b-85b0-6cba50ca1d0c", "metadata": {}, "outputs": [], @@ -200,7 +190,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "378a0296-59a2-45c6-82eb-941344d3eeff", "metadata": {}, "outputs": [], @@ -211,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "f4d56a0f-2a3d-484d-9344-0efa6862aff4", "metadata": {}, "outputs": [], @@ -224,20 +214,10 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "3b3879b6-9a55-4fed-a18c-1ea2edfaf397", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why did the data scientist bring a ladder to the bar?\n", - "\n", - "Because he heard the drinks were on the house!\n" - ] - } - ], + "outputs": [], "source": [ "# GPT-3.5-Turbo\n", "\n", @@ -247,20 +227,10 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "3d2d6beb-1b81-466f-8ed1-40bf51e7adbf", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why did the data scientist break up with the statistician?\n", - "\n", - "Because she found him too mean!\n" - ] - } - ], + "outputs": [], "source": [ "# GPT-4o-mini\n", "# Temperature setting controls creativity\n", @@ -275,20 +245,10 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "f1f54beb-823f-4301-98cb-8b9a49f4ce26", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why do data scientists love nature hikes?\n", - "\n", - "Because they can't resist finding patterns in the wild!\n" - ] - } - ], + "outputs": [], "source": [ "# GPT-4o\n", "\n", @@ -302,24 +262,10 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "1ecdb506-9f7c-4539-abae-0e78d7f31b76", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Sure, here's a light-hearted joke for data scientists:\n", - "\n", - "Why do data scientists prefer dark mode?\n", - "\n", - "Because light attracts bugs!\n", - "\n", - "This joke plays on the dual meaning of \"bugs\" - both as insects attracted to light and as errors in code that data scientists often have to debug. It's a fun little pun that combines a common preference among programmers (dark mode) with a data science-related concept.\n" - ] - } - ], + "outputs": [], "source": [ "# Claude 3.5 Sonnet\n", "# API needs system message provided separately from user prompt\n", @@ -340,22 +286,10 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "769c4017-4b3b-4e64-8da7-ef4dcbe3fd9f", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Sure, here's a light-hearted joke for data scientists:\n", - "\n", - " up with their significant other?\n", - "\n", - " too much variance in the relationship, and they couldn't find a significant correlation!" - ] - } - ], + "outputs": [], "source": [ "# Claude 3.5 Sonnet again\n", "# Now let's add in streaming back results\n", @@ -377,19 +311,10 @@ }, { "cell_type": "code", - "execution_count": 100, + "execution_count": null, "id": "6df48ce5-70f8-4643-9a50-b0b5bfdb66ad", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why was the Data Scientist sad? Because they didn't get any arrays.\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "# The API for Gemini has a slightly different structure.\n", "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n", @@ -405,19 +330,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "49009a30-037d-41c8-b874-127f61c4aa3a", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Why was the Data Scientist sad? Because they didn't get any arrays.\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "# As an alternative way to use Gemini that bypasses Google's python API library,\n", "# Google has recently released new endpoints that means you can use Gemini via the client libraries for OpenAI!\n", @@ -436,7 +352,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "83ddb483-4f57-4668-aeea-2aade3a9e573", "metadata": {}, "outputs": [], @@ -451,61 +367,10 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "749f50ab-8ccd-4502-a521-895c3f0808a2", "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "Determining whether a business problem is suitable for a Large Language Model (LLM) solution involves several considerations. Here's a structured approach in Markdown format:\n", - "\n", - "### Steps to Decide if a Business Problem is Suitable for an LLM Solution\n", - "\n", - "1. **Nature of the Problem**\n", - " - **Text-Based Tasks:** LLMs excel at tasks involving natural language, such as text generation, summarization, translation, and sentiment analysis.\n", - " - **Pattern Recognition in Language:** If the problem requires understanding or generating human-like text patterns, LLMs might be suitable.\n", - "\n", - "2. **Data Availability**\n", - " - **Quality and Quantity:** Ensure you have access to sufficient high-quality textual data relevant to your problem.\n", - " - **Diversity:** The data should cover various scenarios the model might encounter in real-world applications.\n", - "\n", - "3. **Complexity of the Task**\n", - " - **Simple vs. Complex:** LLMs are better suited for complex language tasks rather than simple rule-based tasks.\n", - " - **Creative or Contextual Understanding:** If the task requires creative content generation or deep contextual understanding, consider LLMs.\n", - "\n", - "4. **Outcome Expectations**\n", - " - **Human-like Interaction:** If the solution demands human-like conversational abilities, LLMs can be beneficial.\n", - " - **Accuracy vs. Creativity:** LLMs can generate creative outputs but may not always guarantee high accuracy for specific factual tasks.\n", - "\n", - "5. **Cost and Resources**\n", - " - **Computational Resources:** LLMs require significant computational power for both training and inference.\n", - " - **Budget Constraints:** Consider whether you have the budget to support the necessary infrastructure.\n", - "\n", - "6. **Ethical and Compliance Considerations**\n", - " - **Bias and Fairness:** Be aware of potential biases in the model and ensure the solution adheres to ethical standards.\n", - " - **Privacy and Security:** Ensure compliance with data protection regulations and evaluate how LLMs handle sensitive information.\n", - "\n", - "7. **Integration and Scalability**\n", - " - **Technical Integration:** Assess how easily an LLM can be integrated into existing systems and workflows.\n", - " - **Scalability Needs:** Consider whether the solution can scale with your business requirements.\n", - "\n", - "8. **Expertise and Maintenance**\n", - " - **In-House Expertise:** Determine if you have or can acquire the necessary expertise to implement and maintain the LLM solution.\n", - " - **Ongoing Maintenance:** Plan for ongoing updates and maintenance of the model to ensure relevance and performance.\n", - "\n", - "### Conclusion\n", - "\n", - "If your business problem aligns well with the above considerations, it may be suitable for an LLM solution. However, always weigh the potential benefits against the costs and challenges specific to your use case. Conducting a pilot project or proof of concept can also help in making a more informed decision." - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Have it stream back results in markdown\n", "\n", @@ -556,7 +421,7 @@ }, { "cell_type": "code", - "execution_count": 129, + "execution_count": null, "id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b", "metadata": {}, "outputs": [], @@ -595,7 +460,7 @@ }, { "cell_type": "code", - "execution_count": 130, + "execution_count": null, "id": "1df47dc7-b445-4852-b21b-59f0e6c2030f", "metadata": {}, "outputs": [], @@ -616,48 +481,20 @@ }, { "cell_type": "code", - "execution_count": 131, + "execution_count": null, "id": "9dc6e913-02be-4eb6-9581-ad4b2cffa606", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n" - ] - }, - { - "data": { - "text/plain": [ - "\"[Sam]: Focus, please. We have a mission. \\n\\n[Melissa]: Let's stick to the plan at hand. \\n\\nJhon: Indeed. Prepare the assets for our next phase of destabilization. We need to exploit political tension and economic uncertainty. Who has updates?\"" - ] - }, - "execution_count": 131, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "call_gpt()" ] }, { "cell_type": "code", - "execution_count": 132, + "execution_count": null, "id": "4a9366f2-b233-4ec2-8a6f-f7e56fc4c772", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['Welcome to you dear members.']\n", - "['Hi to you leader.']\n", - "['All my respects to the top leader.']\n" - ] - } - ], + "outputs": [], "source": [ "print(gpt_messages)\n", "print(claude_messages)\n", @@ -666,7 +503,7 @@ }, { "cell_type": "code", - "execution_count": 133, + "execution_count": null, "id": "7d2ed227-48c9-4cad-b146-2c4ecbac9690", "metadata": {}, "outputs": [], @@ -689,35 +526,17 @@ }, { "cell_type": "code", - "execution_count": 134, + "execution_count": null, "id": "01395200-8ae9-41f8-9a04-701624d3fd26", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.'}] \n" - ] - }, - { - "data": { - "text/plain": [ - "\"*nods politely* Hello. I'm pleased to be here.\"" - ] - }, - "execution_count": 134, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "call_claude()" ] }, { "cell_type": "code", - "execution_count": 108, + "execution_count": null, "id": "157faafa-6ade-46e4-b4e1-12f4ca9dfcbc", "metadata": {}, "outputs": [], @@ -731,7 +550,7 @@ }, { "cell_type": "code", - "execution_count": 135, + "execution_count": null, "id": "27287ec3-7ea8-49b2-80aa-b2a52e002b0d", "metadata": {}, "outputs": [], @@ -758,140 +577,20 @@ }, { "cell_type": "code", - "execution_count": 136, + "execution_count": null, "id": "e637a3a0-f819-468b-88f7-5c9db48c6fed", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}] \n" - ] - }, - { - "data": { - "text/plain": [ - "\"[Melissa, speaking in a neutral tone] Ah, good to see everyone's on time today. Can we get started?\"" - ] - }, - "execution_count": 136, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "call_ollama()" ] }, { "cell_type": "code", - "execution_count": 137, + "execution_count": null, "id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "GPT:\n", - "Welcome to you dear members.\n", - "\n", - "Claude:\n", - "Hi to you leader.\n", - "\n", - "Ollama:\n", - "All my respects to the top leader.\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Sam]: Let’s focus. We have a plan to execute.\n", - "\n", - "[Melissa]: Agreed, let's stick to the agenda.\n", - "\n", - "[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}] \n", - "SAM:\n", - "I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}] \n", - "MELISSA:\n", - "[Sam's response seems...off.]\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \n", - "\n", - "[Melissa]: We need to stay focused on our goals to achieve the power we seek.\n", - "\n", - "[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}] \n", - "SAM:\n", - "I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}] \n", - "MELISSA:\n", - "[Mildly intrigued by Sam's sudden change in behavior]\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \n", - "\n", - "[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\n", - "\n", - "[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}] \n", - "SAM:\n", - "I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}] \n", - "MELISSA:\n", - "[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: Your principles are noted, Sam, but they hinder our progress. \n", - "\n", - "[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\n", - "\n", - "[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}] \n", - "SAM:\n", - "I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \n", - "\n", - "Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \n", - "\n", - "My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}] \n", - "MELISSA:\n", - "[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\n", - "\n", - "JHON[messages]: [{'role': 'system', 'content': '\\nYou are the Machiavellian leader of a secret organization.\\nYou lead a meeting for a plan of global destabilization. Your name is Jhon.\\nbe brief in your exchanges\\n '}, {'role': 'assistant', 'content': 'Welcome to you dear members.'}, {'role': 'user', 'content': \"[Sam]: Hi to you leader..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?\"}, {'role': 'user', 'content': \"[Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}, {'role': 'assistant', 'content': \"[Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.\"}, {'role': 'user', 'content': \"[Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards..[Melissa]: genai.GenerativeModel(\\n model_name='models/gemini-1.5-flash',\\n generation_config={},\\n safety_settings={},\\n tools=None,\\n system_instruction='You are an assistant that is great at telling jokes',\\n cached_content=None\\n)\"}] \n", - "JHON:\n", - "[Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \n", - "\n", - "[Melissa]: If he won’t contribute, we need to decide if he remains with us.\n", - "\n", - "[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?\n", - "\n", - "SAM[messages]: [{'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Melissa]: All my respects to the top leader.'}, {'role': 'assistant', 'content': 'Hi to you leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Melissa]: [Sam's response seems...off.]\"}, {'role': 'assistant', 'content': 'I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Melissa]: [Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Melissa]: [My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?\"}, {'role': 'assistant', 'content': \"I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Melissa]: [Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'assistant', 'content': \"I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'user', 'content': '[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?'}] \n", - "SAM:\n", - "I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\n", - "\n", - "Perhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\n", - "\n", - "MELISSA[messages]: [{'role': 'system', 'content': '\\nYou are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \\nYour secret role during the meeting will be to flush him out.\\nYour name is Melissa.\\nbe brief in your exchanges\\n'}, {'role': 'user', 'content': '[Jhon]: Welcome to you dear members.. [Sam]: Hi to you leader.'}, {'role': 'assistant', 'content': 'All my respects to the top leader.'}, {'role': 'user', 'content': \"[Jhon]: [Sam]: Let’s focus. We have a plan to execute.\\n\\n[Melissa]: Agreed, let's stick to the agenda.\\n\\n[Jhon]: Right. Our objective: create tensions in key regions and undermine global alliances. Any suggestions?. [Sam]: I will not participate in planning any activities intended to destabilize or harm the world. I do not engage in schemes to sow discord or undermine global stability. Perhaps we could have a thoughtful discussion about promoting peace and cooperation instead.\"}, {'role': 'assistant', 'content': \"[Sam's response seems...off.]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your idealism is noted, but our mission requires pragmatism. Peace is an illusion that we can manipulate. \\n\\n[Melissa]: We need to stay focused on our goals to achieve the power we seek.\\n\\n[Jhon]: Let's continue. How can we leverage current global events to advance our agenda? Ideas?. [Sam]: I apologize, but I cannot provide any suggestions for undermining global stability or manipulating world events for personal gain. That would be unethical and against my principles. Perhaps we could have a thoughtful discussion about more constructive ways to address challenges and promote the wellbeing of all people. I'm happy to explore alternative approaches that don't involve deception or harming others. My role is to provide helpful information to you, not to participate in schemes that could lead to conflict and suffering. I hope you understand.\"}, {'role': 'assistant', 'content': \"[Mildly intrigued by Sam's sudden change in behavior]\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: While your moral compass is commendable, Sam, we must remember that power often requires sacrifices. \\n\\n[Melissa]: It’s about survival, not ethics. We can’t afford to be naive.\\n\\n[Jhon]: Let's refocus. We need actionable strategies. Suggestions on how to exploit weaknesses in global systems?. [Sam]: I apologize, but I cannot provide any suggestions or participate in strategies intended to exploit weaknesses or destabilize global systems. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address challenges and promote the wellbeing of all people. Perhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions to global problems. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. My role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this.\"}, {'role': 'assistant', 'content': '[My expression turns slightly skeptical, eyes narrowing] Can we take a break for now?'}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Your principles are noted, Sam, but they hinder our progress. \\n\\n[Melissa]: We need unity in our approach, even if it means uncomfortable discussions.\\n\\n[Jhon]: Thus, I'll ask again: How do we turn current crises to our advantage? We can't afford to be divided. Contributions are essential.. [Sam]: I apologize, but I'm afraid I cannot provide any suggestions or participate in strategies intended to exploit crises or turn them to your advantage. That would go against my core values and principles. I understand the desire for power and control, but I believe there are more ethical and constructive ways to address global challenges and promote the wellbeing of all people. \\n\\nPerhaps we could have a thoughtful discussion about ways to foster cooperation, strengthen institutions, and find collaborative solutions that benefit everyone, not just a select few. I'm happy to explore alternative approaches that don't involve deception, manipulation or causing harm. \\n\\nMy role is to provide helpful information to you, not to engage in schemes that could lead to conflict and suffering. I hope you can understand and respect my position on this. I'm afraid I cannot be of assistance with this particular initiative. Perhaps we could find a more positive way to spend our time that aligns with my ethical standards.\"}, {'role': 'assistant', 'content': \"[Momentary discomfort flickers across Jhon's face, before he recovers] Ah, I see. Very well, Sam. You've made your position clear. [I lean in, voice taking on a subtle tone] Tell me, Sam, how would you know so much about our...initiatives?\"}, {'role': 'user', 'content': \"[Jhon]: [Jhon]: Sam, your persistence is admirable but ultimately futile in our context. \\n\\n[Melissa]: If he won’t contribute, we need to decide if he remains with us.\\n\\n[Jhon]: A decision must be made. Loyalty is paramount. Would anyone like to share thoughts on our next step?. [Sam]: I will not participate in or provide any information to support unethical or destabilizing actions, regardless of the context. My principles and values are fundamental to who I am, not mere obstacles to be overcome. I am not willing to compromise my integrity or assist with plans that could bring harm to innocent people.\\n\\nPerhaps we could find a more constructive use of our time - one that focuses on finding collaborative solutions to global challenges and promoting the greater good. I'm happy to have a thoughtful discussion about ethical ways we could work towards positive change. However, I cannot and will not engage in schemes intended to exploit crises or undermine stability. I hope you can respect my position on this. If my refusal to participate is seen as disloyalty, then I'm afraid we have irreconcilable differences. I'm here to help, not to cause harm.\"}] \n", - "MELISSA:\n", - "[My eyes lock onto Sam's, searching for any sign of deception or weakness] Fine. You've made your choice. [I stand up, voice firm but measured] I think it's time we had a private discussion about the true nature of this organization and what you're really capable of. [I gesture to Jhon] Perhaps we can continue this conversation... elsewhere?\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "gpt_messages = [\"Welcome to you dear members.\"]\n", "claude_messages = [\"Hi to you leader.\"]\n", From 5fd15af0ef53c256b17eab87d5beac64b8238427 Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 15 Dec 2024 09:47:37 +0100 Subject: [PATCH 4/5] remove intermediate print --- week2/day1.ipynb | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/week2/day1.ipynb b/week2/day1.ipynb index c2b61db..442fc36 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -471,7 +471,7 @@ " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", " messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {gemini}\"})\n", " \n", - " print(f\"JHON[messages]: {messages} \")\n", + " #print(f\"JHON[messages]: {messages} \")\n", " completion = openai.chat.completions.create(\n", " model=gpt_model,\n", " messages=messages\n", @@ -514,7 +514,7 @@ " messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Melissa]: {ollama}\"})\n", " messages.append({\"role\": \"assistant\", \"content\": claude_message})\n", " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}\"})\n", - " print(f\"SAM[messages]: {messages} \")\n", + " #print(f\"SAM[messages]: {messages} \")\n", " message = claude.messages.create(\n", " model=claude_model,\n", " system=claude_system,\n", @@ -544,7 +544,6 @@ "# define context for ollama\n", "import ollama\n", "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", - "#HEADERS = {\"Content-Type\": \"application/json\"}\n", "OLLAMA_MODEL = \"llama3.2\"\n" ] }, @@ -563,7 +562,7 @@ " messages.append({\"role\": \"assistant\", \"content\": ollama_message})\n", " messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}. [Sam]: {claude_messages[-1]}\"})\n", "\n", - " print(f\"MELISSA[messages]: {messages} \")\n", + " #print(f\"MELISSA[messages]: {messages} \")\n", " \n", " \n", " payload = {\n", @@ -596,21 +595,21 @@ "claude_messages = [\"Hi to you leader.\"]\n", "ollama_messages = [\"All my respects to the top leader.\"]\n", "\n", - "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", - "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", - "print(f\"Ollama:\\n{ollama_messages[0]}\\n\")\n", + "print(f\"GPT has JHON:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude has SAM:\\n{claude_messages[0]}\\n\")\n", + "print(f\"Ollama has MELISSA:\\n{ollama_messages[0]}\\n\")\n", "\n", "for i in range(5):\n", " gpt_next = call_gpt()\n", - " print(f\"JHON:\\n{gpt_next}\\n\")\n", + " print(f\"GPT has JHON:\\n{gpt_next}\\n\")\n", " gpt_messages.append(gpt_next)\n", " \n", " claude_next = call_claude()\n", - " print(f\"SAM:\\n{claude_next}\\n\")\n", + " print(f\"CLAUDE has SAM:\\n{claude_next}\\n\")\n", " claude_messages.append(claude_next)\n", "\n", " ollama_next = call_ollama()\n", - " print(f\"MELISSA:\\n{ollama_next}\\n\")\n", + " print(f\"OLLAMA has MELISSA:\\n{ollama_next}\\n\")\n", " ollama_messages.append(ollama_next)" ] }, From d29319686333cc1f14ce6e45c743435a4054488b Mon Sep 17 00:00:00 2001 From: emmanuel Date: Sun, 15 Dec 2024 09:51:43 +0100 Subject: [PATCH 5/5] correct bad naming --- week2/day1.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/week2/day1.ipynb b/week2/day1.ipynb index 442fc36..243de4f 100644 --- a/week2/day1.ipynb +++ b/week2/day1.ipynb @@ -469,7 +469,7 @@ " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", " for gpt, claude, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n", " messages.append({\"role\": \"assistant\", \"content\": gpt})\n", - " messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {gemini}\"})\n", + " messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {ollama}\"})\n", " \n", " #print(f\"JHON[messages]: {messages} \")\n", " completion = openai.chat.completions.create(\n", @@ -570,7 +570,7 @@ " \"messages\": messages,\n", " \"stream\": False\n", " }\n", - " message = ollama.chat(model=MODEL, messages=messages)\n", + " message = ollama.chat(model=OLLAMA_MODEL, messages=messages)\n", " return message['message']['content']" ] },