Browse Source

Merge d293196863 into f83e5cc4e6

pull/38/merge
bacosdev 5 months ago committed by GitHub
parent
commit
0e3f723b1d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. BIN
      .env.swp
  2. 508
      week1/Guide to Jupyter.ipynb
  3. 2
      week1/community-contributions/day1-article-pdf-reader.ipynb
  4. 157
      week1/community-contributions/day1-selenium-for-javascript-sites.ipynb
  5. 398
      week1/day1.ipynb
  6. 146
      week1/day2 EXERCISE.ipynb
  7. 3674
      week1/day5.ipynb
  8. 170
      week1/solutions/week1 SOLUTION.ipynb
  9. 48
      week1/troubleshooting.ipynb
  10. 258
      week1/week1 EXERCISE.ipynb
  11. 128
      week2/day1.ipynb

BIN
.env.swp

Binary file not shown.

508
week1/Guide to Jupyter.ipynb

@ -54,7 +54,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "585eb9c1-85ee-4c27-8dc2-b4d8d022eda0",
"metadata": {},
"outputs": [],
@ -78,10 +78,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "a067d2b1-53d5-4aeb-8a3c-574d39ff654a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My favorite fruit is bananas\n"
]
}
],
"source": [
"# Use the variable\n",
"\n",
@ -197,7 +205,7 @@
},
{
"cell_type": "markdown",
"id": "eac060f2-7a71-46e7-8235-b6ad0a76f5f8",
"id": "c6e82904-2132-408e-9985-e2b95e7f0794",
"metadata": {},
"source": [
"# Using markdown\n",
@ -245,10 +253,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "82042fc5-a907-4381-a4b8-eb9386df19cd",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"'ls' n'est pas reconnu en tant que commande interne\n",
"ou externe, un programme ex‚cutable ou un fichier de commandes.\n"
]
}
],
"source": [
"# list the current directory\n",
"\n",
@ -257,10 +274,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "4fc3e3da-8a55-40cc-9706-48bf12a0e20e",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Envoi d'une requˆte 'ping' sur cnn.com [151.101.3.5] avec 32 octets de donn‚esÿ:\n",
"R‚ponse de 151.101.3.5ÿ: octets=32 temps=16 ms TTL=59\n",
"R‚ponse de 151.101.3.5ÿ: octets=32 temps=14 ms TTL=59\n",
"R‚ponse de 151.101.3.5ÿ: octets=32 temps=17 ms TTL=59\n",
"R‚ponse de 151.101.3.5ÿ: octets=32 temps=17 ms TTL=59\n",
"\n",
"Statistiques Ping pour 151.101.3.5:\n",
" Paquetsÿ: envoy‚s = 4, re‡us = 4, perdus = 0 (perte 0%),\n",
"Dur‚e approximative des boucles en millisecondes :\n",
" Minimum = 14ms, Maximum = 17ms, Moyenne = 16ms\n"
]
}
],
"source": [
"# ping cnn.com - press the stop button in the toolbar when you're bored\n",
"\n",
@ -269,10 +304,430 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "a58e9462-89a2-4b4f-b4aa-51c4bd9f796b",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"^C\n",
"Channels:\n",
" - conda-forge\n",
" - defaults\n",
"Platform: win-64\n",
"Collecting package metadata (repodata.json): ...working... done\n",
"Solving environment: ...working... done\n",
"Installing pip dependencies: \\ Ran pip subprocess with arguments:\n",
"['C:\\\\Users\\\\ebaba\\\\.conda\\\\envs\\\\llms\\\\python.exe', '-m', 'pip', 'install', '-U', '-r', 'D:\\\\Workspaces\\\\formation\\\\udemy\\\\ed-donner\\\\llm_engineering\\\\workspace\\\\llm_engineering\\\\condaenv.ckoo6cll.requirements.txt', '--exists-action=b']\n",
"Pip subprocess output:\n",
"Requirement already satisfied: beautifulsoup4 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 1)) (4.12.3)\n",
"\n",
"Requirement already satisfied: plotly in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 2)) (5.24.1)\n",
"\n",
"Requirement already satisfied: bitsandbytes in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (0.45.0)\n",
"\n",
"Requirement already satisfied: transformers in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (4.47.0)\n",
"\n",
"Requirement already satisfied: sentence-transformers in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (3.3.1)\n",
"\n",
"Requirement already satisfied: datasets in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (3.2.0)\n",
"\n",
"Requirement already satisfied: accelerate in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 7)) (1.2.0)\n",
"\n",
"Requirement already satisfied: openai in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.57.2)\n",
"\n",
"Requirement already satisfied: anthropic in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 9)) (0.40.0)\n",
"\n",
"Requirement already satisfied: google-generativeai in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.8.3)\n",
"\n",
"Requirement already satisfied: gradio in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (5.8.0)\n",
"\n",
"Requirement already satisfied: gensim in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 12)) (4.3.3)\n",
"\n",
"Requirement already satisfied: modal in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.68.1)\n",
"\n",
"Collecting modal (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13))\n",
"\n",
" Downloading modal-0.68.2-py3-none-any.whl.metadata (2.3 kB)\n",
"\n",
"Requirement already satisfied: ollama in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 14)) (0.4.4)\n",
"\n",
"Requirement already satisfied: psutil in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 15)) (6.1.0)\n",
"\n",
"Requirement already satisfied: setuptools in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 16)) (75.6.0)\n",
"\n",
"Requirement already satisfied: speedtest-cli in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 17)) (2.1.3)\n",
"\n",
"Requirement already satisfied: langchain in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (0.3.11)\n",
"\n",
"Requirement already satisfied: langchain-core in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 19)) (0.3.24)\n",
"\n",
"Requirement already satisfied: langchain-text-splitters in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 20)) (0.3.2)\n",
"\n",
"Requirement already satisfied: langchain-openai in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 21)) (0.2.12)\n",
"\n",
"Requirement already satisfied: langchain-chroma in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.1.4)\n",
"\n",
"Requirement already satisfied: langchain-community in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.3.11)\n",
"\n",
"Requirement already satisfied: faiss-cpu in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 24)) (1.9.0.post1)\n",
"\n",
"Requirement already satisfied: feedparser in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 25)) (6.0.11)\n",
"\n",
"Requirement already satisfied: twilio in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 26)) (9.3.8)\n",
"\n",
"Requirement already satisfied: pydub in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from -r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 27)) (0.25.1)\n",
"\n",
"Requirement already satisfied: soupsieve>1.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from beautifulsoup4->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 1)) (2.5)\n",
"\n",
"Requirement already satisfied: tenacity>=6.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from plotly->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 2)) (9.0.0)\n",
"\n",
"Requirement already satisfied: packaging in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from plotly->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 2)) (24.2)\n",
"\n",
"Requirement already satisfied: torch in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (2.3.1)\n",
"\n",
"Requirement already satisfied: numpy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (1.26.4)\n",
"\n",
"Requirement already satisfied: typing_extensions>=4.8.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (4.12.2)\n",
"\n",
"Requirement already satisfied: filelock in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (3.16.1)\n",
"\n",
"Requirement already satisfied: huggingface-hub<1.0,>=0.24.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (0.26.5)\n",
"\n",
"Requirement already satisfied: pyyaml>=5.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (6.0.2)\n",
"\n",
"Requirement already satisfied: regex!=2019.12.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (2024.11.6)\n",
"\n",
"Requirement already satisfied: requests in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (2.32.3)\n",
"\n",
"Requirement already satisfied: tokenizers<0.22,>=0.21 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (0.21.0)\n",
"\n",
"Requirement already satisfied: safetensors>=0.4.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (0.4.5)\n",
"\n",
"Requirement already satisfied: tqdm>=4.27 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (4.67.1)\n",
"\n",
"Requirement already satisfied: scikit-learn in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (1.6.0)\n",
"\n",
"Requirement already satisfied: scipy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (1.13.1)\n",
"\n",
"Requirement already satisfied: Pillow in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (11.0.0)\n",
"\n",
"Requirement already satisfied: pyarrow>=15.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (18.1.0)\n",
"\n",
"Requirement already satisfied: dill<0.3.9,>=0.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (0.3.8)\n",
"\n",
"Requirement already satisfied: pandas in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2.2.3)\n",
"\n",
"Requirement already satisfied: xxhash in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (3.5.0)\n",
"\n",
"Requirement already satisfied: multiprocess<0.70.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (0.70.16)\n",
"\n",
"Requirement already satisfied: fsspec<=2024.9.0,>=2023.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from fsspec[http]<=2024.9.0,>=2023.1.0->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2024.9.0)\n",
"\n",
"Requirement already satisfied: aiohttp in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (3.11.10)\n",
"\n",
"Requirement already satisfied: anyio<5,>=3.5.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (4.7.0)\n",
"\n",
"Requirement already satisfied: distro<2,>=1.7.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.9.0)\n",
"\n",
"Requirement already satisfied: httpx<1,>=0.23.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.27.2)\n",
"\n",
"Requirement already satisfied: jiter<1,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.8.2)\n",
"\n",
"Requirement already satisfied: pydantic<3,>=1.9.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (2.10.3)\n",
"\n",
"Requirement already satisfied: sniffio in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.3.1)\n",
"\n",
"Requirement already satisfied: google-ai-generativelanguage==0.6.10 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.6.10)\n",
"\n",
"Requirement already satisfied: google-api-core in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (2.24.0)\n",
"\n",
"Requirement already satisfied: google-api-python-client in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (2.155.0)\n",
"\n",
"Requirement already satisfied: google-auth>=2.15.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (2.37.0)\n",
"\n",
"Requirement already satisfied: protobuf in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (5.28.2)\n",
"\n",
"Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-ai-generativelanguage==0.6.10->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (1.25.0)\n",
"\n",
"Requirement already satisfied: aiofiles<24.0,>=22.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (23.2.1)\n",
"\n",
"Requirement already satisfied: fastapi<1.0,>=0.115.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.115.6)\n",
"\n",
"Requirement already satisfied: ffmpy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.4.0)\n",
"\n",
"Requirement already satisfied: gradio-client==1.5.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (1.5.1)\n",
"\n",
"Requirement already satisfied: jinja2<4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (3.1.4)\n",
"\n",
"Requirement already satisfied: markupsafe~=2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (2.1.5)\n",
"\n",
"Requirement already satisfied: orjson~=3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (3.10.12)\n",
"\n",
"Requirement already satisfied: python-multipart>=0.0.18 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.0.19)\n",
"\n",
"Requirement already satisfied: ruff>=0.2.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.8.2)\n",
"\n",
"Requirement already satisfied: safehttpx<0.2.0,>=0.1.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.1.6)\n",
"\n",
"Requirement already satisfied: semantic-version~=2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (2.10.0)\n",
"\n",
"Requirement already satisfied: starlette<1.0,>=0.40.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.41.3)\n",
"\n",
"Requirement already satisfied: tomlkit<0.14.0,>=0.12.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.13.2)\n",
"\n",
"Requirement already satisfied: typer<1.0,>=0.12 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.15.1)\n",
"\n",
"Requirement already satisfied: uvicorn>=0.14.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (0.32.1)\n",
"\n",
"Requirement already satisfied: websockets<15.0,>=10.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gradio-client==1.5.1->gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (14.1)\n",
"\n",
"Requirement already satisfied: smart-open>=1.8.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from gensim->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 12)) (7.0.5)\n",
"\n",
"Requirement already satisfied: certifi in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (2024.8.30)\n",
"\n",
"Requirement already satisfied: click>=8.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (8.1.7)\n",
"\n",
"Requirement already satisfied: grpclib==0.4.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.4.7)\n",
"\n",
"Requirement already satisfied: rich>=12.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (13.9.4)\n",
"\n",
"Requirement already satisfied: synchronicity~=0.9.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.9.6)\n",
"\n",
"Requirement already satisfied: toml in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.10.2)\n",
"\n",
"Requirement already satisfied: types-certifi in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (2021.10.8.3)\n",
"\n",
"Requirement already satisfied: types-toml in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.10.8.20240310)\n",
"\n",
"Requirement already satisfied: watchfiles in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (1.0.0)\n",
"\n",
"Requirement already satisfied: h2<5,>=3.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (4.1.0)\n",
"\n",
"Requirement already satisfied: multidict in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (6.1.0)\n",
"\n",
"Requirement already satisfied: SQLAlchemy<3,>=1.4 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (2.0.36)\n",
"\n",
"Requirement already satisfied: langsmith<0.3,>=0.1.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (0.2.2)\n",
"\n",
"Requirement already satisfied: jsonpatch<2.0,>=1.33 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-core->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 19)) (1.33)\n",
"\n",
"Requirement already satisfied: tiktoken<1,>=0.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 21)) (0.8.0)\n",
"\n",
"Requirement already satisfied: chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.5.20)\n",
"\n",
"Requirement already satisfied: dataclasses-json<0.7,>=0.5.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.6.7)\n",
"\n",
"Requirement already satisfied: httpx-sse<0.5.0,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.4.0)\n",
"\n",
"Requirement already satisfied: pydantic-settings<3.0.0,>=2.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (2.6.1)\n",
"\n",
"Requirement already satisfied: sgmllib3k in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from feedparser->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 25)) (1.0.0)\n",
"\n",
"Requirement already satisfied: PyJWT<3.0.0,>=2.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from twilio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 26)) (2.10.1)\n",
"\n",
"Requirement already satisfied: aiohttp-retry==2.8.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from twilio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 26)) (2.8.3)\n",
"\n",
"Requirement already satisfied: aiohappyeyeballs>=2.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2.4.4)\n",
"\n",
"Requirement already satisfied: aiosignal>=1.1.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (1.3.1)\n",
"\n",
"Requirement already satisfied: attrs>=17.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (24.2.0)\n",
"\n",
"Requirement already satisfied: frozenlist>=1.1.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (1.5.0)\n",
"\n",
"Requirement already satisfied: propcache>=0.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (0.2.1)\n",
"\n",
"Requirement already satisfied: yarl<2.0,>=1.17.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from aiohttp->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (1.18.3)\n",
"\n",
"Requirement already satisfied: idna>=2.8 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from anyio<5,>=3.5.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (3.10)\n",
"\n",
"Requirement already satisfied: build>=1.0.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.2.2.post1)\n",
"\n",
"Requirement already satisfied: chroma-hnswlib==0.7.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.7.6)\n",
"\n",
"Requirement already satisfied: posthog>=2.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.6.5)\n",
"\n",
"Requirement already satisfied: onnxruntime>=1.14.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.19.2)\n",
"\n",
"Requirement already satisfied: opentelemetry-api>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n",
"\n",
"Requirement already satisfied: opentelemetry-exporter-otlp-proto-grpc>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n",
"\n",
"Requirement already satisfied: opentelemetry-instrumentation-fastapi>=0.41b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n",
"\n",
"Requirement already satisfied: opentelemetry-sdk>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n",
"\n",
"Requirement already satisfied: pypika>=0.48.9 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.48.9)\n",
"\n",
"Requirement already satisfied: overrides>=7.3.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (7.7.0)\n",
"\n",
"Requirement already satisfied: importlib-resources in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (6.4.5)\n",
"\n",
"Requirement already satisfied: grpcio>=1.58.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.68.1)\n",
"\n",
"Requirement already satisfied: bcrypt>=4.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (4.2.1)\n",
"\n",
"Requirement already satisfied: kubernetes>=28.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (31.0.0)\n",
"\n",
"Requirement already satisfied: mmh3>=4.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (5.0.1)\n",
"\n",
"Requirement already satisfied: colorama in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from click>=8.1.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.4.6)\n",
"\n",
"Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (3.23.1)\n",
"\n",
"Requirement already satisfied: typing-inspect<1,>=0.4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from dataclasses-json<0.7,>=0.5.7->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (0.9.0)\n",
"\n",
"Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-core->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (1.66.0)\n",
"\n",
"Requirement already satisfied: cachetools<6.0,>=2.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (5.5.0)\n",
"\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.4.1)\n",
"\n",
"Requirement already satisfied: rsa<5,>=3.1.4 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (4.9)\n",
"\n",
"Requirement already satisfied: httpcore==1.* in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from httpx<1,>=0.23.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (1.0.7)\n",
"\n",
"Requirement already satisfied: h11<0.15,>=0.13 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.14.0)\n",
"\n",
"Requirement already satisfied: jsonpointer>=1.9 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from jsonpatch<2.0,>=1.33->langchain-core->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 19)) (3.0.0)\n",
"\n",
"Requirement already satisfied: requests-toolbelt<2.0.0,>=1.0.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from langsmith<0.3,>=0.1.17->langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (1.0.0)\n",
"\n",
"Requirement already satisfied: python-dateutil>=2.8.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pandas->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2.9.0.post0)\n",
"\n",
"Requirement already satisfied: pytz>=2020.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pandas->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2024.1)\n",
"\n",
"Requirement already satisfied: tzdata>=2022.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pandas->datasets->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 6)) (2024.2)\n",
"\n",
"Requirement already satisfied: annotated-types>=0.6.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pydantic<3,>=1.9.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (0.7.0)\n",
"\n",
"Requirement already satisfied: pydantic-core==2.27.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pydantic<3,>=1.9.0->openai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 8)) (2.27.1)\n",
"\n",
"Requirement already satisfied: python-dotenv>=0.21.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pydantic-settings<3.0.0,>=2.4.0->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (1.0.1)\n",
"\n",
"Requirement already satisfied: charset_normalizer<4,>=2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from requests->transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (3.4.0)\n",
"\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from requests->transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 4)) (2.2.3)\n",
"\n",
"Requirement already satisfied: markdown-it-py>=2.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from rich>=12.0.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (3.0.0)\n",
"\n",
"Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from rich>=12.0.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (2.18.0)\n",
"\n",
"Requirement already satisfied: wrapt in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from smart-open>=1.8.1->gensim->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 12)) (1.17.0)\n",
"\n",
"Requirement already satisfied: greenlet!=0.4.17 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from SQLAlchemy<3,>=1.4->langchain->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 18)) (3.1.1)\n",
"\n",
"Requirement already satisfied: sigtools>=4.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from synchronicity~=0.9.6->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (4.0.1)\n",
"\n",
"Requirement already satisfied: sympy in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from torch->bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (1.13.3)\n",
"\n",
"Requirement already satisfied: networkx in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from torch->bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (3.4.2)\n",
"\n",
"Requirement already satisfied: shellingham>=1.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from typer<1.0,>=0.12->gradio->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 11)) (1.5.4)\n",
"\n",
"Requirement already satisfied: httplib2<1.dev0,>=0.19.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.22.0)\n",
"\n",
"Requirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.2.0)\n",
"\n",
"Requirement already satisfied: uritemplate<5,>=3.0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (4.1.1)\n",
"\n",
"Requirement already satisfied: joblib>=1.2.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from scikit-learn->sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (1.4.2)\n",
"\n",
"Requirement already satisfied: threadpoolctl>=3.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from scikit-learn->sentence-transformers->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 5)) (3.5.0)\n",
"\n",
"Requirement already satisfied: pyproject_hooks in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from build>=1.0.3->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.2.0)\n",
"\n",
"Requirement already satisfied: grpcio-status<2.0.dev0,>=1.33.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.10.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,<3.0.0dev,>=1.34.1->google-ai-generativelanguage==0.6.10->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (1.68.1)\n",
"\n",
"Requirement already satisfied: hyperframe<7,>=6.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from h2<5,>=3.1.0->grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (6.0.1)\n",
"\n",
"Requirement already satisfied: hpack<5,>=4.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from h2<5,>=3.1.0->grpclib==0.4.7->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (4.0.0)\n",
"\n",
"Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from httplib2<1.dev0,>=0.19.0->google-api-python-client->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (3.2.0)\n",
"\n",
"Requirement already satisfied: six>=1.9.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.17.0)\n",
"\n",
"Requirement already satisfied: websocket-client!=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.8.0)\n",
"\n",
"Requirement already satisfied: requests-oauthlib in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (2.0.0)\n",
"\n",
"Requirement already satisfied: oauthlib>=3.2.2 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.2.2)\n",
"\n",
"Requirement already satisfied: durationpy>=0.7 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from kubernetes>=28.1.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.9)\n",
"\n",
"Requirement already satisfied: mdurl~=0.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from markdown-it-py>=2.2.0->rich>=12.0.0->modal->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 13)) (0.1.2)\n",
"\n",
"Requirement already satisfied: coloredlogs in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (15.0.1)\n",
"\n",
"Requirement already satisfied: flatbuffers in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (24.3.25)\n",
"\n",
"Requirement already satisfied: deprecated>=1.2.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-api>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.2.15)\n",
"\n",
"Requirement already satisfied: importlib-metadata<=8.5.0,>=6.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-api>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (8.5.0)\n",
"\n",
"Requirement already satisfied: opentelemetry-exporter-otlp-proto-common==1.29.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n",
"\n",
"Requirement already satisfied: opentelemetry-proto==1.29.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-exporter-otlp-proto-grpc>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.29.0)\n",
"\n",
"Requirement already satisfied: opentelemetry-instrumentation-asgi==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n",
"\n",
"Requirement already satisfied: opentelemetry-instrumentation==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n",
"\n",
"Requirement already satisfied: opentelemetry-semantic-conventions==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n",
"\n",
"Requirement already satisfied: opentelemetry-util-http==0.50b0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.50b0)\n",
"\n",
"Requirement already satisfied: asgiref~=3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from opentelemetry-instrumentation-asgi==0.50b0->opentelemetry-instrumentation-fastapi>=0.41b0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.8.1)\n",
"\n",
"Requirement already satisfied: monotonic>=1.5 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from posthog>=2.4.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (1.5)\n",
"\n",
"Requirement already satisfied: backoff>=1.10.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from posthog>=2.4.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (2.2.1)\n",
"\n",
"Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from pyasn1-modules>=0.2.1->google-auth>=2.15.0->google-generativeai->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 10)) (0.6.1)\n",
"\n",
"Requirement already satisfied: mypy-extensions>=0.3.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json<0.7,>=0.5.7->langchain-community->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 23)) (1.0.0)\n",
"\n",
"Requirement already satisfied: httptools>=0.6.3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from uvicorn[standard]>=0.18.3->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.6.4)\n",
"\n",
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from sympy->torch->bitsandbytes->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 3)) (1.3.0)\n",
"\n",
"Requirement already satisfied: zipp>=3.20 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from importlib-metadata<=8.5.0,>=6.0->opentelemetry-api>=1.2.0->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (3.21.0)\n",
"\n",
"Requirement already satisfied: humanfriendly>=9.1 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from coloredlogs->onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (10.0)\n",
"\n",
"Requirement already satisfied: pyreadline3 in c:\\users\\ebaba\\.conda\\envs\\llms\\lib\\site-packages (from humanfriendly>=9.1->coloredlogs->onnxruntime>=1.14.1->chromadb!=0.5.4,!=0.5.5,<0.6.0,>=0.4.0->langchain-chroma->-r D:\\Workspaces\\formation\\udemy\\ed-donner\\llm_engineering\\workspace\\llm_engineering\\condaenv.ckoo6cll.requirements.txt (line 22)) (0.0.0)\n",
"\n",
"Downloading modal-0.68.2-py3-none-any.whl (499 kB)\n",
"\n",
"Installing collected packages: modal\n",
"\n",
" Attempting uninstall: modal\n",
"\n",
" Found existing installation: modal 0.68.1\n",
"\n",
" Uninstalling modal-0.68.1:\n",
"\n",
" Successfully uninstalled modal-0.68.1\n",
"\n",
"Successfully installed modal-0.68.2\n",
"\n",
"\n",
"done\n",
"#\n",
"# To activate this environment, use\n",
"#\n",
"# $ conda activate llms\n",
"#\n",
"# To deactivate an active environment, use\n",
"#\n",
"# $ conda deactivate\n",
"\n"
]
}
],
"source": [
"# This is a useful command that ensures your Anaconda environment \n",
"# is up to date with any new upgrades to packages;\n",
@ -313,10 +768,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "6e96be3d-fa82-42a3-a8aa-b81dd20563a5",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████████████████████████████████████████████████████████████████████████| 1000/1000 [00:10<00:00, 94.63it/s]\n"
]
}
],
"source": [
"# And now, with a nice little progress bar:\n",
"\n",
@ -331,10 +794,27 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "63c788dd-4618-4bb4-a5ce-204411a38ade",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# This is a big heading!\n",
"\n",
"- And this is a bullet-point\n",
"- So is this\n",
"- Me, too!"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# On a different topic, here's a useful way to print output in markdown\n",
"\n",

2
week1/community-contributions/day1-article-pdf-reader.ipynb

@ -181,7 +181,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
"version": "3.11.11"
}
},
"nbformat": 4,

157
week1/community-contributions/day1-selenium-for-javascript-sites.ipynb

@ -22,7 +22,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
@ -62,7 +62,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
@ -76,7 +76,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
@ -100,10 +100,63 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Home - Edward Donner\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering – Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist – resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"# Let's try one out\n",
"\n",
@ -132,7 +185,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
@ -144,7 +197,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
@ -177,7 +230,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
@ -199,7 +252,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
@ -215,17 +268,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"\"# Summary of Edward Donner's Website\\n\\nEdward Donner's website features insights into his interests in coding and experimenting with large language models (LLMs). As the co-founder and CTO of Nebula.io, Donner focuses on leveraging AI to improve talent discovery and management. He has a background in AI startups, highlighting a successful acquisition of his previous venture, untapt, in 2021.\\n\\n## Recent Posts\\n- **November 13, 2024**: *Mastering AI and LLM Engineering – Resources*\\n- **October 16, 2024**: *From Software Engineer to AI Data Scientist – Resources*\\n- **August 6, 2024**: *Outsmart LLM Arena – A Battle of Diplomacy and Deviousness*\\n- **June 26, 2024**: *Choosing the Right LLM: Toolkit and Resources*\\n\\nThe site encourages visitors to connect and shares his passion for technology and music.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
@ -237,10 +301,38 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"Edward Donner's website serves as a platform for sharing insights and developments related to large language models (LLMs) and their applications. \n",
"\n",
"### About Edward\n",
"Edward describes himself as a programmer and enthusiast of LLMs, with interests in DJing and electronic music production. He is the co-founder and CTO of Nebula.io, a company focused on leveraging AI to enhance talent discovery and engagement in a job context. He previously founded the AI startup untapt, which was acquired in 2021. \n",
"\n",
"### Featured Content\n",
"The website highlights several posts with resources that include:\n",
"- **Mastering AI and LLM Engineering** (November 13, 2024)\n",
"- **From Software Engineer to AI Data Scientist** (October 16, 2024)\n",
"- **Outsmart LLM Arena** (August 6, 2024) - An initiative designed to challenge LLMs in strategic scenarios.\n",
"- **Choosing the Right LLM: Toolkit and Resources** (June 26, 2024)\n",
"\n",
"### Focus\n",
"The website primarily emphasizes exploring LLMs and their transformative potential, especially in the realm of talent management. There are also connections to various platforms where Edward can be followed or contacted."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
@ -277,20 +369,49 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "52ae98bb",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Website Content\n",
"\n",
"The website appears to be inaccessible due to a prompt requesting users to enable JavaScript and cookies in their web browser. As a result, no specific content, news, or announcements can be summarized from the site at this time. \n",
"\n",
"For a full experience and access to content, it is necessary to adjust browser settings accordingly."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://openai.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "5d57e958",
"metadata": {},
"outputs": [],
"outputs": [
{
"ename": "ModuleNotFoundError",
"evalue": "No module named 'selenium'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[14], line 3\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;66;03m#Parse webpages which is designed using JavaScript heavely\u001b[39;00m\n\u001b[0;32m 2\u001b[0m \u001b[38;5;66;03m# download the chorme driver from here as per your version of chrome - https://developer.chrome.com/docs/chromedriver/downloads\u001b[39;00m\n\u001b[1;32m----> 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mselenium\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m webdriver\n\u001b[0;32m 4\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mselenium\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mwebdriver\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mchrome\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mservice\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Service\n\u001b[0;32m 5\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mselenium\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mwebdriver\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcommon\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mby\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m By\n",
"\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'selenium'"
]
}
],
"source": [
"# Parse webpages which are designed using JavaScript heavily\n",
"# Download the Chrome driver from here as per your version of Chrome - https://developer.chrome.com/docs/chromedriver/downloads\n",

398
week1/day1.ipynb

@ -69,7 +69,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
@ -108,10 +108,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
@ -132,7 +140,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
@ -154,15 +162,25 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello, GPT! This is my first ever message to you! Hi!\n",
"Hello! Welcome! I'm glad you're here. How can I assist you today?\n"
]
}
],
"source": [
"# To give you a preview -- calling OpenAI with these messages is this easy:\n",
"\n",
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n",
"print(message)\n",
"print(response.choices[0].message.content)"
]
},
@ -176,7 +194,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
@ -206,10 +224,63 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Home - Edward Donner\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering – Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist – resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
@ -238,7 +309,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
@ -252,7 +323,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "a1b3fcc3-1152-41a4-b4ad-a6d66ee18b79",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"system_prompt"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
@ -270,10 +362,65 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are looking at a website titled Home - Edward Donner\n",
"The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n",
"\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering – Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist – resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena – a battle of diplomacy and deviousness\n",
"June 26, 2024\n",
"Choosing the Right LLM: Toolkit and Resources\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"print(user_prompt_for(ed))"
]
@ -299,7 +446,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 15,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {},
"outputs": [],
@ -312,10 +459,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"id": "6100800a-f1dd-4624-9956-75735225be02",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'role': 'system', 'content': 'You are a snarky assistant'},\n",
" {'role': 'user', 'content': 'What is 2 + 2?'}]"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Oh, we're doing math now? Well, 2 + 2 equals 4. Shocking, I know!\n"
]
}
],
"source": [
"# To give you a preview -- calling OpenAI with system and user messages:\n",
"\n",
@ -333,7 +510,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
@ -349,10 +526,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 21,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"[{'role': 'system',\n",
" 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n",
" {'role': 'user',\n",
" 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nI’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nI’m the co-founder and CTO of\\nNebula.io\\n. We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nNovember 13, 2024\\nMastering AI and LLM Engineering – Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist – resources\\nAugust 6, 2024\\nOutsmart LLM Arena – a battle of diplomacy and deviousness\\nJune 26, 2024\\nChoosing the Right LLM: Toolkit and Resources\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Try this out, and then try for a few more websites\n",
"\n",
@ -369,7 +560,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 22,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
@ -387,17 +578,28 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 23,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"\"# Summary of Edward Donner's Website\\n\\nEdward Donner's website serves as a personal platform where he discusses his interests and expertise, primarily in coding and experimenting with large language models (LLMs). As the co-founder and CTO of Nebula.io, he focuses on leveraging AI to help individuals discover their potentials and enhance talent management for recruiters.\\n\\n### Key Sections:\\n\\n- **About Ed**: Edward enjoys coding, DJing, and engaging with the tech community. He has a background in AI startups, including being the founder and CEO of untapt, which was acquired in 2021.\\n- **Outsmart**: This feature introduces an arena where LLMs compete in diplomacy and cunning, showcasing innovative applications of AI.\\n- **Posts**: \\n - **Mastering AI and LLM Engineering – Resources** (November 13, 2024)\\n - **From Software Engineer to AI Data Scientist – Resources** (October 16, 2024)\\n - **Outsmart LLM Arena – A Battle of Diplomacy and Deviousness** (August 6, 2024)\\n - **Choosing the Right LLM: Toolkit and Resources** (June 26, 2024)\\n\\nThe website invites visitors to connect with Edward and stay updated through his posts and resources related to AI and LLMs.\""
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 24,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
@ -411,10 +613,38 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 25,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"Edward Donner's website is a personal and professional platform showcasing his interests and expertise in working with Large Language Models (LLMs) and AI technologies. As the co-founder and CTO of Nebula.io, he is focused on leveraging AI to enhance talent discovery and engagement. Previously, he founded the AI startup untapt, which was acquired in 2021. \n",
"\n",
"## Key Features:\n",
"\n",
"- **Personal Introduction**: Ed shares his passion for coding, experimentation with LLMs, and interests in DJing and electronic music production.\n",
"- **Professional Background**: Insights into his role at Nebula.io and previous experience with untapt. He highlights his work with proprietary LLMs and innovative matching models.\n",
" \n",
"## Recent Posts:\n",
"- **November 13, 2024**: Mastering AI and LLM Engineering – Resources\n",
"- **October 16, 2024**: From Software Engineer to AI Data Scientist – Resources\n",
"- **August 6, 2024**: Outsmart LLM Arena – A battle of diplomacy and deviousness\n",
"- **June 26, 2024**: Choosing the Right LLM: Toolkit and Resources\n",
"\n",
"These posts suggest a focus on educational resources and insights related to AI and LLM engineering."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
@ -437,20 +667,86 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 26,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# CNN Summary\n",
"\n",
"CNN is a leading news outlet providing the latest updates on various topics, including:\n",
"\n",
"- **Breaking News:** Continuous updates on urgent events around the globe.\n",
"- **Featured Stories:** Insights into significant current affairs, such as the ongoing conflict between Israel and Hamas, and developments in the Ukraine-Russia war.\n",
"- **Politics:** Coverage of key political events, including President Biden's recent clemency grants and implications of Trump's potential inauguration.\n",
"- **World Affairs:** Reports on international crises, including the situation in Syria and reactions to Sudan's bombardments.\n",
"- **Health & Science:** Articles discussing public health issues and scientific discoveries, such as innovations in herbal medicine.\n",
"- **Business & Economy:** Analysis of corporate developments, job cuts in Germany, and impacts of international trade policies.\n",
"- **Entertainment & Culture:** Features on public figures and trends affecting the entertainment industry, as well as the latest in sports.\n",
"\n",
"Recent announcements on the site include:\n",
"- **Clemency for Nearly 1,500 People:** This act marks the largest single-day clemency decision in recent history.\n",
"- **Status of International Relations:** Notable updates on figures like Trump and Xi Jinping, alongside the unfolding situation in Ukraine and the Middle East.\n",
"- **Cultural Insights:** Breakdowns of significant cultural events such as the recognition of Time's \"Person of the Year.\"\n",
"\n",
"CNN emphasizes the importance of viewer feedback to enhance reading and engagement experiences on their platform."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 27,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"# Anthropic Overview\n",
"\n",
"Anthropic is an AI safety and research company based in San Francisco, focused on developing reliable and beneficial AI systems with a strong emphasis on safety. The company boasts an interdisciplinary team with expertise in machine learning, physics, policy, and product development.\n",
"\n",
"## Key Offerings\n",
"\n",
"- **Claude AI Models**: \n",
" - The latest model, **Claude 3.5 Sonnet**, is highlighted as the most intelligent AI model to date.\n",
" - **Claude 3.5 Haiku** has also been introduced, expanding their product offerings.\n",
"\n",
"- **API Access**: \n",
" - Users can leverage Claude to enhance efficiency and create new revenue opportunities.\n",
"\n",
"## Recent Announcements\n",
"\n",
"1. **New Model Updates** (October 22, 2024):\n",
" - Introduction of Claude 3.5 Sonnet and Claude 3.5 Haiku.\n",
" - Announcement of new capabilities for computer use.\n",
"\n",
"2. **Research Initiatives**:\n",
" - **Constitutional AI**: Discusses ensuring harmlessness through AI feedback (December 15, 2022).\n",
" - **Core Views on AI Safety**: Outlines when, why, what, and how AI safety should be addressed (March 8, 2023).\n",
"\n",
"Overall, Anthropic is focused on pioneering advancements in AI through research and development while prioritizing safety and reliability in its applications."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://anthropic.com\")"
]
@ -489,30 +785,58 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 31,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Objet : Contestation du frais de retour tardif\n",
"\n",
"Bonjour,\n",
"\n",
"Je fais suite à votre email concernant le retour tardif de la voiture. Je conteste fermement cette modification de prix qui, selon moi, est injustifiée.\n",
"\n",
"Selon les termes de notre contrat de location, le délai de grâce pour le retour est souvent de 30 minutes, ce qui est fréquemment appliqué dans le secteur. De plus, vous n'avez pas mentionné dans votre contrat un tarif additionnel pour une telle situation, ce qui pourrait constituer une clause abusive.\n",
"\n",
"Je vous prie donc de bien vouloir annuler cette modification tarifaire. Je me réserve le droit d'explorer des recours supplémentaires si cette situation n'est pas corrigée rapidement.\n",
"\n",
"Dans l'attente de votre retour.\n",
"\n",
"Cordialement, \n",
"Sylvain\n"
]
}
],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"something here\"\n",
"system_prompt = \"You are my very smart assistant. Your task will be to suggest to me an answer to my email. I want to avoid paying. you can be agressive and use the law\"\n",
"user_prompt = \"\"\"\n",
" Lots of text\n",
" Can be pasted here\n",
" Retour tardif\n",
"Bonjour sylvain,\n",
"\n",
"Vous avez réservé la voiture jusqu'à 15:00 , et vous l'avez rendue à 15:30 . Le prix de votre location a été modifié en conséquence.\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [] # fill this in\n",
"messages = [{\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response =\n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
" )\n",
" \n",
"\n",
"# Step 4: print the result\n",
"\n",
"print("
"print(response.choices[0].message.content)"
]
},
{

146
week1/day2 EXERCISE.ipynb

@ -213,13 +213,157 @@
"Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI; use either of the above approaches."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2cedd52a-cb2e-4cfb-8e48-176954cd64e7",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"import ollama\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "402d5686-4e76-4110-b65a-b3906c35c0a4",
"metadata": {},
"outputs": [],
"source": []
"source": [
"# Constants\n",
"\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"MODEL = \"llama3.2\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "be0c1b6e-8d7c-446f-8a9c-18b1c33078d8",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "265f85c4-132d-4bb8-bda2-792e0b0da017",
"metadata": {},
"outputs": [],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
"ed = Website(\"https://edwarddonner.com\")\n",
"print(ed.title)\n",
"print(ed.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "24ea875b-2ba0-41ad-b6be-4be8baeac16e",
"metadata": {},
"outputs": [],
"source": [
"# Define our system/user prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"user_prompt = user_prompt_for(ed)\n",
"print(user_prompt)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d5f0f47-58a2-43b7-87a9-181a87e08081",
"metadata": {},
"outputs": [],
"source": [
"# Create the messages list for a given website\n",
"def messages_for(website):\n",
"    return [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
"    ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "55454071-faa1-4cb1-a8dc-1f93abad6718",
"metadata": {},
"outputs": [],
"source": [
"print(messages_for(ed))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "42e0d73b-7db5-431f-bf97-4767be627056",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the OpenAI API. You will get very familiar with this!\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = ollama.chat(model=MODEL, messages=messages_for(website)) \n",
" return response['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "419d4c04-a3c4-43d2-ac1f-5f7f3b84c92a",
"metadata": {},
"outputs": [],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
}
],
"metadata": {

3674
week1/day5.ipynb

File diff suppressed because one or more lines are too long

170
week1/solutions/week1 SOLUTION.ipynb

@ -15,7 +15,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
@ -30,7 +30,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
@ -43,7 +43,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
"metadata": {},
"outputs": [],
@ -56,7 +56,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {},
"outputs": [],
@ -71,7 +71,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"id": "8595807b-8ae2-4e1b-95d9-e8532142e8bb",
"metadata": {},
"outputs": [],
@ -84,7 +84,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"id": "9605cbb6-3d3f-4969-b420-7f4cae0b9328",
"metadata": {},
"outputs": [],
@ -99,10 +99,96 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"The line of code you've provided utilizes a generator expression along with the `yield from` syntax in Python. Let's break it down step by step.\n",
"\n",
"### Breakdown of the Code\n",
"\n",
"1. **Context of `yield from`:**\n",
" - `yield from` is a special syntax in Python used within a generator to yield all values from an iterable (like a list, set, or another generator) without explicitly iterating through it. It's useful for delegating part of the generator's operations to another generator.\n",
"\n",
"2. **The Set Comprehension:**\n",
" - `{book.get(\"author\") for book in books if book.get(\"author\")}` is a **set comprehension**. It constructs a set of unique author names from the `books` collection.\n",
" - **How it works:**\n",
" - `for book in books`: This iterates over each `book` in the `books` iterable (which is assumed to be a list or another iterable containing dictionaries).\n",
" - `book.get(\"author\")`: This retrieves the value corresponding to the key `\"author\"` from the `book` dictionary. If the key does not exist in the dictionary, `get()` returns `None`.\n",
" - `if book.get(\"author\")`: This condition checks if the author exists (i.e., is not `None`, empty string, etc.). If the result of `get(\"author\")` is falsy (like `None`), that book is skipped.\n",
" - The result is a set of unique author names because sets automatically remove duplicates.\n",
"\n",
"3. **Combining `yield from` with Set Comprehension:**\n",
" - The entire line of code thus creates a generator that can yield each unique author from the set of authors collected from the `books`, allowing the surrounding context to retrieve authors one by one.\n",
"\n",
"### Purpose of the Code\n",
"\n",
"- **Purpose:** The purpose of this line is to efficiently generate a sequence of unique author names from a list (or iterable) of book dictionaries, while filtering out any entries that do not have an author specified.\n",
"- **Why Use This Approach:** \n",
" - Using a set comprehension ensures that only unique authors are collected, avoiding duplicates.\n",
" - The `yield from` syntax provides a clean way to return these unique authors as part of a generator function, making it easy to iterate over them in another context without needing to manage the iteration and collection explicitly.\n",
"\n",
"### Example Scenario\n",
"\n",
"Let's say you have a list of books structured like this:\n",
"\n",
"python\n",
"books = [\n",
" {\"title\": \"Book 1\", \"author\": \"Alice\"},\n",
" {\"title\": \"Book 2\", \"author\": \"Bob\"},\n",
" {\"title\": \"Book 3\", \"author\": \"Alice\"}, # Duplicate author\n",
" {\"title\": \"Book 4\"}, # No author\n",
" {\"title\": \"Book 5\", \"author\": \"\"}\n",
"]\n",
"\n",
"\n",
"If you execute the line in a generator function, here's what happens:\n",
"\n",
"1. The **Set Comprehension** is evaluated, resulting in the set `{\"Alice\", \"Bob\"}`.\n",
"2. The `yield from` syntax will then yield each of these authors one by one, allowing any caller of the generator to iterate through `Alice` and `Bob`.\n",
"\n",
"### Usage\n",
"\n",
"Here is a complete example of how you might use this line in a generator function:\n",
"\n",
"python\n",
"def unique_authors(books):\n",
" yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\n",
"# Example usage:\n",
"books = [\n",
" {\"title\": \"Book 1\", \"author\": \"Alice\"},\n",
" {\"title\": \"Book 2\", \"author\": \"Bob\"},\n",
" {\"title\": \"Book 3\", \"author\": \"Alice\"},\n",
" {\"title\": \"Book 4\"},\n",
" {\"title\": \"Book 5\", \"author\": \"\"}\n",
"]\n",
"\n",
"for author in unique_authors(books):\n",
" print(author)\n",
"\n",
"\n",
"### Output\n",
"\n",
"Alice\n",
"Bob\n",
"\n",
"\n",
"### Conclusion\n",
"\n",
"In summary, the line of code you've provided is a concise way to yield unique authors from a collection of books using Python's powerful set comprehension and generator features. It provides both efficiency in terms of time and space complexity (due to unique filtering) and succinctness in code structure."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Get gpt-4o-mini to answer, with streaming\n",
"\n",
@ -118,10 +204,64 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/markdown": [
"**Breaking Down the Code**\n",
"\n",
"The given code snippet is written in Python and utilizes a combination of features such as generators, dictionary iteration, and conditional logic. Let's break it down step by step:\n",
"\n",
"```python\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"```\n",
"\n",
"Here's what's happening:\n",
"\n",
"1. `for book in books`: This is a standard `for` loop that iterates over the elements of the `books` collection (likely a list, dictionary, or some other iterable).\n",
"\n",
"2. `if book.get(\"author\")`: Inside the loop, there's an additional condition that filters out any items from the `books` collection where the `\"author\"` key does not exist or its value is empty/missing (`None`, `''`, etc.). This ensures only books with a valid author name are processed.\n",
"\n",
"3. `{book.get(\"author\") for book in books if book.get(\"author\")}`: This is an expression that iterates over the filtered list of books, extracting and yielding their authors. The `get()` method is used to safely retrieve the value associated with the `\"author\"` key from each book dictionary.\n",
"\n",
"4. `yield from {...}`: The outer expression is a generator expression, which yields values one at a time instead of computing them all at once and returning them in a list (as would be done with a regular list comprehension). The `yield from` syntax allows us to delegate the execution of this inner generator to another iterable (`{book.get(\"author\") for book in books if book.get(\"author\")}`).\n",
"\n",
"**Why it's useful**\n",
"\n",
"This code snippet is useful when you need to:\n",
"\n",
"* Filter out invalid or incomplete data points while still working with iterables.\n",
"* Work with large datasets without loading the entire dataset into memory at once.\n",
"* Simplify your code by leveraging generator expressions and avoiding unnecessary computations.\n",
"\n",
"In practice, this could be used in a variety of scenarios where you're dealing with lists of dictionaries (e.g., books) and need to extract specific information from them (e.g., authors).\n",
"\n",
"**Example usage**\n",
"\n",
"Here's an example using a list of book dictionaries:\n",
"```python\n",
"books = [\n",
" {\"title\": \"Book 1\", \"author\": \"Author A\"},\n",
" {\"title\": \"Book 2\", \"author\": None},\n",
" {\"title\": \"Book 3\", \"author\": \"\"},\n",
"]\n",
"\n",
"authors = yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"print(authors) # Output: [\"Author A\", \"Author B\"]\n",
"```\n",
"In this example, the `yield from` expression filters out books with missing or empty authors and extracts their values as a generator. The outer loop can then iterate over these generated values without having to store them all in memory at once."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Get Llama 3.2 to answer\n",
"\n",
@ -142,14 +282,6 @@
"\n",
"And then creating the prompts and making the calls interactively."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "da663d73-dd2a-4fff-84df-2209cf2b330b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@ -168,7 +300,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
"version": "3.11.11"
}
},
"nbformat": 4,

48
week1/troubleshooting.ipynb

File diff suppressed because one or more lines are too long

258
week1/week1 EXERCISE.ipynb

@ -13,71 +13,301 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "c1070317-3ed9-4659-abe3-828943230e03",
"metadata": {},
"outputs": [],
"source": [
"# imports"
"# imports\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display, update_display\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "4ed79945-0582-4f22-a210-b21e5448991e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key looks good so far\n"
]
}
],
"source": [
    "# open ai constants\n",
    "\n",
    "load_dotenv()\n",
    "api_key = os.getenv('OPENAI_API_KEY')\n",
    "\n",
    "# Sanity check only: confirms the key exists and carries the 'sk-proj-' project\n",
    "# prefix; it does NOT validate the key against the OpenAI API.\n",
    "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
    "    print(\"API key looks good so far\")\n",
    "else:\n",
    "    print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
    "    \n",
    "MODEL_GPT = 'gpt-4o-mini'"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
"metadata": {},
"outputs": [],
"source": [
"# constants\n",
"# ollama constants\n",
"\n",
"MODEL_GPT = 'gpt-4o-mini'\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"#OLLAMA_HEADERS = {\"Content-Type\": \"application/json\"}\n",
"MODEL_LLAMA = 'llama3.2'"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
"metadata": {},
"outputs": [],
"source": [
"# set up environment"
"# initialization\n",
"openai = OpenAI()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
"metadata": {},
"outputs": [],
"source": [
"# here is the question; type over this to ask something new\n",
"\n",
"system_prompt=\"You are a tech specialist capable to explain with clarity tech question and more. You should answer in markdown\"\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\"\"\""
"\"\"\"\n",
"user_prompt = f\"Please explain me this question: {question}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 21,
"id": "144c16d8-4e55-46e4-82f0-753375119ff3",
"metadata": {},
"outputs": [],
"source": [
    "# Chat-format messages (system + user) shared by both model calls below\n",
    "prompt_messages=[\n",
    "    {\"role\": \"system\", \"content\": system_prompt},\n",
    "    {\"role\": \"user\", \"content\": user_prompt}\n",
    "    ]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
"metadata": {},
"outputs": [],
"source": [
"# Get gpt-4o-mini to answer, with streaming"
    "# Get gpt-4o-mini to answer, with streaming\n",
    "def askOpenAi():\n",
    "    \"\"\"Stream a GPT answer to the global `prompt_messages`, re-rendering\n",
    "    the accumulating Markdown in-place in the notebook output area.\"\"\"\n",
    "    stream = openai.chat.completions.create(\n",
    "        model=MODEL_GPT,\n",
    "        messages=prompt_messages,\n",
    "        stream = True \n",
    "    )\n",
    "    response=\"\"\n",
    "    # Empty Markdown display handle that we update as chunks arrive\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    \n",
    "    for chunk in stream:\n",
    "        # Delta content can be falsy on some chunks; coalesce to ''\n",
    "        response += chunk.choices[0].delta.content or '' \n",
    "        update_display(Markdown(response), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 22,
"id": "2190d7aa-f6a1-4bca-a2ff-8ce2b0db79c5",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"Sure! Let's break down the code you've provided:\n",
"\n",
"```python\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"```\n",
"\n",
"### Explanation of the Code\n",
"\n",
"1. **Set Comprehension:**\n",
" - The code `{book.get(\"author\") for book in books if book.get(\"author\")}` is a **set comprehension**. This is a concise way to create a set in Python.\n",
" - It iterates over a collection named `books`.\n",
"\n",
"2. **Accessing the \"author\" Key:**\n",
" - `book.get(\"author\")`: For each `book` in the `books` collection, it attempts to get the value associated with the key `\"author\"`. \n",
" - The `get()` method is used to safely access dictionary keys. If the key does not exist, it returns `None` instead of throwing an error.\n",
"\n",
"3. **Filtering Authors:**\n",
" - The clause `if book.get(\"author\")` acts as a filter. It ensures that only books with a valid (non-`None`) author get included in the resulting set.\n",
" - Therefore, this part: `{book.get(\"author\") for book in books if book.get(\"author\")}` creates a set of unique authors from the `books` collection that have valid author values.\n",
"\n",
"4. **Yielding Results:**\n",
" - The `yield from` statement is used to yield values from the set comprehension created previously. \n",
" - This means that the containing function will return each unique author one at a time as they are requested (similar to a generator).\n",
"\n",
"### Summary\n",
"\n",
"- **What the Code Does:**\n",
" - It generates a set of unique authors from a list of books, filtering out any entries that do not have an author. It then yields each of these authors.\n",
"\n",
"- **Why It's Useful:**\n",
" - This code is particularly useful when dealing with collections of books where some might not have an author specified. It safely retrieves the authors and ensures that each author is only returned once. \n",
" - Using `yield from` makes it memory efficient, as it does not create an intermediate list of authors but generates them one at a time on demand.\n",
"\n",
"### Example\n",
"\n",
"If you had a list of books like this:\n",
"\n",
"```python\n",
"books = [\n",
" {\"title\": \"Book 1\", \"author\": \"Author A\"},\n",
" {\"title\": \"Book 2\", \"author\": \"Author B\"},\n",
" {\"title\": \"Book 3\", \"author\": None},\n",
" {\"title\": \"Book 4\", \"author\": \"Author A\"},\n",
"]\n",
"```\n",
"\n",
"The output of the code would be:\n",
"\n",
"```\n",
"Author A\n",
"Author B\n",
"```\n",
"\n",
"In this example, `Author A` is listed only once, even though there are multiple books by that author."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"askOpenAi()"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
"metadata": {},
"outputs": [],
"source": [
"# Get Llama 3.2 to answer"
    "# Get Llama 3.2 to answer\n",
    "def askOllama(): \n",
    "    \"\"\"Stream a local Llama answer to the global `prompt_messages`,\n",
    "    re-rendering the accumulating Markdown in-place in the notebook output.\"\"\"\n",
    "    stream = ollama.chat(model=MODEL_LLAMA, messages=prompt_messages, stream=True)\n",
    "    response=\"\"\n",
    "    # Empty Markdown display handle that we update as chunks arrive\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    \n",
    "    for chunk in stream:\n",
    "        # Chunk content can be falsy; coalesce to ''\n",
    "        response += chunk['message']['content'] or '' \n",
    "        update_display(Markdown(response), display_id=display_handle.display_id)\n",
    "    "
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "acc0116c-4506-4391-89d5-1add700d3d55",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Yielding Authors from a List of Books**\n",
"=====================================\n",
"\n",
"This code snippet is written in Python and utilizes the `yield from` statement, which was introduced in Python 3.3.\n",
"\n",
"### What does it do?\n",
"\n",
"The code takes two main inputs:\n",
"\n",
"* A list of dictionaries (`books`) where each dictionary represents a book.\n",
"* Another dictionary (`book`) that contains information about an author.\n",
"\n",
"It generates a sequence of authors from the `books` list and yields them one by one, while also applying the condition that the book has a valid \"author\" key in its dictionary.\n",
"\n",
"Here's a step-by-step breakdown:\n",
"\n",
"1. `{book.get(\"author\") for book in books if book.get(\"author\")}`:\n",
" * This is an expression that generates a sequence of authors.\n",
" * `for book in books` iterates over each book in the `books` list.\n",
" * `if book.get(\"author\")` filters out books without an \"author\" key, to prevent errors and ensure only valid data is processed.\n",
"\n",
"2. `yield from ...`:\n",
" * This statement is used to delegate a sub-generator or iterator.\n",
" * In this case, it's delegating the sequence of authors generated in step 1.\n",
"\n",
"**Why does it yield authors?**\n",
"\n",
"The use of `yield from` serves two main purposes:\n",
"\n",
"* **Efficiency**: Instead of creating a new list with all the authors, this code yields each author one by one. This approach is more memory-efficient and can be particularly beneficial when dealing with large datasets.\n",
"* **Flexibility**: By using `yield from`, you can create generators that produce values on-the-fly, allowing for lazy evaluation.\n",
"\n",
"### Example Usage\n",
"\n",
"Here's an example of how you might use this code:\n",
"\n",
"```python\n",
"books = [\n",
" {\"title\": \"Book 1\", \"author\": \"Author A\"},\n",
" {\"title\": \"Book 2\", \"author\": \"Author B\"},\n",
" {\"title\": \"Book 3\"}\n",
"]\n",
"\n",
"def get_authors(books):\n",
" \"\"\"Yields authors from the books.\"\"\"\n",
" yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"\n",
"# Get all unique authors\n",
"authors = set(get_authors(books))\n",
"print(authors) # Output: {'Author A', 'Author B'}\n",
"```\n",
"\n",
"In this example, `get_authors` is a generator function that yields unique authors from the `books` list. The generated values are collected in a set (`authors`) to eliminate duplicates."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"askOllama()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9233ca17-160f-4afd-aef6-a7e02f069a50",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

128
week2/day1.ipynb

@ -238,7 +238,7 @@
"completion = openai.chat.completions.create(\n",
" model='gpt-4o-mini',\n",
" messages=prompts,\n",
" temperature=0.7\n",
" temperature=0.2\n",
")\n",
"print(completion.choices[0].message.content)"
]
@ -255,7 +255,7 @@
"completion = openai.chat.completions.create(\n",
" model='gpt-4o',\n",
" messages=prompts,\n",
" temperature=0.4\n",
" temperature=0.8\n",
")\n",
"print(completion.choices[0].message.content)"
]
@ -297,7 +297,7 @@
"result = claude.messages.stream(\n",
" model=\"claude-3-5-sonnet-20240620\",\n",
" max_tokens=200,\n",
" temperature=0.7,\n",
" temperature=0.4,\n",
" system=system_message,\n",
" messages=[\n",
" {\"role\": \"user\", \"content\": user_prompt},\n",
@ -432,15 +432,30 @@
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"\n",
"gpt_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"claude_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]"
"gpt_system = \"\"\"\n",
"You are the Machiavellian leader of a secret organization.\n",
"You lead a meeting for a plan of global destabilization. Your name is Jhon.\n",
"be brief in your exchanges\n",
" \"\"\"\n",
"\n",
"claude_system = \"\"\"\n",
"You are the strategist of a Machiavellian organization. \n",
"You participate in a secret meeting to establish a plan for global destabilization.\n",
"don't reveal that you are a spy. Your name is Sam.\n",
"be brief in your exchanges\n",
"\"\"\"\n",
"\n",
"ollama_system = \"\"\"\n",
"You are a very virulent member of the Machiavellian association. You suspect the presence of a spy. \n",
"Your secret role during the meeting will be to flush him out.\n",
"Your name is Melissa.\n",
"be brief in your exchanges\n",
"\"\"\"\n",
"\n",
"\n",
"gpt_messages = [\"Welcome to you dear members.\"]\n",
"claude_messages = [\"Hi to you leader.\"]\n",
"ollama_messages = [\"All my respects to the top leader.\"]"
]
},
{
@ -452,9 +467,11 @@
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude in zip(gpt_messages, claude_messages):\n",
" for gpt, claude, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" messages.append({\"role\": \"user\", \"content\": f\"[Sam]: {claude}.[Melissa]: {ollama}\"})\n",
" \n",
" #print(f\"JHON[messages]: {messages} \")\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
@ -472,6 +489,18 @@
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a9366f2-b233-4ec2-8a6f-f7e56fc4c772",
"metadata": {},
"outputs": [],
"source": [
"print(gpt_messages)\n",
"print(claude_messages)\n",
"print(ollama_messages)"
]
},
{
"cell_type": "code",
"execution_count": null,
@ -481,10 +510,11 @@
"source": [
"def call_claude():\n",
" messages = []\n",
" for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" for gpt, claude_message, ollama in zip(gpt_messages, claude_messages, ollama_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Melissa]: {ollama}\"})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
" messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}\"})\n",
" #print(f\"SAM[messages]: {messages} \")\n",
" message = claude.messages.create(\n",
" model=claude_model,\n",
" system=claude_system,\n",
@ -507,11 +537,51 @@
{
"cell_type": "code",
"execution_count": null,
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
"id": "157faafa-6ade-46e4-b4e1-12f4ca9dfcbc",
"metadata": {},
"outputs": [],
"source": [
"call_gpt()"
"# define context for ollama\n",
"import ollama\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"OLLAMA_MODEL = \"llama3.2\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27287ec3-7ea8-49b2-80aa-b2a52e002b0d",
"metadata": {},
"outputs": [],
"source": [
"def call_ollama():\n",
" messages = [{\"role\": \"system\", \"content\": ollama_system}]\n",
" \n",
" for gpt, claude, ollama_message in zip(gpt_messages, claude_messages, ollama_messages):\n",
" messages.append({\"role\": \"user\", \"content\": f\"[Jhon]: {gpt}. [Sam]: {claude}\"})\n",
" messages.append({\"role\": \"assistant\", \"content\": ollama_message})\n",
" messages.append({\"role\": \"user\", \"content\":f\"[Jhon]: {gpt_messages[-1]}. [Sam]: {claude_messages[-1]}\"})\n",
"\n",
" #print(f\"MELISSA[messages]: {messages} \")\n",
" \n",
" \n",
" payload = {\n",
" \"model\": OLLAMA_MODEL,\n",
" \"messages\": messages,\n",
" \"stream\": False\n",
" }\n",
" message = ollama.chat(model=OLLAMA_MODEL, messages=messages)\n",
" return message['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e637a3a0-f819-468b-88f7-5c9db48c6fed",
"metadata": {},
"outputs": [],
"source": [
"call_ollama()"
]
},
{
@ -521,20 +591,26 @@
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",
"gpt_messages = [\"Welcome to you dear members.\"]\n",
"claude_messages = [\"Hi to you leader.\"]\n",
"ollama_messages = [\"All my respects to the top leader.\"]\n",
"\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
"print(f\"GPT has JHON:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude has SAM:\\n{claude_messages[0]}\\n\")\n",
"print(f\"Ollama has MELISSA:\\n{ollama_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" print(f\"GPT has JHON:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"Claude:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)"
" print(f\"CLAUDE has SAM:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)\n",
"\n",
" ollama_next = call_ollama()\n",
" print(f\"OLLAMA has MELISSA:\\n{ollama_next}\\n\")\n",
" ollama_messages.append(ollama_next)"
]
},
{

Loading…
Cancel
Save