Browse Source

Merge pull request #256 from jalopezmartin/community-contributions-branch

Community contributions branch
pull/264/head
Ed Donner 2 months ago committed by GitHub
parent
commit
a5e8a180a8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 240
      week1/community-contributions/Ollama_websummarizer_user_input.ipynb
  2. 269
      week1/community-contributions/W1D5_Code_instructor.ipynb

240
week1/community-contributions/Ollama_websummarizer_user_input.ipynb

@ -0,0 +1,240 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9964872b-225d-4ced-93e4-fc5b279ec2ed",
"metadata": {},
"source": [
"# Webpage English summarizer with user inputs (url, ollama-based LLM) "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e49d399-d18c-4c91-8abc-cf3289e11e2f",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"# from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"import ollama, time\n",
"from tqdm import tqdm"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46e7d809-248d-41b8-80e1-36b210041581",
"metadata": {},
"outputs": [],
"source": [
"# Define system prompt.\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a detailed summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown, in English.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8bf237f-591f-4c32-9415-5d5d4e2522b8",
"metadata": {},
"outputs": [],
"source": [
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
"    \"\"\"Build the user prompt asking the LLM to summarize the given Website.\"\"\"\n",
"    parts = [\n",
"        f\"You are looking at a website titled {website.title}\",\n",
"        \"\\nThe contents of this website is as follows; please provide a detailed summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\",\n",
"        website.text,\n",
"    ]\n",
"    return \"\".join(parts)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7d39ee6d-c670-41ba-a0b8-debd55bda8e3",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
"    \"\"\"Assemble the chat message list (system + user) for this website.\"\"\"\n",
"    system_message = {\"role\": \"system\", \"content\": system_prompt}\n",
"    user_message = {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
"    return [system_message, user_message]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "43e28ff5-2def-4a47-acdd-2e06c0666956",
"metadata": {},
"outputs": [],
"source": [
"# Constants\n",
"\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32f4f481-81a3-479d-817b-4e754d9af46d",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = HEADERS\n",
"\n",
"class Website:\n",
"    \"\"\"A fetched web page: its url, title and visible text content.\"\"\"\n",
"\n",
"    def __init__(self, url):\n",
"        \"\"\"\n",
"        Create this Website object from the given url using the BeautifulSoup library\n",
"        \"\"\"\n",
"        self.url = url\n",
"        response = requests.get(url, headers=headers)\n",
"        response.raise_for_status()  # fail fast on HTTP errors instead of summarizing an error page\n",
"        soup = BeautifulSoup(response.content, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            # Strip elements that carry no readable text before extracting it\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            # Some pages (or non-HTML responses) have no <body>; the original code\n",
"            # raised a TypeError here — fall back to empty text instead of crashing\n",
"            self.text = \"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f81cfd17-8208-4192-a59f-485ff3ea74e4",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the ollama API wrapper and return the relevant component of the response\n",
"\n",
"def summarize(url):\n",
"    \"\"\"Fetch the page at url and return the model's summary text.\n",
"\n",
"    NOTE(review): relies on the global MODEL, which is only assigned by a\n",
"    later input() cell — run that cell before calling this function, or a\n",
"    NameError is raised on a fresh Restart-&-Run-All.\n",
"    \"\"\"\n",
"    website = Website(url)\n",
"    response = ollama.chat(\n",
"        model=MODEL,\n",
"        messages = messages_for(website)\n",
"    )\n",
"    # ollama.chat returns a dict-like response; the text lives under message.content\n",
"    return response['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7a9eedc6-2183-473d-84ca-b10d40e2a1e6",
"metadata": {},
"outputs": [],
"source": [
"# Ask the user the name of the url address\n",
"\n",
"url= str(input(\"\"\"\n",
"Please provide a valid url address:\n",
"https://\"\"\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5d012de2-0ef2-43db-9f51-fc7f989c3642",
"metadata": {},
"outputs": [],
"source": [
"# Ask the user to select a valid model\n",
"\n",
"MODEL= str(input(\"\"\"\n",
"Please select a LLM:\n",
"(examples: llama3.2, deepseek-r1:1.5b)\n",
"\"\"\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ac8c02e-4a62-448b-a231-8c6f65891811",
"metadata": {},
"outputs": [],
"source": [
"# Let's just make sure the model is loaded\n",
"\n",
"!ollama pull {MODEL}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0544541f-11a8-4eb7-8eb6-bc032ed6d0d1",
"metadata": {},
"outputs": [],
"source": [
"print('url: https://{0}\\nModel= {1}'.format(url, MODEL))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45518950-f2c9-43af-b897-4fe8fe48dfd8",
"metadata": {},
"outputs": [],
"source": [
"# The summary is fully computed by the summarize() call on the first line.\n",
"summary = summarize('https://'+ url)\n",
"# NOTE(review): the loop below iterates over the characters of the finished\n",
"# summary purely to animate a tqdm progress bar — it does no real work and\n",
"# adds ~0.01 s of sleep per character; consider removing it.\n",
"for summ in tqdm(summary):\n",
" time.sleep(0.01)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "02c0c15e-216d-47c7-843d-ac27af02820b",
"metadata": {},
"outputs": [],
"source": [
"display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "985a3689-5827-4b15-b8d5-276f9b292afd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

269
week1/community-contributions/W1D5_Code_instructor.ipynb

@ -0,0 +1,269 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "0e5dc476-e3c9-49bd-934a-35dbe0d55b13",
"metadata": {},
"source": [
"# End of week 1 exercise (with user inputs: question and model)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "353fba18-a9b4-4ba8-be7e-f3e3c37521ff",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "be2b859d-b3d2-41f7-8666-28ecde26e3b8",
"metadata": {},
"outputs": [],
"source": [
"# set up environment and constants\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
" print(\"API key looks good so far\")\n",
"else:\n",
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c1b2b694-11a1-4d2a-8e34-d1fb02617fa3",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an expert coder with educational skills for beginners. \\\n",
"You are able to explain, debbug or generate code in Python, R or bash, and to provide examples of use case if applicable. \\\n",
"Please add references to relevant sources if available. If not, do not invent.\\n\"\n",
"system_prompt += \"this is an example of a response:\"\n",
"system_prompt += \"\"\"\n",
"Sure! Here’s the explanation in plain text format, suitable for Markdown:\n",
"\n",
"# Explanation of the Code\n",
"\n",
"### Code:\n",
"```python\n",
"full_name = lambda first, last: f'Full name: {first.title()} {last.title()}'\n",
"```\n",
"\n",
"### Explanation:\n",
"\n",
"1. **Lambda Function:**\n",
" - The keyword `lambda` is used to create a small, one-line anonymous function (a function without a name).\n",
" - It takes two parameters: `first` (for the first name) and `last` (for the last name).\n",
"\n",
"2. **String Formatting (`f-string`):**\n",
" - `f'Full name: {first.title()} {last.title()}'` is a formatted string (f-string).\n",
" - It inserts the values of `first` and `last` into the string while applying `.title()` to capitalize the first letter of each name.\n",
"\n",
"3. **Assigning the Function:**\n",
" - The lambda function is assigned to the variable `full_name`, so we can use `full_name()` like a regular function.\n",
"\n",
"### How to Use It:\n",
"Now, let’s call this function and see what it does.\n",
"\n",
"```python\n",
"print(full_name(\"john\", \"doe\"))\n",
"```\n",
"\n",
"#### Output:\n",
"```\n",
"Full name: John Doe\n",
"```\n",
"\n",
"### What Happens:\n",
"- `\"john\"` becomes `\"John\"` (because `.title()` capitalizes the first letter).\n",
"- `\"doe\"` becomes `\"Doe\"`.\n",
"- The output is `\"Full name: John Doe\"`.\n",
"\n",
"### Summary:\n",
"This is a simple way to create a function that formats a full name while ensuring proper capitalization. You could write the same function using `def` like this:\n",
"\n",
"```python\n",
"def full_name(first, last):\n",
" return f'Full name: {first.title()} {last.title()}'\n",
"```\n",
"\n",
"Both versions work the same way, but the `lambda` version is more compact.\n",
"\n",
"### Reference(s):\n",
"To deepen your understanding of the code snippet involving Python's lambda functions here is a resource you might find helpful:\n",
"\n",
"Ref. **Python Lambda Functions:**\n",
" - The official Python documentation provides an in-depth explanation of lambda expressions, including their syntax and use cases.\n",
" - [Lambda Expressions](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions)\n",
"\n",
"```\n",
"You can copy and paste this into any Markdown file or viewer. Let me know if you need further modifications! 😊\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7225ab0-5ade-4c93-839c-3c80b0b23c37",
"metadata": {},
"outputs": [],
"source": [
"# display(Markdown(system_prompt))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "07fa2506-4b24-4a53-9f3f-500b4cbcb10a",
"metadata": {},
"outputs": [],
"source": [
"# user question\n",
"\n",
"# Default example shown when the user just presses Enter.\n",
"# FIX: the comprehension originally read `from book in books`, which is not\n",
"# valid Python — it should be `for book in books`.\n",
"default_question= \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get('author') for book in books if book.get('author')}\n",
"\"\"\"\n",
"\n",
"# FIX: the prompt contained a `/n` typo instead of the `\\n` newline escape.\n",
"# input() already returns a str, so no str() wrapper is needed.\n",
"user_question = input(\"What code do you want me to explain?\\n(Press 'Enter' for an example)\")\n",
"\n",
"if user_question == '':\n",
"    question = default_question\n",
"    print(default_question)\n",
"else:\n",
"    question = \"Please explain what this code does and why:\\n\" + user_question"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a6749065-fb8a-4f9f-8297-3cd33abd97bd",
"metadata": {},
"outputs": [],
"source": [
"print(question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f48df06c-edb7-4a05-9e56-910854dad0c7",
"metadata": {},
"outputs": [],
"source": [
"# user model\n",
"\n",
"# Map menu numbers to model identifiers — single source of truth instead of\n",
"# the original if/elif ladder that called int(model_number) repeatedly.\n",
"MODEL_CHOICES = {1: 'gpt-4o-mini', 2: 'llama3.2', 3: 'deepseek-r1:1.5b', 4: 'qwen2.5:3b'}\n",
"\n",
"model_number = input(\"\"\"\n",
"Please enter the number of the model you want to use from the list below:\n",
"1 GPT-4o Mini\n",
"2 Llama 3.2\n",
"3 DeepSeek R1\n",
"4 Qwen 2.5\n",
"\"\"\")\n",
"try:\n",
"    model = MODEL_CHOICES.get(int(model_number), '')\n",
"    if model == '':\n",
"        print(\"please provide only a number from the list\")\n",
"except ValueError:\n",
"    # FIX: catch only ValueError (non-numeric input, including bare Enter)\n",
"    # instead of the original bare `except:` that swallowed every error.\n",
"    model = ''\n",
"    print(\"Please provide a number or press 'Enter' to finish\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aeb6e4e5-fb63-4192-bb74-0b015dfedfb7",
"metadata": {},
"outputs": [],
"source": [
"# print(model)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fffa6021-d3f8-4855-a694-bed6d651791f",
"metadata": {},
"outputs": [],
"source": [
"messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": question}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "835374a4-3df5-4f28-82e3-6bc70514df16",
"metadata": {},
"outputs": [],
"source": [
"# Parse the menu choice once; invalid input (e.g. an empty string) becomes\n",
"# None instead of raising ValueError at this point.\n",
"try:\n",
"    choice = int(model_number)\n",
"except ValueError:\n",
"    choice = None\n",
"\n",
"if choice == 1:\n",
"    # OpenAI path: stream the completion and live-update a Markdown display\n",
"    openai = OpenAI()\n",
"    stream = openai.chat.completions.create(\n",
"        model=model,\n",
"        messages=messages,\n",
"        stream=True\n",
"    )\n",
"\n",
"    response = \"\"\n",
"    print(\"The following answer will be generated by {0} LLM\".format(model))\n",
"    display_handle = display(Markdown(\"\"), display_id=True)\n",
"    for chunk in stream:\n",
"        response += chunk.choices[0].delta.content or ''\n",
"        response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
"        update_display(Markdown(response), display_id=display_handle.display_id)\n",
"# BUG FIX: the original condition was `int(model_number)==2 or 3 or 4`, which\n",
"# is ALWAYS truthy (`or 3` short-circuits to 3) — use membership instead.\n",
"elif choice in (2, 3, 4):\n",
"    # Ollama path: make sure the model is pulled locally, then chat once\n",
"    !ollama pull {model}\n",
"    print(\"\\n\\nThe following answer will be generated by {0} LLM\\n\\n\".format(model))\n",
"    response = ollama.chat(\n",
"        model=model,\n",
"        messages = messages)\n",
"    result = response['message']['content']\n",
"    display(Markdown(result))"
]
},
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Loading…
Cancel
Save