{ "cells": [ { "cell_type": "markdown", "id": "9964872b-225d-4ced-93e4-fc5b279ec2ed", "metadata": {}, "source": [ "# Webpage English summarizer with user inputs (url, ollama-based LLM) " ] }, { "cell_type": "code", "execution_count": null, "id": "4e49d399-d18c-4c91-8abc-cf3289e11e2f", "metadata": {}, "outputs": [], "source": [ "# imports\n", "\n", "import os\n", "import requests\n", "# from dotenv import load_dotenv\n", "from bs4 import BeautifulSoup\n", "from IPython.display import Markdown, display\n", "from openai import OpenAI\n", "import ollama, time\n", "from tqdm import tqdm" ] }, { "cell_type": "code", "execution_count": null, "id": "46e7d809-248d-41b8-80e1-36b210041581", "metadata": {}, "outputs": [], "source": [ "# Define system prompt.\n", "\n", "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n", "and provides a detailed summary, ignoring text that might be navigation related. \\\n", "Respond in markdown, in English.\"" ] }, { "cell_type": "code", "execution_count": null, "id": "e8bf237f-591f-4c32-9415-5d5d4e2522b8", "metadata": {}, "outputs": [], "source": [ "# A function that writes a User Prompt that asks for summaries of websites:\n", "\n", "def user_prompt_for(website):\n", " user_prompt = f\"You are looking at a website titled {website.title}\"\n", " user_prompt += \"\\nThe contents of this website is as follows; \\\n", "please provide a detailed summary of this website in markdown. \\\n", "If it includes news or announcements, then summarize these too.\\n\\n\"\n", " user_prompt += website.text\n", " return user_prompt" ] }, { "cell_type": "code", "execution_count": null, "id": "7d39ee6d-c670-41ba-a0b8-debd55bda8e3", "metadata": {}, "outputs": [], "source": [ "# See how this function creates exactly the format above\n", "\n", "def messages_for(website):\n", " return [\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n", " ]" ] }, { "cell_type": "code", "execution_count": null, "id": "43e28ff5-2def-4a47-acdd-2e06c0666956", "metadata": {}, "outputs": [], "source": [ "# Constants\n", "\n", "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", "HEADERS = {\"Content-Type\": \"application/json\"}" ] }, { "cell_type": "code", "execution_count": null, "id": "32f4f481-81a3-479d-817b-4e754d9af46d", "metadata": {}, "outputs": [], "source": [ "# A class to represent a Webpage\n", "# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n", "\n", "# Some websites need you to use proper headers when fetching them:\n", "headers = HEADERS\n", "\n", "class Website:\n", "\n", " def __init__(self, url):\n", " \"\"\"\n", " Create this Website object from the given url using the BeautifulSoup library\n", " \"\"\"\n", " self.url = url\n", " response = requests.get(url, headers=headers)\n", " soup = BeautifulSoup(response.content, 'html.parser')\n", " self.title = soup.title.string if soup.title else \"No title found\"\n", " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", " irrelevant.decompose()\n", " self.text = soup.body.get_text(separator=\"\\n\", strip=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "f81cfd17-8208-4192-a59f-485ff3ea74e4", "metadata": {}, "outputs": [], "source": [ "# And now: call the ollama API wrapper and return the relevant component of the response\n", "\n", "def summarize(url):\n", " website = Website(url)\n", " response = ollama.chat(\n", " model=MODEL,\n", " messages = messages_for(website)\n", " )\n", " return response['message']['content']" ] }, { "cell_type": "code", "execution_count": null, "id": "7a9eedc6-2183-473d-84ca-b10d40e2a1e6", "metadata": {}, "outputs": [], "source": [ "# Ask the user the name of the url address\n", "\n", "url= str(input(\"\"\"\n", "Please provide a valid url address:\n", "https://\"\"\"))" ] }, { "cell_type": "code", "execution_count": null, "id": "5d012de2-0ef2-43db-9f51-fc7f989c3642", "metadata": {}, "outputs": [], "source": [ "# Ask the user to select a valid model\n", "\n", "MODEL= str(input(\"\"\"\n", "Please select a LLM:\n", "(examples: llama3.2, deepseek-r1:1.5b)\n", "\"\"\"))" ] }, { "cell_type": "code", "execution_count": null, "id": "1ac8c02e-4a62-448b-a231-8c6f65891811", "metadata": {}, "outputs": [], "source": [ "# Let's just make sure the model is loaded\n", "\n", "!ollama pull {MODEL}" ] }, { "cell_type": "code", "execution_count": null, "id": "0544541f-11a8-4eb7-8eb6-bc032ed6d0d1", "metadata": {}, "outputs": [], "source": [ "print('url: https://{0}\\nModel= {1}'.format(url, MODEL))" ] }, { "cell_type": "code", "execution_count": null, "id": "45518950-f2c9-43af-b897-4fe8fe48dfd8", "metadata": {}, "outputs": [], "source": [ "summary = summarize('https://'+ url)\n", "for summ in tqdm(summary):\n", " time.sleep(0.01)" ] }, { "cell_type": "code", "execution_count": null, "id": "02c0c15e-216d-47c7-843d-ac27af02820b", "metadata": {}, "outputs": [], "source": [ "display(Markdown(summary))" ] }, { "cell_type": "code", "execution_count": null, "id": "985a3689-5827-4b15-b8d5-276f9b292afd", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.11" } }, "nbformat": 4, "nbformat_minor": 5 }