From c952b00397f20d34f16a763afb79d28c628001ec Mon Sep 17 00:00:00 2001
From: vahid
Date: Sat, 18 Jan 2025 14:23:14 +0330
Subject: [PATCH] Add Brochurify to community contributions

---
 .../Brochurify/README.md                      | 112 +++++++++
 .../Brochurify/exceptions.py                  |  20 ++
 .../Brochurify/main.py                        |  16 ++
 .../Brochurify/requirements.txt               |  48 ++++
 .../Brochurify/routes/__init__.py             |   1 +
 .../Brochurify/routes/router.py               |   4 +
 .../Brochurify/routes/socket/__init__.py      |   1 +
 .../Brochurify/routes/socket/socket.py        |  50 ++++
 .../community-contributions/Brochurify/run.sh |  41 ++++
 .../Brochurify/services/__init__.py           |   1 +
 .../services/crawler/__crawler_base.py        |  17 ++
 .../Brochurify/services/crawler/__init__.py   |   1 +
 .../Brochurify/services/crawler/bs4crawler.py |  61 +++++
 .../Brochurify/services/crawler/crawler.py    |  18 ++
 .../Brochurify/services/llm/__init__.py       |   1 +
 .../Brochurify/services/llm/builder.py        |  24 ++
 .../Brochurify/services/llm/llm_service.py    |  18 ++
 .../Brochurify/services/llm/open_ai.py        |  73 ++++++
 .../Brochurify/services/llm/summarizer_llm.py |  56 +++++
 .../Brochurify/services/orchestrator.py       |  23 ++
 .../Brochurify/services/socket/__init__.py    |   1 +
 .../services/socket/socket_handler.py         |  44 ++++
 .../Brochurify/static/index.html              | 222 ++++++++++++++++++
 23 files changed, 853 insertions(+)
 create mode 100644 week1/community-contributions/Brochurify/README.md
 create mode 100644 week1/community-contributions/Brochurify/exceptions.py
 create mode 100644 week1/community-contributions/Brochurify/main.py
 create mode 100644 week1/community-contributions/Brochurify/requirements.txt
 create mode 100644 week1/community-contributions/Brochurify/routes/__init__.py
 create mode 100644 week1/community-contributions/Brochurify/routes/router.py
 create mode 100644 week1/community-contributions/Brochurify/routes/socket/__init__.py
 create mode 100644 week1/community-contributions/Brochurify/routes/socket/socket.py
 create mode 100755 week1/community-contributions/Brochurify/run.sh
 create mode 100644 week1/community-contributions/Brochurify/services/__init__.py
 create mode 100644 week1/community-contributions/Brochurify/services/crawler/__crawler_base.py
 create mode 100644 week1/community-contributions/Brochurify/services/crawler/__init__.py
 create mode 100644 week1/community-contributions/Brochurify/services/crawler/bs4crawler.py
 create mode 100644 week1/community-contributions/Brochurify/services/crawler/crawler.py
 create mode 100644 week1/community-contributions/Brochurify/services/llm/__init__.py
 create mode 100644 week1/community-contributions/Brochurify/services/llm/builder.py
 create mode 100644 week1/community-contributions/Brochurify/services/llm/llm_service.py
 create mode 100644 week1/community-contributions/Brochurify/services/llm/open_ai.py
 create mode 100644 week1/community-contributions/Brochurify/services/llm/summarizer_llm.py
 create mode 100644 week1/community-contributions/Brochurify/services/orchestrator.py
 create mode 100644 week1/community-contributions/Brochurify/services/socket/__init__.py
 create mode 100644 week1/community-contributions/Brochurify/services/socket/socket_handler.py
 create mode 100644 week1/community-contributions/Brochurify/static/index.html

diff --git a/week1/community-contributions/Brochurify/README.md b/week1/community-contributions/Brochurify/README.md
new file mode 100644
index 0000000..c7fe259
--- /dev/null
+++ b/week1/community-contributions/Brochurify/README.md
@@ -0,0 +1,112 @@
+
+# Brochurify
+
+Welcome to Brochurify!
+This project is designed to simplify website data extraction and summarization, providing a streamlined way to generate brochures from web content.
+
+## Table of Contents
+
+1. [About the Project](#about-the-project)
+2. [Features](#features)
+3. [Installation](#installation)
+4. [Usage](#usage)
+5. [Project Structure](#project-structure)
+
+---
+
+## About the Project
+
+An innovative project that simplifies website data extraction and summarization.
+
+Key Technologies:
+- FastAPI
+- WebSockets for real-time communication
+- LLMs for summarization and brochure generation
+
+---
+
+## Features
+
+- **Webpage Summarization:** Provide a URL and get a concise summary.
+- **Brochure Creation:** Generate a visually appealing, structured brochure from a website.
+- **Real-time Processing:** Instant feedback using WebSockets.
+
+---
+
+## Installation
+
+Follow these steps to set up the project locally:
+
+### 1. **Clone the Repository:**
+
+   ```bash
+   git clone https://github.com/itsnotvahid/Brochurify.git
+   cd Brochurify
+   ```
+
+### 2. **Create a Virtual Environment:**
+
+   It's recommended to use a virtual environment to manage dependencies.
+
+   On **Linux/macOS**, run:
+
+   ```bash
+   python3 -m venv venv
+   source venv/bin/activate
+   ```
+
+### 3. **Install Dependencies:**
+
+   Ensure you have `pip` installed. Then, install the required packages:
+
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+### 4. **Create a `.env` File:**
+
+   The application requires certain environment variables. Create a `.env` file in the root directory with the following content:
+
+   ```env
+   OPENAI_API_KEY=your_openai_api_key_here
+   SOCKET_HOST=127.0.0.1
+   SOCKET_PORT=8912
+   STATIC_HOST=127.0.0.1
+   STATIC_PORT=8913
+   ```
+
+   Replace `your_openai_api_key_here` with your actual OpenAI API key.
+
+---
+
+## Usage
+
+To run the application:
+
+### 1. **Start the Servers with the `run.sh` Script:**
+
+   Make sure the `run.sh` script is executable. If not, change its permissions:

+   ```bash
+   chmod +x run.sh
+   ```
+
+   Now, run the script:
+
+   ```bash
+   ./run.sh
+   ```
+
+### 2. **Access the Application:**
+
+   Open your browser and navigate to `http://STATIC_HOST:STATIC_PORT` (substituting the values from your `.env` file) to interact with the application.
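+
+### 3. **Script Against the WebSocket Endpoint (optional):**
+
+   If you would rather drive the backend from code than from the bundled page, the sketch below shows one way to do it with the `websockets` package already pinned in `requirements.txt`. The host, port, and example URL are assumptions based on the `.env` values above; the payload keys mirror what `routes/socket/socket.py` reads.
+
+   ```python
+   import asyncio
+   import json
+
+   import websockets
+
+
+   async def main():
+       # "/socket" comes from routes/router.py, "/ws" from routes/socket/socket.py.
+       async with websockets.connect("ws://127.0.0.1:8912/socket/ws") as ws:
+           await ws.send(json.dumps({
+               "url": "https://example.com",    # site to crawl (assumed example)
+               "crawlType": "normal",           # the only type CrawlerService accepts
+               "siteType": "company website",   # free-form hint passed to the LLM
+               "description": "A short description of the site",
+           }))
+           while True:
+               frame = json.loads(await ws.recv())
+               if frame.get("type") == "message" and frame.get("is_complete"):
+                   break  # a final status frame and the close follow
+               print(frame.get("message") or "", end="", flush=True)
+
+
+   asyncio.run(main())
+   ```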
+
+---
+
+## Project Structure
+
+- `main.py`: The entry point of the application.
+- `routes/`: Contains the HTTP router and the WebSocket endpoint.
+- `services/`: Includes the core logic for crawling, summarization, and brochure generation.
+- `static/`: Holds the AI-generated static frontend files.
+
+---
diff --git a/week1/community-contributions/Brochurify/exceptions.py b/week1/community-contributions/Brochurify/exceptions.py
new file mode 100644
index 0000000..6d408bd
--- /dev/null
+++ b/week1/community-contributions/Brochurify/exceptions.py
@@ -0,0 +1,20 @@
+from fastapi import WebSocketException
+
+
+class BadUrlException(WebSocketException):
+    def __init__(self):
+        super().__init__(
+            code=1000,
+            reason="There is something wrong with the URL you provided; "
+                   "please check it and try again.",
+        )
+
+
+class InvalidCrawlType(WebSocketException):
+    def __init__(self):
+        super().__init__(code=1000, reason="Invalid crawl type.")
+
+
+class InvalidContent(WebSocketException):
+    def __init__(self):
+        super().__init__(code=1000, reason="Bad content, try again.")
diff --git a/week1/community-contributions/Brochurify/main.py b/week1/community-contributions/Brochurify/main.py
new file mode 100644
index 0000000..c47a554
--- /dev/null
+++ b/week1/community-contributions/Brochurify/main.py
@@ -0,0 +1,16 @@
+from fastapi import FastAPI
+import uvicorn
+from routes import router
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+app = FastAPI()
+app.include_router(router)
+
+
+if __name__ == '__main__':
+    host = os.getenv("SOCKET_HOST", "127.0.0.1")
+    port = int(os.getenv("SOCKET_PORT", 8912))
+    uvicorn.run("main:app", host=host, port=port,
+                reload=True)
diff --git a/week1/community-contributions/Brochurify/requirements.txt b/week1/community-contributions/Brochurify/requirements.txt
new file mode 100644
index 0000000..54f4496
--- /dev/null
+++ b/week1/community-contributions/Brochurify/requirements.txt
@@ -0,0 +1,48 @@
+aiohappyeyeballs==2.4.4
+aiohttp==3.11.11
+aiosignal==1.3.2
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+beautifulsoup4==4.12.3
+certifi==2024.12.14
+click==8.1.8
+distro==1.9.0
+dnspython==2.7.0
+email_validator==2.2.0
+fastapi==0.115.6
+fastapi-cli==0.0.7
+frozenlist==1.5.0
+h11==0.14.0
+httpcore==1.0.7
+httptools==0.6.4
+httpx==0.28.1
+idna==3.10
+Jinja2==3.1.5
+jiter==0.8.2
+markdown-it-py==3.0.0
+MarkupSafe==3.0.2
+mdurl==0.1.2
+multidict==6.1.0
+openai==1.59.7
+propcache==0.2.1
+pydantic==2.10.5
+pydantic_core==2.27.2
+Pygments==2.19.1
+python-dotenv==1.0.1
+python-multipart==0.0.20
+PyYAML==6.0.2
+rich==13.9.4
+rich-toolkit==0.13.2
+shellingham==1.5.4
+sniffio==1.3.1
+soupsieve==2.6
+starlette==0.41.3
+tqdm==4.67.1
+typer==0.15.1
+typing_extensions==4.12.2
+uvicorn==0.34.0
+uvloop==0.21.0
+watchfiles==1.0.4
+websockets==14.1
+yarl==1.18.3
diff --git a/week1/community-contributions/Brochurify/routes/__init__.py b/week1/community-contributions/Brochurify/routes/__init__.py
new file mode 100644
index 0000000..2378043
--- /dev/null
+++ b/week1/community-contributions/Brochurify/routes/__init__.py
@@ -0,0 +1 @@
+from .router import router
diff --git a/week1/community-contributions/Brochurify/routes/router.py b/week1/community-contributions/Brochurify/routes/router.py
new file mode 100644
index 0000000..be101bf
--- /dev/null
+++ b/week1/community-contributions/Brochurify/routes/router.py
@@ -0,0 +1,4 @@
+from fastapi import APIRouter
+from .socket import socket_router
+router = APIRouter()
+router.include_router(socket_router, prefix="/socket")
diff --git a/week1/community-contributions/Brochurify/routes/socket/__init__.py b/week1/community-contributions/Brochurify/routes/socket/__init__.py
new file mode 100644
index 0000000..4dbf7c0
--- /dev/null
+++ b/week1/community-contributions/Brochurify/routes/socket/__init__.py
@@ -0,0 +1 @@
+from .socket import router as socket_router
diff --git a/week1/community-contributions/Brochurify/routes/socket/socket.py b/week1/community-contributions/Brochurify/routes/socket/socket.py
new file mode 100644
index 0000000..083b368
--- /dev/null
+++ b/week1/community-contributions/Brochurify/routes/socket/socket.py
@@ -0,0 +1,50 @@
+import json
+
+from services import Orchestrator
+from services.crawler import CrawlerService
+from fastapi import WebSocket, WebSocketDisconnect, APIRouter
+
+from services.llm import LLMService
+from services.socket import ConnectionManager
+manager = ConnectionManager()
+router = APIRouter()
+
+
+@router.websocket("/ws")
+async def websocket_endpoint(websocket: WebSocket):
+    user_unique_id = await manager.connect(websocket)
+    try:
+        data = await websocket.receive_text()
+        data = json.loads(data)
+        user_state = manager.get_user_state(user_unique_id)
+        if not user_state:
+            orchestrator = Orchestrator(crawler_service=CrawlerService(url=data['url'],
+                                                                       crawl_type=data['crawlType']),
+                                        llm_service=LLMService(model_type='openai',
+                                                               model_name="gpt-4o-mini",
+                                                               crawl_type='summarize'))
+
+            status_message = dict(type="status", message="Processing your request...")
+
+            await manager.send_message(unique_id=user_unique_id,
+                                       message=json.dumps(status_message))
+
+            manager.modify_user_state(user_unique_id, "crawling")
+
+            async for update in orchestrator.stream_website_data(
+                    user_id=user_unique_id, manager=manager,
+                    description=data['description'], url=data['url'],
+                    site_type=data['siteType']):
+                message = dict(type="message", message=update,
+                               is_complete=False)
+                await manager.send_message(user_unique_id, json.dumps(message))
+
+            status_message = dict(type="status", message="Done. Closing the connection now.")
+            message = dict(type="message", message="..",
+                           is_complete=True)
+            await manager.send_message(user_unique_id, json.dumps(message))
+            await manager.send_message(user_unique_id, json.dumps(status_message))
+            await manager.disconnect(user_unique_id)
+
+    except WebSocketDisconnect:
+        await manager.disconnect(user_unique_id)
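The wire format this endpoint emits is only implied by the `dict(...)` literals above (plus one more in `services/orchestrator.py` below). Spelled out as a sketch, these are the two frame shapes a client has to handle; the `TypedDict` names are ours for illustration, not part of the PR:

```python
from typing import Literal, TypedDict


class StatusFrame(TypedDict):
    """Progress notices such as 'Processing your request...'."""
    type: Literal["status"]
    message: str


class MessageFrame(TypedDict):
    """One streamed chunk of the generated brochure markdown."""
    type: Literal["message"]
    message: str
    is_complete: bool  # True only on the final ".." frame before the close
```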
diff --git a/week1/community-contributions/Brochurify/run.sh b/week1/community-contributions/Brochurify/run.sh
new file mode 100755
index 0000000..0af0bd2
--- /dev/null
+++ b/week1/community-contributions/Brochurify/run.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+if [ -f .env ]; then
+    export $(grep -v '^#' .env | xargs)
+else
+    echo ".env file not found. Please create it with SOCKET_HOST, SOCKET_PORT, STATIC_HOST and STATIC_PORT."
+    exit 1
+fi
+
+sed -i "s/SOCKET_HOST/$SOCKET_HOST/g" static/index.html
+sed -i "s/SOCKET_PORT/$SOCKET_PORT/g" static/index.html
+
+if lsof -i:$SOCKET_PORT > /dev/null 2>&1; then
+    echo "Port $SOCKET_PORT is already in use. Please free the port or use a different one."
+    exit 1
+fi
+
+
+echo "Starting the Python application on $SOCKET_HOST:$SOCKET_PORT..."
+python main.py &
+APP_PID=$!
+sleep 2
+
+
+STATIC_DIR="./static"
+if [ ! -d "$STATIC_DIR" ]; then
+    echo "Static directory not found at $STATIC_DIR. Please ensure it exists."
+    exit 1
+fi
+
+cd $STATIC_DIR
+echo "Starting the static server in $STATIC_DIR on $STATIC_HOST:$STATIC_PORT..."
+python -m http.server $STATIC_PORT --bind $STATIC_HOST &
+STATIC_PID=$!
+
+cd ..
+
+
+echo "Servers are running. Press Ctrl+C to stop."
+trap "kill $STATIC_PID $APP_PID" SIGINT SIGTERM
+wait
diff --git a/week1/community-contributions/Brochurify/services/__init__.py b/week1/community-contributions/Brochurify/services/__init__.py
new file mode 100644
index 0000000..5f7b669
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/__init__.py
@@ -0,0 +1 @@
+from .orchestrator import Orchestrator
diff --git a/week1/community-contributions/Brochurify/services/crawler/__crawler_base.py b/week1/community-contributions/Brochurify/services/crawler/__crawler_base.py
new file mode 100644
index 0000000..14f5d0a
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/crawler/__crawler_base.py
@@ -0,0 +1,17 @@
+from abc import ABC, abstractmethod
+
+
+class CrawlerBase(ABC):
+
+    @abstractmethod
+    async def crawl(self):
+        pass
+
+    @staticmethod
+    async def _fetch(session, link):
+        # Returns the page body on success, or the raised exception object on
+        # failure; callers are expected to check the result with isinstance().
+        try:
+            async with session.get(link) as response:
+                response.raise_for_status()
+                return await response.text()
+        except Exception as e:
+            return e
diff --git a/week1/community-contributions/Brochurify/services/crawler/__init__.py b/week1/community-contributions/Brochurify/services/crawler/__init__.py
new file mode 100644
index 0000000..f73d62e
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/crawler/__init__.py
@@ -0,0 +1 @@
+from .crawler import CrawlerService
diff --git a/week1/community-contributions/Brochurify/services/crawler/bs4crawler.py b/week1/community-contributions/Brochurify/services/crawler/bs4crawler.py
new file mode 100644
index 0000000..9cd07eb
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/crawler/bs4crawler.py
@@ -0,0 +1,61 @@
+import aiohttp
+import asyncio
+from exceptions import BadUrlException
+from bs4 import BeautifulSoup
+
+from .__crawler_base import CrawlerBase
+
+
+class BS4Crawler(CrawlerBase):
+    def __init__(self, url):
+        self.url = url
+        self.visited_links = list()
+        self.url_contents = list()
+
+    @staticmethod
+    def get_soup_content(soup):
+        title = soup.title.string if soup.title else "No title found"
+        if soup.body:
+            for irrelevant in soup.body(["script", "style", "img", "input"]):
+                irrelevant.decompose()
+            text = title + "\t" + soup.body.get_text(separator="\n", strip=True)
+            return text
+        return ""
+
+    async def main_page_crawl(self, session):
+        response = await self._fetch(session, self.url)
+        if isinstance(response, BaseException):
+            raise BadUrlException()
+        soup = BeautifulSoup(response, 'html.parser')
+
+        main_page_text = self.get_soup_content(soup)
+        return main_page_text, soup
+
+    async def crawl(self):
+        async with aiohttp.ClientSession() as session:
+            main_page_text, soup = await self.main_page_crawl(session)
+            self.url_contents.append(dict(url=self.url, content=main_page_text))
+            self.visited_links.append(self.url)
+            links = [link.get('href') for link in soup.find_all('a')]
+
+            requests = list()
+            for link in links:
+                if link is not None:
+                    if link not in self.visited_links and link.startswith(self.url):
+                        requests.append(self.get_url_content(session, link))
+                        self.visited_links.append(link)
+            print("Gathering linked pages...")
+            if requests:
+                responses = await asyncio.gather(*requests, return_exceptions=True)
+                for response in responses:
+                    if response:
+                        self.url_contents.append(response)
+            print("Crawling done.")
+
+    async def get_url_content(self, session, link):
+        response = await self._fetch(session, link)
+        if isinstance(response, BaseException):
+            return None
+        soup = BeautifulSoup(response, 'html.parser')
+        text = self.get_soup_content(soup)
+        return dict(url=link, content=text)
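Note that the `link.startswith(self.url)` filter above silently drops relative links (`/about`, `careers/`), even though the prompt in `summarizer_llm.py` later warns the model that "some might be relative links". A sketch of a normalization helper the crawl loop could call first, using only the standard library (`normalize_link` is hypothetical, not part of this PR):

```python
from typing import Optional
from urllib.parse import urljoin, urlparse


def normalize_link(base_url: str, href: str) -> Optional[str]:
    """Resolve a possibly relative href against the page it came from.

    Returns an absolute URL when the target stays on the same host,
    or None for off-site, mailto:, and javascript: links.
    """
    absolute = urljoin(base_url, href)
    base, target = urlparse(base_url), urlparse(absolute)
    if target.scheme in ("http", "https") and target.netloc == base.netloc:
        return absolute.split("#", 1)[0]  # drop fragments so visited-checks match
    return None
```

The loop would then deduplicate on `normalize_link(self.url, link)` results instead of raw hrefs, so `/about` and `https://example.com/about` count as the same page.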
diff --git a/week1/community-contributions/Brochurify/services/crawler/crawler.py b/week1/community-contributions/Brochurify/services/crawler/crawler.py
new file mode 100644
index 0000000..3af4593
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/crawler/crawler.py
@@ -0,0 +1,18 @@
+from exceptions import InvalidCrawlType
+from .bs4crawler import BS4Crawler
+
+
+class CrawlerService:
+    def __init__(self, url, crawl_type):
+        self.crawler = self.crawl_builder(url, crawl_type)
+
+    async def crawl(self):
+        await self.crawler.crawl()
+        return self.crawler.url_contents
+
+    @staticmethod
+    def crawl_builder(url, crawl_type):
+        if crawl_type == "normal":
+            return BS4Crawler(url)
+        raise InvalidCrawlType()
diff --git a/week1/community-contributions/Brochurify/services/llm/__init__.py b/week1/community-contributions/Brochurify/services/llm/__init__.py
new file mode 100644
index 0000000..f2ae924
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/llm/__init__.py
@@ -0,0 +1 @@
+from .llm_service import LLMService
diff --git a/week1/community-contributions/Brochurify/services/llm/builder.py b/week1/community-contributions/Brochurify/services/llm/builder.py
new file mode 100644
index 0000000..fb1ccab
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/llm/builder.py
@@ -0,0 +1,24 @@
+from .open_ai import OpenAISummarize
+
+# Maps model type -> task -> implementing class. The values are the classes
+# themselves, so a missing implementation fails at import time rather than
+# at lookup time. An Ollama-backed summarizer is planned but not yet
+# implemented in this PR.
+SUPPORTED_MODELS = {
+    "openai": {
+        "summarize": OpenAISummarize,
+    },
+}
+
+
+def llm_builder(model_type: str, model_name: str, crawl_type: str):
+    if model_type not in SUPPORTED_MODELS:
+        raise ValueError(f"Unsupported model type: {model_type}")
+
+    if crawl_type not in SUPPORTED_MODELS[model_type]:
+        raise ValueError(f"Crawl type '{crawl_type}' not supported for model type '{model_type}'")
+
+    service_class = SUPPORTED_MODELS[model_type][crawl_type]
+
+    return service_class(model_name)
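Registering a second backend is then a one-line change per entry. A sketch of what an Ollama implementation and its registration might look like (`OllamaSummarize` is hypothetical; nothing in this PR provides it):

```python
# services/llm/ollama.py -- hypothetical sketch, not part of this PR
from .summarizer_llm import BaseSummarizer


class OllamaSummarize(BaseSummarizer):
    def __init__(self, model_name: str = "llama3.2"):
        self.model = model_name

    async def generate(self, url, content, description, site_type):
        # A real implementation would stream chunks from a local Ollama server.
        yield f"(stub) would summarize {len(content)} pages crawled from {url}\n"

    async def remove_unnecessary_link(self, url, links, description, site_type):
        # Keep everything; a real implementation would ask the model to filter.
        return {"links": [{"type": "page", "url": link} for link in links]}


# Registration in services/llm/builder.py would then be:
#     from .ollama import OllamaSummarize
#     SUPPORTED_MODELS["ollama"] = {"summarize": OllamaSummarize}
```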
diff --git a/week1/community-contributions/Brochurify/services/llm/llm_service.py b/week1/community-contributions/Brochurify/services/llm/llm_service.py
new file mode 100644
index 0000000..f2e01dd
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/llm/llm_service.py
@@ -0,0 +1,18 @@
+from .builder import llm_builder
+
+
+class LLMService:
+    def __init__(self, model_type, model_name, crawl_type):
+        self.llm = llm_builder(model_type, model_name, crawl_type)
+
+    async def generate_response(self, crawl_result, url,
+                                description, site_type):
+        async for response_chunk in self.llm.generate(content=crawl_result,
+                                                      url=url,
+                                                      description=description,
+                                                      site_type=site_type):
+            yield response_chunk
diff --git a/week1/community-contributions/Brochurify/services/llm/open_ai.py b/week1/community-contributions/Brochurify/services/llm/open_ai.py
new file mode 100644
index 0000000..fabf4cd
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/llm/open_ai.py
@@ -0,0 +1,73 @@
+import json
+import os
+from typing import Dict, List
+
+from openai import AsyncOpenAI
+from .summarizer_llm import BaseSummarizer
+
+
+class OpenAISummarize(BaseSummarizer):
+    def __init__(self, model_name: str = "gpt-4o-mini"):
+        self.model = model_name
+        api_key = os.getenv("OPENAI_API_KEY")
+        self.openai = AsyncOpenAI(api_key=api_key)
+
+    async def generate(self, url, content: List[Dict], description,
+                       site_type):
+        content_dict = {item['url']: item for item in content}
+        links = list(content_dict.keys())
+
+        yield f"Filtering the links found on {url}...\n"
+        new_links = await self.remove_unnecessary_link(url=url,
+                                                       links=links,
+                                                       description=description,
+                                                       site_type=site_type)
+        yield "Links have been filtered. Advancing...\n\n"
+
+        new_links = new_links['links']
+
+        filtered_content = [content_dict[link_info['url']] for link_info in new_links if
+                            link_info['url'] in content_dict]
+
+        yield "Almost done...\n"
+        prompt = self.get_brochure_prompt(filtered_content)
+        response = await self.openai.chat.completions.create(model=self.model,
+                                                             messages=prompt, stream=True)
+
+        async for response_chunk in response:
+            # The final chunk of a stream carries no content; skip empty deltas.
+            if response_chunk.choices and response_chunk.choices[0].delta.content:
+                yield response_chunk.choices[0].delta.content
+
+    async def remove_unnecessary_link(self, url, links, description,
+                                      site_type):
+        prompt = self.prompts_for_removing_links(url=url,
+                                                 description=description,
+                                                 site_type=site_type,
+                                                 links=links)
+        completion = await self.openai.chat.completions.create(
+            messages=prompt,
+            model=self.model,
+            response_format={"type": "json_object"}
+        )
+        result = completion.choices[0].message.content
+        return json.loads(result)
+
+    @staticmethod
+    def get_brochure_prompt(link_content_list):
+        system_prompt = ("You are an assistant that analyzes the contents of "
+                         "several relevant pages from a company website and "
+                         "creates a short brochure about the company for "
+                         "prospective customers, investors and recruits. "
+                         "Respond in markdown. Include details of company "
+                         "culture, customers and careers/jobs if you have the "
+                         "information.")
+        user_prompt = ("Here are the contents of its landing page and other "
+                       "relevant pages; use this information to build a short "
+                       "brochure of the company in markdown.\n")
+        result = "The links' content is:\n\n"
+        for item in link_content_list:
+            link = item['url']
+            content = item['content']
+            result += f"url: {link},\t content: {content[:2000]}\n"
+        user_prompt += result
+        return [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_prompt}
+        ]
diff --git a/week1/community-contributions/Brochurify/services/llm/summarizer_llm.py b/week1/community-contributions/Brochurify/services/llm/summarizer_llm.py
new file mode 100644
index 0000000..1df055a
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/llm/summarizer_llm.py
@@ -0,0 +1,56 @@
+from abc import abstractmethod, ABC
+
+
+class BaseSummarizer(ABC):
+
+    @abstractmethod
+    async def generate(self, *args, **kwargs):
+        pass
+
+    @abstractmethod
+    async def remove_unnecessary_link(self, *args, **kwargs):
+        pass
+
+    def prompts_for_removing_links(self, url, links, description=None, site_type=None):
+        link_system_prompt = (
+            "You are provided with a list of links found on a webpage. "
+            "Your task is to filter out irrelevant links and retain those that are most "
+            "relevant for creating a brochure. "
+            "Consider links that provide valuable information about the site's content, "
+            "such as main articles, key information pages, or other significant sections.\n"
+            "Exclude links that are not useful for a brochure, such as Terms of Service, "
+            "Privacy policies, and email links.\n"
+            "You should respond in JSON format as shown in the example below:\n"
+            "{\n"
+            '    "links": [\n'
+            '        {"type": "relevant page type", "url": "https://full.url/goes/here"},\n'
+            '        {"type": "another relevant page type", "url": "https://another.full.url"}\n'
+            "    ]\n"
+            "}"
+        )
+
+        user_prompt = self.get_links_user_prompt(url, links, description, site_type)
+
+        return [
+            {"role": "system", "content": link_system_prompt},
+            {"role": "user", "content": user_prompt},
+        ]
+
+    @staticmethod
+    def get_links_user_prompt(url, links, description=None, site_type=None):
+        user_prompt = f"Here is the list of links found on {url}:\n"
+
+        if site_type or description:
+            user_prompt += "Additional context:\n"
+            if site_type:
+                user_prompt += f"- Site type: {site_type}\n"
+            if description:
+                user_prompt += f"- User description: {description}\n"
+
+        user_prompt += (
+            "Please evaluate the following links and select those that are relevant for inclusion in a brochure. "
+            "Exclude links related to Terms of Service, Privacy policies, and email addresses.\n"
+            "Links (some might be relative links):\n"
+        )
+        user_prompt += "\n".join(links)
+        return user_prompt
diff --git a/week1/community-contributions/Brochurify/services/orchestrator.py b/week1/community-contributions/Brochurify/services/orchestrator.py
new file mode 100644
index 0000000..871bb65
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/orchestrator.py
@@ -0,0 +1,23 @@
+import json
+from services.socket import ConnectionManager
+
+
+class Orchestrator:
+    def __init__(self, crawler_service, llm_service):
+        self.crawler_service = crawler_service
+        self.llm_service = llm_service
+
+    async def stream_website_data(self, user_id: str, manager: ConnectionManager,
+                                  description,
+                                  site_type,
+                                  url):
+
+        status_message = dict(type="status", message="Starting crawling process...")
+        await manager.send_message(user_id, json.dumps(status_message))
+        crawl_result = await self.crawler_service.crawl()
+
+        status_message = dict(type="status", message="Processing content with LLM...")
+        await manager.send_message(user_id, json.dumps(status_message))
+        async for llm_update in self.llm_service.generate_response(
+                url=url, crawl_result=crawl_result, description=description,
+                site_type=site_type):
+            yield llm_update
diff --git a/week1/community-contributions/Brochurify/services/socket/__init__.py b/week1/community-contributions/Brochurify/services/socket/__init__.py
new file mode 100644
index 0000000..ac2b196
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/socket/__init__.py
@@ -0,0 +1 @@
+from .socket_handler import ConnectionManager
\ No newline at end of file
diff --git a/week1/community-contributions/Brochurify/services/socket/socket_handler.py b/week1/community-contributions/Brochurify/services/socket/socket_handler.py
new file mode 100644
index 0000000..8bf4ecd
--- /dev/null
+++ b/week1/community-contributions/Brochurify/services/socket/socket_handler.py
@@ -0,0 +1,44 @@
+from fastapi import WebSocket
+from uuid import uuid4
+
+from exceptions import InvalidContent
+
+
+class ConnectionManager:
+    def __init__(self):
+        self.active_connections = dict()
+        self.user_states = dict()
+
+    async def connect(self, websocket: WebSocket):
+        unique_id = str(uuid4())[:10]
+        await websocket.accept()
+        self.active_connections[unique_id] = websocket
+        self.user_states[unique_id] = dict()
+        print(f"User {unique_id} connected.")
+        return unique_id
+
+    async def disconnect(self, unique_id):
+        if unique_id in self.active_connections:
+            if self.user_states[unique_id].get("connection_state", None) != 'closed':
+                print(f"Closing connection with user {unique_id}.")
+                await self.active_connections[unique_id].close(code=1000)
+                self.user_states[unique_id]['connection_state'] = 'closed'
+            del self.active_connections[unique_id]
+            del self.user_states[unique_id]
+            print(f"User {unique_id} disconnected.")
+
+    async def send_message(self, unique_id, message):
+        if unique_id in self.active_connections:
+            await self.active_connections[unique_id].send_text(message)
+
+    def get_user_state(self, unique_id):
+        user = self.user_states.get(unique_id, None)
+        if user is None:
+            raise InvalidContent()
+        return self.user_states[unique_id].get("user_state", None)
+
+    def modify_user_state(self, unique_id, state):
+        if state == "":
+            self.user_states[unique_id].pop("user_state", None)
+        else:
+            self.user_states[unique_id]["user_state"] = state
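For reviewers who want to exercise the ConnectionManager wiring end to end, a smoke-test sketch using FastAPI's bundled test client is below. It is not part of this PR, and because the handler drives the real crawler and OpenAI call, it needs network access and an OPENAI_API_KEY to get past the first frame:

```python
import json

from fastapi.testclient import TestClient

from main import app

client = TestClient(app)


def test_first_frame_is_a_status():
    with client.websocket_connect("/socket/ws") as ws:
        ws.send_text(json.dumps({
            "url": "https://example.com",
            "crawlType": "normal",
            "siteType": "company website",
            "description": "demo",
        }))
        # The endpoint acknowledges the request before any crawling output.
        first = json.loads(ws.receive_text())
        assert first["type"] == "status"
```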
diff --git a/week1/community-contributions/Brochurify/static/index.html b/week1/community-contributions/Brochurify/static/index.html
new file mode 100644
index 0000000..7eb71b4
--- /dev/null
+++ b/week1/community-contributions/Brochurify/static/index.html
@@ -0,0 +1,222 @@
[The 222 added lines of index.html did not survive extraction: the markup was stripped, leaving only text fragments. What remains indicates a "WebSocket Chat with Markdown Streaming" page with a few labeled input fields, a "✅ Ready to chat!" status indicator, and a client script that presumably connects to ws://SOCKET_HOST:SOCKET_PORT/socket/ws, the placeholders that run.sh substitutes at startup.]
\ No newline at end of file