
Add Brochurify to community contributions

pull/107/head
vahid 4 months ago
commit c952b00397
  1. week1/community-contributions/Brochurify/README.md (+112)
  2. week1/community-contributions/Brochurify/exceptions.py (+20)
  3. week1/community-contributions/Brochurify/main.py (+16)
  4. week1/community-contributions/Brochurify/requirements.txt (+48)
  5. week1/community-contributions/Brochurify/routes/__init__.py (+1)
  6. week1/community-contributions/Brochurify/routes/router.py (+4)
  7. week1/community-contributions/Brochurify/routes/socket/__init__.py (+1)
  8. week1/community-contributions/Brochurify/routes/socket/socket.py (+50)
  9. week1/community-contributions/Brochurify/run.sh (+41)
  10. week1/community-contributions/Brochurify/services/__init__.py (+1)
  11. week1/community-contributions/Brochurify/services/crawler/__crawler_base.py (+17)
  12. week1/community-contributions/Brochurify/services/crawler/__init__.py (+1)
  13. week1/community-contributions/Brochurify/services/crawler/bs4crawler.py (+61)
  14. week1/community-contributions/Brochurify/services/crawler/crawler.py (+18)
  15. week1/community-contributions/Brochurify/services/llm/__init__.py (+1)
  16. week1/community-contributions/Brochurify/services/llm/builder.py (+24)
  17. week1/community-contributions/Brochurify/services/llm/llm_service.py (+18)
  18. week1/community-contributions/Brochurify/services/llm/open_ai.py (+73)
  19. week1/community-contributions/Brochurify/services/llm/summarizer_llm.py (+56)
  20. week1/community-contributions/Brochurify/services/orchestrator.py (+23)
  21. week1/community-contributions/Brochurify/services/socket/__init__.py (+1)
  22. week1/community-contributions/Brochurify/services/socket/socket_handler.py (+44)
  23. week1/community-contributions/Brochurify/static/index.html (+222)

week1/community-contributions/Brochurify/README.md (+112)

@@ -0,0 +1,112 @@
# Brochurify
Welcome to Brochurify! This project is designed to simplify website data extraction and summarization, providing a streamlined way to generate brochures from web content.
## Table of Contents
1. [About the Project](#about-the-project)
2. [Features](#features)
3. [Installation](#installation)
4. [Usage](#usage)
5. [Project Structure](#project-structure)
---
## About the Project
Brochurify crawls a website, uses an LLM to filter the crawled links down to the relevant pages, and streams a generated brochure back to the browser in real time.
Key Technologies:
- FastAPI
- WebSockets for real-time communication
- LLMs for summarization and brochure generation
---
## Features
- **Webpage Summarization:** Provide a URL and get a concise summary.
- **Brochure Creation:** Generate a visually appealing, structured brochure from a website.
- **Real-time Processing:** Instant feedback using WebSockets.
---
## Installation
Follow these steps to set up the project locally:
### 1. **Clone the Repository:**
```bash
git clone https://github.com/itsnotvahid/Brochurify.git
cd Brochurify
```
### 2. **Create a Virtual Environment:**
It's recommended to use a virtual environment to manage dependencies.
On **Linux/macOS**, run:
```bash
python3 -m venv venv
source venv/bin/activate
```
### 3. **Install Dependencies:**
Ensure you have `pip` installed. Then, install the required packages:
```bash
pip install -r requirements.txt
```
### 4. **Create a `.env` File:**
The application requires certain environment variables. Create a `.env` file in the root directory with the following content:
```env
OPENAI_API_KEY=your_openai_api_key_here
SOCKET_HOST=127.0.0.1
SOCKET_PORT=8912
STATIC_HOST=127.0.0.1
STATIC_PORT=8913
```
Replace `your_openai_api_key_here` with your actual OpenAI API key.
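A quick way to confirm the variables are picked up before starting the servers (a minimal sketch using `python-dotenv`, which is already pinned in `requirements.txt`; `check_env.py` is a hypothetical helper, not part of the project):
```python
# check_env.py -- hypothetical sanity check for the .env file.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory

assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is missing from .env"
print("Socket server:", os.getenv("SOCKET_HOST"), os.getenv("SOCKET_PORT"))
print("Static server:", os.getenv("STATIC_HOST"), os.getenv("STATIC_PORT"))
```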
---
## Usage
To run the application:
### 1. **Start the FastAPI Server with the `run.sh` script:**
Make sure the `run.sh` script is executable. If not, change its permissions:
```bash
chmod +x run.sh
```
Now, run the script:
```bash
./run.sh
```
### 2. **Access the Application:**
Open your browser and navigate to `http://STATIC_HOST:STATIC_PORT` (the values from your `.env` file) to interact with the application.
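### 3. **(Optional) Query the WebSocket Directly:**
Besides the bundled front end, you can talk to the WebSocket endpoint programmatically. The sketch below uses the `websockets` package (already pinned in `requirements.txt`), assumes the defaults from the `.env` example above (`127.0.0.1:8912`), and mirrors the JSON payload that `static/index.html` sends; `brochure_client.py` is a hypothetical file name, not part of this project.
```python
# brochure_client.py -- hypothetical example client.
import asyncio
import json

import websockets  # already pinned in requirements.txt


async def request_brochure(url: str):
    async with websockets.connect("ws://127.0.0.1:8912/socket/ws") as ws:
        # Same JSON payload shape that the bundled front end sends.
        await ws.send(json.dumps({
            "url": url,
            "description": "",
            "siteType": "",
            "crawlType": "normal",
        }))
        async for raw in ws:
            msg = json.loads(raw)
            if msg["type"] == "status":
                print("[status]", msg["message"])
            elif msg["type"] == "message":
                print(msg["message"], end="", flush=True)
                if msg.get("is_complete"):
                    break


asyncio.run(request_brochure("https://example.com"))
```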
---
## Project Structure
- `main.py`: The entry point of the application.
- `routes/`: Contains the WebSocket API endpoints.
- `services/`: Includes the core logic for crawling, summarization, and brochure generation.
- `static/`: Holds the AI-generated static front end.
---

week1/community-contributions/Brochurify/exceptions.py (+20)

@@ -0,0 +1,20 @@
from fastapi import WebSocketException


class BadUrlException(WebSocketException):
    def __init__(self):
        super().__init__(
            code=1000,
            reason="There is something wrong with the URL you provided; "
                   "please check it and try again.",
        )


class InvalidCrawlType(WebSocketException):
    def __init__(self):
        super().__init__(code=1000, reason="Invalid crawl type")


class InvalidContent(WebSocketException):
    def __init__(self):
        super().__init__(code=1000, reason="Bad content, try again")

week1/community-contributions/Brochurify/main.py (+16)

@@ -0,0 +1,16 @@
import os

import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI

from routes import router

load_dotenv()

app = FastAPI()
app.include_router(router)

if __name__ == '__main__':
    # Defaults match the .env example in the README.
    host = os.getenv("SOCKET_HOST", "127.0.0.1")
    port = int(os.getenv("SOCKET_PORT", 8912))
    uvicorn.run("main:app", host=host, port=port, reload=True)

week1/community-contributions/Brochurify/requirements.txt (+48)

@@ -0,0 +1,48 @@
aiohappyeyeballs==2.4.4
aiohttp==3.11.11
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.8.0
attrs==24.3.0
beautifulsoup4==4.12.3
certifi==2024.12.14
click==8.1.8
distro==1.9.0
dnspython==2.7.0
email_validator==2.2.0
fastapi==0.115.6
fastapi-cli==0.0.7
frozenlist==1.5.0
h11==0.14.0
httpcore==1.0.7
httptools==0.6.4
httpx==0.28.1
idna==3.10
Jinja2==3.1.5
jiter==0.8.2
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
multidict==6.1.0
openai==1.59.7
propcache==0.2.1
pydantic==2.10.5
pydantic_core==2.27.2
Pygments==2.19.1
python-dotenv==1.0.1
python-multipart==0.0.20
PyYAML==6.0.2
rich==13.9.4
rich-toolkit==0.13.2
shellingham==1.5.4
sniffio==1.3.1
soupsieve==2.6
starlette==0.41.3
tqdm==4.67.1
typer==0.15.1
typing_extensions==4.12.2
uvicorn==0.34.0
uvloop==0.21.0
watchfiles==1.0.4
websockets==14.1
yarl==1.18.3

week1/community-contributions/Brochurify/routes/__init__.py (+1)

@@ -0,0 +1 @@
from .router import router

week1/community-contributions/Brochurify/routes/router.py (+4)

@@ -0,0 +1,4 @@
from fastapi import APIRouter
from .socket import socket_router
router = APIRouter()
router.include_router(socket_router, prefix="/socket")

week1/community-contributions/Brochurify/routes/socket/__init__.py (+1)

@@ -0,0 +1 @@
from .socket import router as socket_router

week1/community-contributions/Brochurify/routes/socket/socket.py (+50)

@@ -0,0 +1,50 @@
import json

from fastapi import APIRouter, WebSocket, WebSocketDisconnect

from services import Orchestrator
from services.crawler import CrawlerService
from services.llm import LLMService
from services.socket import ConnectionManager

manager = ConnectionManager()
router = APIRouter()


@router.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    user_unique_id = await manager.connect(websocket)
    try:
        data = json.loads(await websocket.receive_text())
        user_state = manager.get_user_state(user_unique_id)
        if not user_state:
            orchestrator = Orchestrator(
                crawler_service=CrawlerService(url=data['url'],
                                               crawl_type=data['crawlType']),
                llm_service=LLMService(model_type='openai',
                                       model_name="gpt-4o-mini",
                                       crawl_type='summarize'))
            status_message = dict(type="status", message="Processing your request...")
            await manager.send_message(unique_id=user_unique_id,
                                       message=json.dumps(status_message))
            manager.modify_user_state(user_unique_id, "crawling")
            async for update in orchestrator.stream_website_data(
                    user_id=user_unique_id, manager=manager,
                    description=data['description'], url=data['url'],
                    site_type=data['siteType']):
                message = dict(type="message", message=update,
                               is_complete=False)
                await manager.send_message(user_unique_id, json.dumps(message))
            status_message = dict(type="status", message="Disconnecting you now")
            message = dict(type="message", message="..",
                           is_complete=True)
            await manager.send_message(user_unique_id, json.dumps(message))
            await manager.send_message(user_unique_id, json.dumps(status_message))
            await manager.disconnect(user_unique_id)
    except WebSocketDisconnect:
        await manager.disconnect(user_unique_id)

week1/community-contributions/Brochurify/run.sh (+41)

@@ -0,0 +1,41 @@
#!/bin/bash
if [ -f .env ]; then
    export $(grep -v '^#' .env | xargs)
else
    echo ".env file not found. Please create it with SOCKET_HOST and SOCKET_PORT."
    exit 1
fi

# Substitute the SOCKET_HOST/SOCKET_PORT placeholders in the static page.
sed -i "s/SOCKET_HOST/$SOCKET_HOST/g" static/index.html
sed -i "s/SOCKET_PORT/$SOCKET_PORT/g" static/index.html

if lsof -i:"$SOCKET_PORT" > /dev/null 2>&1; then
    echo "Port $SOCKET_PORT is already in use. Please free the port or use a different one."
    exit 1
fi

echo "Starting the Python application on $SOCKET_HOST:$SOCKET_PORT..."
python main.py &
APP_PID=$!
sleep 2

STATIC_DIR="./static"
if [ ! -d "$STATIC_DIR" ]; then
    echo "Static directory not found at $STATIC_DIR. Please ensure it exists."
    exit 1
fi
cd "$STATIC_DIR"

echo "Starting the static server in $STATIC_DIR on $STATIC_HOST:$STATIC_PORT..."
python -m http.server "$STATIC_PORT" --bind "$STATIC_HOST" &
STATIC_PID=$!
cd ..

echo "Servers are running. Press Ctrl+C to stop."
trap "kill $STATIC_PID $APP_PID" SIGINT SIGTERM
wait

week1/community-contributions/Brochurify/services/__init__.py (+1)

@@ -0,0 +1 @@
from .orchestrator import Orchestrator

week1/community-contributions/Brochurify/services/crawler/__crawler_base.py (+17)

@@ -0,0 +1,17 @@
from abc import ABC, abstractmethod


class CrawlerBase(ABC):
    @abstractmethod
    async def crawl(self):
        pass

    @staticmethod
    async def _fetch(session, link):
        # Returns the response body, or the exception itself on failure;
        # callers distinguish the two cases with isinstance().
        try:
            async with session.get(link) as response:
                response.raise_for_status()
                return await response.text()
        except Exception as e:
            return e

week1/community-contributions/Brochurify/services/crawler/__init__.py (+1)

@@ -0,0 +1 @@
from .crawler import CrawlerService

week1/community-contributions/Brochurify/services/crawler/bs4crawler.py (+61)

@@ -0,0 +1,61 @@
import asyncio

import aiohttp
from bs4 import BeautifulSoup

from exceptions import BadUrlException

from .__crawler_base import CrawlerBase


class BS4Crawler(CrawlerBase):
    def __init__(self, url):
        self.url = url
        self.visited_links = list()
        self.url_contents = list()

    @staticmethod
    def get_soup_content(soup):
        title = soup.title.string if soup.title else "No title found"
        if soup.body:
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            text = title + "\t" + soup.body.get_text(separator="\n", strip=True)
            return text
        return ""

    async def main_page_crawl(self, session):
        response = await self._fetch(session, self.url)
        if isinstance(response, BaseException):
            raise BadUrlException()
        soup = BeautifulSoup(response, 'html.parser')
        main_page_text = self.get_soup_content(soup)
        return main_page_text, soup

    async def crawl(self):
        async with aiohttp.ClientSession() as session:
            main_page_text, soup = await self.main_page_crawl(session)
            self.url_contents.append(dict(url=self.url, content=main_page_text))
            self.visited_links.append(self.url)
            links = [link.get('href') for link in soup.find_all('a')]
            requests = list()
            for link in links:
                # Only follow unvisited absolute links under the same base URL.
                if link is not None:
                    if link not in self.visited_links and link.startswith(self.url):
                        requests.append(self.get_url_content(session, link))
                        self.visited_links.append(link)
            print("Starting to gather links")
            if requests:
                responses = await asyncio.gather(*requests, return_exceptions=True)
                for response in responses:
                    if response:
                        self.url_contents.append(response)
            print("Crawling done")

    async def get_url_content(self, session, link):
        response = await self._fetch(session, link)
        if isinstance(response, BaseException):
            return None
        soup = BeautifulSoup(response, 'html.parser')
        text = self.get_soup_content(soup)
        return dict(url=link, content=text)

week1/community-contributions/Brochurify/services/crawler/crawler.py (+18)

@@ -0,0 +1,18 @@
from exceptions import InvalidCrawlType

from .bs4crawler import BS4Crawler


class CrawlerService:
    def __init__(self, url, crawl_type):
        self.crawler = self.crawl_builder(url, crawl_type)

    async def crawl(self):
        await self.crawler.crawl()
        return self.crawler.url_contents

    @staticmethod
    def crawl_builder(url, crawl_type):
        if crawl_type == "normal":
            return BS4Crawler(url)
        raise InvalidCrawlType()

week1/community-contributions/Brochurify/services/llm/__init__.py (+1)

@@ -0,0 +1 @@
from .llm_service import LLMService

week1/community-contributions/Brochurify/services/llm/builder.py (+24)

@@ -0,0 +1,24 @@
from .open_ai import OpenAISummarize

# Maps model type -> task -> implementing class name. The class must be
# imported into this module's namespace for the globals() lookup below to
# succeed; "OllamaSummarize" is listed but not yet implemented, so selecting
# it will raise a KeyError.
SUPPORTED_MODELS = {
    "openai": {
        "summarize": "OpenAISummarize",
    },
    "ollama": {
        "summarize": "OllamaSummarize"
    },
}


def llm_builder(model_type: str, model_name: str, crawl_type: str):
    if model_type not in SUPPORTED_MODELS:
        raise ValueError(f"Unsupported model type: {model_type}")
    if crawl_type not in SUPPORTED_MODELS[model_type]:
        raise ValueError(f"Crawl type '{crawl_type}' not supported for model type '{model_type}'")
    class_name = SUPPORTED_MODELS[model_type][crawl_type]
    service_class = globals()[class_name]
    return service_class(model_name)

week1/community-contributions/Brochurify/services/llm/llm_service.py (+18)

@@ -0,0 +1,18 @@
from .builder import llm_builder


class LLMService:
    def __init__(self, model_type, model_name, crawl_type):
        self.llm = llm_builder(model_type, model_name, crawl_type)

    async def generate_response(self, crawl_result, url, description, site_type):
        async for response_chunk in self.llm.generate(content=crawl_result, url=url,
                                                      description=description,
                                                      site_type=site_type):
            yield response_chunk

week1/community-contributions/Brochurify/services/llm/open_ai.py (+73)

@@ -0,0 +1,73 @@
import json
import os
from typing import Dict, List

from openai import AsyncOpenAI

from .summarizer_llm import BaseSummarizer


class OpenAISummarize(BaseSummarizer):
    def __init__(self, model_name: str = "gpt-4o-mini"):
        self.model = model_name
        api_key = os.getenv("OPENAI_API_KEY")
        self.openai = AsyncOpenAI(api_key=api_key)

    async def generate(self, url, content: List[Dict], description, site_type):
        content_dict = {item['url']: item for item in content}
        links = list(content_dict.keys())
        yield f"Now filtering the links I found on {url}\n"
        new_links = await self.remove_unnecessary_link(url=url,
                                                       links=links,
                                                       description=description,
                                                       site_type=site_type)
        yield "Links have been filtered. Advancing...\n\n"
        filtered_content = [content_dict[link_info['url']]
                            for link_info in new_links['links']
                            if link_info['url'] in content_dict]
        yield "It's almost done\n"
        prompt = self.get_brochure_prompt(filtered_content)
        response = await self.openai.chat.completions.create(model=self.model,
                                                             messages=prompt,
                                                             stream=True)
        async for response_chunk in response:
            chunk = response_chunk.choices[0].delta.content
            if chunk:  # the final streamed chunk carries no content
                yield chunk

    async def remove_unnecessary_link(self, url, links, description, site_type):
        prompt = self.prompts_for_removing_links(url=url,
                                                 description=description,
                                                 site_type=site_type,
                                                 links=links)
        completion = await self.openai.chat.completions.create(
            messages=prompt,
            model=self.model,
            response_format={"type": "json_object"}
        )
        result = completion.choices[0].message.content
        return json.loads(result)

    @staticmethod
    def get_brochure_prompt(link_content_list):
        system_prompt = (
            "You are an assistant that analyzes the contents of several relevant "
            "pages from a company website and creates a short brochure about the "
            "company for prospective customers, investors and recruits. Respond in "
            "markdown. Include details of company culture, customers and "
            "careers/jobs if you have the information."
        )
        user_prompt = ("Here are the contents of its landing page and other "
                       "relevant pages; use this information to build a short "
                       "brochure of the company in markdown.\n")
        result = "links content are:\n\n"
        for item in link_content_list:
            result += f"url: {item['url']},\t content: {item['content'][:2000]}\n"
        user_prompt += result
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ]

week1/community-contributions/Brochurify/services/llm/summarizer_llm.py (+56)

@@ -0,0 +1,56 @@
from abc import ABC, abstractmethod


class BaseSummarizer(ABC):
    @abstractmethod
    async def generate(self, *args, **kwargs):
        pass

    @abstractmethod
    async def remove_unnecessary_link(self, *args, **kwargs):
        pass

    def prompts_for_removing_links(self, url, links, description=None, site_type=None):
        link_system_prompt = (
            "You are provided with a list of links found on a webpage. "
            "Your task is to filter out irrelevant links and retain those that are "
            "most relevant for creating a brochure. "
            "Consider links that provide valuable information about the site's content, "
            "such as main articles, key information pages, or other significant sections.\n"
            "Exclude links that are not useful for a brochure, such as Terms of Service, "
            "Privacy policies, and email links.\n"
            "You should respond in JSON format as shown in the example below:\n"
            "{\n"
            '    "links": [\n'
            '        {"type": "relevant page type", "url": "https://full.url/goes/here"},\n'
            '        {"type": "another relevant page type", "url": "https://another.full.url"}\n'
            "    ]\n"
            "}"
        )
        user_prompt = self.get_links_user_prompt(url, links, description, site_type)
        return [
            {"role": "system", "content": link_system_prompt},
            {"role": "user", "content": user_prompt},
        ]

    @staticmethod
    def get_links_user_prompt(url, links, description=None, site_type=None):
        user_prompt = f"Here is the list of links found on {url}:\n"
        if site_type or description:
            user_prompt += "Additional context:\n"
            if site_type:
                user_prompt += f"- Site type: {site_type}\n"
            if description:
                user_prompt += f"- User description: {description}\n"
        user_prompt += (
            "Please evaluate the following links and select those that are relevant "
            "for inclusion in a brochure. "
            "Exclude links related to Terms of Service, Privacy policies, and email addresses.\n"
            "Links (some might be relative links):\n"
        )
        user_prompt += "\n".join(links)
        return user_prompt

week1/community-contributions/Brochurify/services/orchestrator.py (+23)

@@ -0,0 +1,23 @@
import json

from services.socket import ConnectionManager


class Orchestrator:
    def __init__(self, crawler_service, llm_service):
        self.crawler_service = crawler_service
        self.llm_service = llm_service

    async def stream_website_data(self, user_id: str, manager: ConnectionManager,
                                  description, site_type, url):
        # The front end JSON-parses every frame, so status updates must be
        # sent as JSON too.
        status_message = dict(type="status", message="Starting crawling process...")
        await manager.send_message(user_id, json.dumps(status_message))
        crawl_result = await self.crawler_service.crawl()
        status_message = dict(type="status", message="Processing content with LLM...")
        await manager.send_message(user_id, json.dumps(status_message))
        async for llm_update in self.llm_service.generate_response(
                url=url, crawl_result=crawl_result, description=description,
                site_type=site_type):
            yield llm_update

week1/community-contributions/Brochurify/services/socket/__init__.py (+1)

@@ -0,0 +1 @@
from .socket_handler import ConnectionManager

week1/community-contributions/Brochurify/services/socket/socket_handler.py (+44)

@@ -0,0 +1,44 @@
from uuid import uuid4

from fastapi import WebSocket

from exceptions import InvalidContent


class ConnectionManager:
    def __init__(self):
        self.active_connections = dict()
        self.user_states = dict()

    async def connect(self, websocket: WebSocket):
        unique_id = str(uuid4())[:10]
        await websocket.accept()
        self.active_connections[unique_id] = websocket
        self.user_states[unique_id] = dict()
        print(f"User {unique_id} connected.")
        return unique_id

    async def disconnect(self, unique_id):
        if unique_id in self.active_connections:
            if self.user_states[unique_id].get("connection_state", None) != 'closed':
                print(f"Closing connection with user {unique_id}.")
                await self.active_connections[unique_id].close(code=1000)
                self.user_states[unique_id]['connection_state'] = 'closed'
            del self.active_connections[unique_id]
            del self.user_states[unique_id]
            print(f"User {unique_id} disconnected.")

    async def send_message(self, unique_id, message):
        if unique_id in self.active_connections:
            await self.active_connections[unique_id].send_text(message)

    def get_user_state(self, unique_id):
        user = self.user_states.get(unique_id, None)
        if user is None:
            raise InvalidContent()
        return self.user_states[unique_id].get("user_state", None)

    def modify_user_state(self, unique_id, state):
        if state == "":
            self.user_states[unique_id].pop("user_state", None)
        else:
            self.user_states[unique_id]["user_state"] = state

week1/community-contributions/Brochurify/static/index.html (+222)

@@ -0,0 +1,222 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>WebSocket Chat with Markdown Streaming</title>
    <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap" rel="stylesheet">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
    <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
    <style>
        body {
            font-family: 'Poppins', sans-serif;
            background-color: #e0eafc;
            text-align: center;
            padding: 20px;
        }
        h1 {
            color: #333;
            font-size: 2em;
            margin-bottom: 20px;
        }
        .input-group {
            position: relative;
            margin: 10px 0;
        }
        .input-group i {
            position: absolute;
            left: 10px;
            top: 50%;
            transform: translateY(-50%);
            color: #999;
        }
        input {
            padding: 10px;
            padding-left: 30px; /* Space for the icon */
            font-size: 1em;
            border: 1px solid #ccc;
            border-radius: 20px;
            width: 300px;
            margin: 5px 0;
        }
        button {
            padding: 10px 20px;
            font-size: 1em;
            background: linear-gradient(to right, #ff7e5f, #feb47b);
            color: white;
            border: none;
            border-radius: 20px;
            cursor: pointer;
            margin-top: 10px;
        }
        button:hover {
            background: linear-gradient(to right, #ff9170, #fec581);
        }
        #messages {
            list-style-type: none;
            margin-top: 20px;
            text-align: left;
            max-width: 1200px;
            margin: 0 auto;
            background-color: #fff;
            border-radius: 20px;
            box-shadow: 0 4px 8px rgba(0,0,0,0.1);
            overflow-y: auto;
            max-height: 500px;
            padding: 10px;
        }
        #messages li {
            padding: 10px;
            margin: 5px 0;
            border-radius: 10px;
            background-color: #d1e7dd;
            animation: fadeIn 0.5s;
        }
        @keyframes fadeIn {
            from { opacity: 0; }
            to { opacity: 1; }
        }
        #status {
            margin-top: 20px;
            font-size: 1em;
            color: #007bff;
        }
        .error {
            color: red;
            font-weight: bold;
        }
    </style>
</head>
<body>
    <h1>WebSocket Chat with Markdown Streaming</h1>
    <div class="input-group">
        <i class="fas fa-link"></i>
        <input type="text" id="urlInput" placeholder="Enter URL">
    </div>
    <div class="input-group">
        <i class="fas fa-align-left"></i>
        <input type="text" id="descriptionInput" placeholder="Enter Description (optional)">
    </div>
    <div class="input-group">
        <i class="fas fa-globe"></i>
        <input type="text" id="siteTypeInput" placeholder="Enter Site Type (optional)">
    </div>
    <button onclick="sendMessage()">Send</button>
    <ul id="messages"></ul>
    <div id="status">✅ Ready to chat!</div>
    <script>
        let ws;
        let currentMessageElement = null;
        let buffer = ''; // Buffer to accumulate chunks

        // Connect to WebSocket server. SOCKET_HOST and SOCKET_PORT are
        // placeholders substituted by run.sh from the .env file.
        function connectWebSocket() {
            ws = new WebSocket("ws://SOCKET_HOST:SOCKET_PORT/socket/ws");
            ws.onopen = () => {
                console.log("WebSocket connection established.");
                updateStatus("Connected to server.");
            };
            ws.onmessage = (event) => {
                const data = JSON.parse(event.data);
                if (data.type === "status") {
                    updateStatus(data.message);
                } else if (data.type === "message") {
                    handleStreamedMessage(data.message, data.is_complete);
                }
            };
            ws.onerror = (error) => {
                console.error("WebSocket error:", error);
                updateStatus("An error occurred. Please try again.", true);
            };
            ws.onclose = () => {
                console.log("WebSocket connection closed.");
                updateStatus("Connection closed. Click 'Send' to reconnect.", true);
            };
        }

        // Update status message
        function updateStatus(message, isError = false) {
            const statusDiv = document.getElementById("status");
            message = (isError ? "Oops! 😢 " : "✅ ") + message;
            statusDiv.innerHTML = isError ? `<div class="error">${message}</div>` : message;
        }

        // Handle streamed messages
        function handleStreamedMessage(chunk, isComplete) {
            const messages = document.getElementById("messages");
            buffer += chunk; // Append chunk to buffer

            // Split buffer by newline characters
            const parts = buffer.split(/\r?\n/);
            buffer = parts.pop(); // Keep the last part in buffer if it doesn't end with newline

            // Render each part as Markdown
            for (let part of parts) {
                if (!currentMessageElement) {
                    currentMessageElement = document.createElement("li");
                    messages.appendChild(currentMessageElement);
                }
                currentMessageElement.innerHTML += marked.parse(part + ' '); // Add a space to prevent Markdown issues
            }

            // If the message is complete, render the remaining buffer
            if (isComplete && buffer !== '') {
                if (!currentMessageElement) {
                    currentMessageElement = document.createElement("li");
                    messages.appendChild(currentMessageElement);
                }
                currentMessageElement.innerHTML += marked.parse(buffer);
                buffer = '';
                currentMessageElement = null;
            }

            // Auto-scroll to the bottom
            messages.scrollTop = messages.scrollHeight;
        }

        // Send message to WebSocket server
        function sendMessage() {
            const url = document.getElementById("urlInput").value.trim();
            const description = document.getElementById("descriptionInput").value.trim();
            const siteType = document.getElementById("siteTypeInput").value.trim();
            if (!url) {
                updateStatus("Please enter a valid URL.", true);
                return;
            }
            const payload = JSON.stringify({
                url: url,
                description: description,
                siteType: siteType,
                crawlType: "normal"
            });
            if (!ws || ws.readyState !== WebSocket.OPEN) {
                // Reconnect and defer the send until the socket is open;
                // sending immediately after connecting would throw.
                connectWebSocket();
                ws.addEventListener("open", () => ws.send(payload), { once: true });
            } else {
                ws.send(payload);
            }
            // Clear input fields
            document.getElementById("urlInput").value = "";
            document.getElementById("descriptionInput").value = "";
            document.getElementById("siteTypeInput").value = "";
        }

        // Initialize WebSocket connection on page load
        connectWebSocket();
    </script>
</body>
</html>