diff --git a/client/README.md b/client/README.md
new file mode 100644
index 0000000..1cc959c
--- /dev/null
+++ b/client/README.md
@@ -0,0 +1,81 @@
+# The `fabric` client
+
+This is the primary `fabric` client, which has multiple modes of operation.
+
+## Client modes
+
+You can use the client in three different modes:
+
+1. **Local Only:** You can use the client without a server; it will use the patterns it has downloaded from this repository, or ones that you specify yourself.
+2. **Local Server:** You can run your own instance of a Fabric Mill locally (on a private IP), which you can then connect to and use.
+3. **Remote Server:** You can point the client at a remote server, and your commands will call that server instead.
+
+## Client features
+
+1. Standalone Mode: Run without needing a server.
+2. Clipboard Integration: Copy responses to the clipboard.
+3. File Output: Save responses to files for later reference.
+4. Pattern Module: Utilize specific patterns for different types of analysis.
+5. Server Mode: Operate the tool in server mode to control your own patterns and let your other apps access it.
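+
+For example, several of these features can be combined in a single invocation (a sketch; `summary.txt` is an arbitrary file name):
+
+```bash
+# Select a pattern, save the response to a file, and copy it to the clipboard
+pbpaste | fabric --pattern summarize --output summary.txt --copy
+```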
+
+## Installation
+
+1. Clone this repository (if you already have it downloaded, you already have the client):
+   `git clone git@github.com:danielmiessler/fabric.git`
+2. Navigate to the client's directory:
+   `cd client`
+3. Install Poetry (if you don't have it already):
+   `pip3 install poetry`
+4. Install the required packages:
+   `poetry install`
+5. Activate the virtual environment:
+   `poetry shell`
+6. Add the client directory to your PATH:
+   `echo "export PATH=\$PATH:$(pwd)" >> ~/.bashrc` # or ~/.zshrc
+7. Add your OpenAI API key to the `.env` file in your `~/.config/fabric/` directory (or create that file and put it in):
+   `OPENAI_API_KEY=[Your_API_Key]`
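+
+   For example, from the shell (a sketch; replace the bracketed placeholder with your actual key):
+
+   ```bash
+   mkdir -p ~/.config/fabric
+   echo "OPENAI_API_KEY=[Your_API_Key]" >> ~/.config/fabric/.env
+   ```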
+
+## Usage
+
+To use `fabric`, call it with your desired options (remember to activate the virtual environment with `poetry shell` - step 5 above):
+
+`fabric [options]`
+
+Options include:
+
+- `--pattern`, `-p`: Select the pattern (prompt) to use for analysis.
+- `--stream`, `-s`: Stream the response in real time as it is generated (streamed output cannot be piped into another command).
+- `--output`, `-o`: Save the response to a file.
+- `--copy`, `-c`: Copy the response to the clipboard.
+
+Example:
+
+```bash
+# Pasting in an article about LLMs
+pbpaste | fabric --pattern extract_wisdom --output wisdom.txt | fabric --pattern summarize --stream
+```
+
+```markdown
+ONE SENTENCE SUMMARY:
+
+- The content covered the basics of LLMs and how they are used in everyday practice.
+
+MAIN POINTS:
+
+1. LLMs are large language models, and typically use the transformer architecture.
+2. LLMs used to be used for story generation, but they're now used for many AI applications.
+3. They are vulnerable to hallucination if not configured correctly, so be careful.
+
+TAKEAWAYS:
+
+1. It's possible to use LLMs for multiple AI use cases.
+2. It's important to validate that the results you're receiving are correct.
+3. The field of AI is moving faster than ever as a result of GenAI breakthroughs.
+```
+
+## Contributing
+
+We welcome contributions to Fabric, including improvements and feature additions to this client.
+
+## Credits
+
+The `fabric` client was created by Jonathan Dunn and Daniel Miessler.
diff --git a/client/fabric/__init__.py b/client/fabric/__init__.py
new file mode 100644
index 0000000..e45b128
--- /dev/null
+++ b/client/fabric/__init__.py
@@ -0,0 +1 @@
+from .fabric import main
diff --git a/client/fabric/fabric.py b/client/fabric/fabric.py
new file mode 100755
index 0000000..f6f5a23
--- /dev/null
+++ b/client/fabric/fabric.py
@@ -0,0 +1,89 @@
+try:
+    from .utils import Standalone, Update, Setup  # package import (matches `from .fabric import main` in __init__.py)
+except ImportError:
+    from utils import Standalone, Update, Setup  # fallback when fabric.py is run directly as a script
+import argparse
+import sys
+import os
+
+
+script_directory = os.path.dirname(os.path.realpath(__file__))
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="An open source framework for augmenting humans using AI."
+    )
+    parser.add_argument("--text", "-t", help="Text to extract summary from")
+    parser.add_argument(
+        "--copy", "-c", help="Copy the response to the clipboard", action="store_true"
+    )
+    parser.add_argument(
+        "--output",
+        "-o",
+        help="Save the response to a file",
+        nargs="?",
+        const="analyzepaper.txt",
+        default=None,
+    )
+    parser.add_argument(
+        "--stream",
+        "-s",
+        help="Use this option if you want to see the results in realtime. NOTE: You will not be able to pipe the output into another command.",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--list", "-l", help="List available patterns", action="store_true"
+    )
+    parser.add_argument("--update", "-u", help="Update patterns", action="store_true")
+    parser.add_argument("--pattern", "-p", help="The pattern (prompt) to use")
+    parser.add_argument(
+        "--setup", help="Set up your fabric instance", action="store_true"
+    )
+    parser.add_argument(
+        "--model",
+        "-m",
+        help="Select the model to use (default: gpt-4-turbo-preview)",
+        default="gpt-4-turbo-preview",
+    )
+    parser.add_argument(
+        "--listmodels", help="List all available models", action="store_true"
+    )
+
+    args = parser.parse_args()
+    home_holder = os.path.expanduser("~")
+    config = os.path.join(home_holder, ".config", "fabric")
+    config_patterns_directory = os.path.join(config, "patterns")
+    env_file = os.path.join(config, ".env")
+    if not os.path.exists(config):
+        os.makedirs(config)
+    if args.setup:
+        Setup().run()
+        sys.exit()
+    if not os.path.exists(env_file):
+        print("Please run --setup to set up your API key and download patterns.")
+        sys.exit()
+    if not os.path.exists(config_patterns_directory):
+        Update()
+        sys.exit()
+    if args.update:
+        Update()
+        print("Your Patterns have been updated.")
+        sys.exit()
+    standalone = Standalone(args, args.pattern)
+    if args.list:
+        try:
+            direct = os.listdir(config_patterns_directory)
+            for d in direct:
+                print(d)
+            sys.exit()
+        except FileNotFoundError:
+            print("No patterns found")
+            sys.exit()
+    if args.listmodels:
+        standalone.fetch_available_models()
+        sys.exit()
+    if args.text is not None:
+        text = args.text
+    else:
+        text = standalone.get_cli_input()
+    if args.stream:
+        standalone.streamMessage(text)
+    else:
+        standalone.sendMessage(text)
+
+if __name__ == "__main__":
+    main()
diff --git a/client/fabric/p.py b/client/fabric/p.py
new file mode 100755
index 0000000..27daf10
--- /dev/null
+++ b/client/fabric/p.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
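+# Print the current clipboard contents: a cross-platform stand-in for macOS pbpaste.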
+import pyperclip
+
+pasted_text = pyperclip.paste()
+print(pasted_text)
diff --git a/client/fabric/utils.py b/client/fabric/utils.py
new file mode 100644
index 0000000..349b0ab
--- /dev/null
+++ b/client/fabric/utils.py
@@ -0,0 +1,403 @@
+import requests
+import os
+from openai import OpenAI
+import pyperclip
+import sys
+import platform
+from dotenv import load_dotenv
+from requests.exceptions import HTTPError
+from tqdm import tqdm
+from youtube_transcript_api import YouTubeTranscriptApi  # used by Transcribe.youtube below
+
+current_directory = os.path.dirname(os.path.realpath(__file__))
+config_directory = os.path.expanduser("~/.config/fabric")
+env_file = os.path.join(config_directory, ".env")
+
+
+
+class Standalone:
+    def __init__(self, args, pattern="", env_file="~/.config/fabric/.env"):
+        """ Initialize the class with the provided arguments and environment file.
+
+        Args:
+            args: The arguments for initialization.
+            pattern: The pattern to be used (default is an empty string).
+            env_file: The path to the environment file (default is "~/.config/fabric/.env").
+
+        Returns:
+            None
+
+        Raises:
+            KeyError: If the "OPENAI_API_KEY" is not found in the environment variables.
+        """
+
+        # Expand the tilde to the full path
+        env_file = os.path.expanduser(env_file)
+        load_dotenv(env_file)
+        try:
+            apikey = os.environ["OPENAI_API_KEY"]
+            self.client = OpenAI()
+            self.client.api_key = apikey
+        except KeyError:
+            print("OPENAI_API_KEY not found in environment variables.")
+            print("Please run --setup to set your API key.")
+            sys.exit()
+        self.config_pattern_directory = config_directory
+        self.pattern = pattern
+        self.args = args
+        self.model = args.model
+
+    def streamMessage(self, input_data: str):
+        """ Stream a message and handle exceptions.
+
+        Args:
+            input_data (str): The input data for the message.
+
+        Returns:
+            None: If the pattern is not found.
+
+        Raises:
+            FileNotFoundError: If the pattern file is not found.
+        """
+
+        wisdomFilePath = os.path.join(
+            config_directory, f"patterns/{self.pattern}/system.md"
+        )
+        user_message = {"role": "user", "content": f"{input_data}"}
+        buffer = ""
+        if self.pattern:
+            try:
+                with open(wisdomFilePath, "r") as f:
+                    system = f.read()
+                system_message = {"role": "system", "content": system}
+                messages = [system_message, user_message]
+            except FileNotFoundError:
+                print("pattern not found")
+                return
+        else:
+            messages = [user_message]
+        try:
+            stream = self.client.chat.completions.create(
+                model=self.model,
+                messages=messages,
+                temperature=0.0,
+                top_p=1,
+                frequency_penalty=0.1,
+                presence_penalty=0.1,
+                stream=True,
+            )
+            # Print each streamed chunk as it arrives and accumulate the full response.
+            for chunk in stream:
+                if chunk.choices[0].delta.content is not None:
+                    char = chunk.choices[0].delta.content
+                    buffer += char
+                    print(char, end="")
+                    sys.stdout.flush()
+        except Exception as e:
+            print(f"Error: {e}")
+        if self.args.copy:
+            pyperclip.copy(buffer)
+        if self.args.output:
+            with open(self.args.output, "w") as f:
+                f.write(buffer)
+
+    def sendMessage(self, input_data: str):
+        """ Send a message using the input data and generate a response.
+
+        Args:
+            input_data (str): The input data to be sent as a message.
+
+        Returns:
+            None
+
+        Raises:
+            FileNotFoundError: If the specified pattern file is not found.
+        """
+
+        wisdomFilePath = os.path.join(
+            config_directory, f"patterns/{self.pattern}/system.md"
+        )
+        user_message = {"role": "user", "content": f"{input_data}"}
+        if self.pattern:
+            try:
+                with open(wisdomFilePath, "r") as f:
+                    system = f.read()
+                system_message = {"role": "system", "content": system}
+                messages = [system_message, user_message]
+            except FileNotFoundError:
+                print("pattern not found")
+                return
+        else:
+            messages = [user_message]
+        try:
+            response = self.client.chat.completions.create(
+                model=self.model,
+                messages=messages,
+                temperature=0.0,
+                top_p=1,
+                frequency_penalty=0.1,
+                presence_penalty=0.1,
+            )
+            print(response.choices[0].message.content)
+            # Copy or save the response only if the request succeeded.
+            if self.args.copy:
+                pyperclip.copy(response.choices[0].message.content)
+            if self.args.output:
+                with open(self.args.output, "w") as f:
+                    f.write(response.choices[0].message.content)
+        except Exception as e:
+            print(f"Error: {e}")
+
+    def fetch_available_models(self):
+        headers = {"Authorization": f"Bearer {self.client.api_key}"}
+
+        response = requests.get("https://api.openai.com/v1/models", headers=headers)
+
+        if response.status_code == 200:
+            models = response.json().get("data", [])
+            # Keep only GPT models
+            gpt_models = [model for model in models if model.get("id", "").startswith("gpt")]
+            # Sort the models alphabetically by their ID
+            sorted_gpt_models = sorted(gpt_models, key=lambda x: x.get("id"))
+
+            for model in sorted_gpt_models:
+                print(model.get("id"))
+        else:
+            print(f"Failed to fetch models: HTTP {response.status_code}")
+
+    def get_cli_input(self):
+        """ Accept either piped input or console input, on both Windows and
+        Unix-like systems (aided by ChatGPT; uses the platform library).
+
+        Args:
+            none
+
+        Returns:
+            str: input from either the user or the pipe
+        """
+        system = platform.system()
+        if system == "Windows":
+            if not sys.stdin.isatty():  # Check if input is being piped
+                return sys.stdin.readline().strip()  # Read piped input
+            else:
+                return input("Enter Question: ")  # Prompt user for input from console
+        else:
+            return sys.stdin.read()
+
+
+class Update:
+    def __init__(self):
+        """ Initialize the object with default values and update patterns.
+
+        This method initializes the object with default values for root_api_url, config_directory, and pattern_directory.
+        It then creates the pattern_directory if it does not exist and calls the update_patterns method to update the patterns.
+
+        Raises:
+            OSError: If there is an issue creating the pattern_directory.
+        """
+
+        self.root_api_url = "https://api.github.com/repos/danielmiessler/fabric/contents/patterns?ref=main"
+        self.config_directory = os.path.expanduser("~/.config/fabric")
+        self.pattern_directory = os.path.join(self.config_directory, "patterns")
+        os.makedirs(self.pattern_directory, exist_ok=True)
+        self.update_patterns()  # Call the update process from a method.
+
+    def update_patterns(self):
+        """ Update the patterns by downloading from the GitHub directory.
+
+        Raises:
+            HTTPError: If there is an HTTP error while downloading patterns.
+        """
+
+        try:
+            self.progress_bar = tqdm(desc="Downloading Patterns…", unit="file")
+            self.get_github_directory_contents(
+                self.root_api_url, self.pattern_directory
+            )
+            # Close progress bar on success before printing the message.
+            self.progress_bar.close()
+        except HTTPError as e:
+            # Ensure the progress bar is closed on HTTPError as well.
+            self.progress_bar.close()
+            if e.response.status_code == 403:
+                print(
+                    "GitHub API rate limit exceeded. Please wait before trying again."
+                )
+                sys.exit()
+            else:
+                print(f"Failed to download patterns due to an HTTP error: {e}")
+                sys.exit()  # Exit after handling the error.
+
+    def download_file(self, url, local_path):
+        """ Download a file from the given URL and save it to the local path.
+
+        Args:
+            url (str): The URL of the file to be downloaded.
+            local_path (str): The local path where the file will be saved.
+
+        Raises:
+            HTTPError: If an HTTP error occurs during the download process.
+        """
+
+        try:
+            response = requests.get(url)
+            response.raise_for_status()
+            with open(local_path, "wb") as f:
+                f.write(response.content)
+            self.progress_bar.update(1)
+        except HTTPError as e:
+            print(f"Failed to download file {url}. HTTP error: {e}")
+            sys.exit()
+
+    def process_item(self, item, local_dir):
+        """ Process the given item and save it to the local directory.
+
+        Args:
+            item (dict): The item to be processed, containing information about the type, download URL, name, and URL.
+            local_dir (str): The local directory where the item will be saved.
+
+        Returns:
+            None
+
+        Raises:
+            OSError: If there is an issue creating the new directory using os.makedirs.
+        """
+
+        if item["type"] == "file":
+            self.download_file(
+                item["download_url"], os.path.join(local_dir, item["name"])
+            )
+        elif item["type"] == "dir":
+            new_dir = os.path.join(local_dir, item["name"])
+            os.makedirs(new_dir, exist_ok=True)
+            self.get_github_directory_contents(item["url"], new_dir)
+
+    def get_github_directory_contents(self, api_url, local_dir):
+        """ Get the contents of a directory from the GitHub API and process each item.
+
+        Args:
+            api_url (str): The URL of the GitHub API endpoint for the directory.
+            local_dir (str): The local directory where the contents will be processed.
+
+        Returns:
+            None
+
+        Raises:
+            HTTPError: If an HTTP error occurs while fetching the directory contents.
+                If the status code is 403, it prints a message about the GitHub API rate limit
+                being exceeded and closes the progress bar. For any other status code, it prints
+                a message about failing to fetch directory contents due to an HTTP error.
+        """
+
+        try:
+            response = requests.get(api_url)
+            response.raise_for_status()
+            jsonList = response.json()
+            for item in jsonList:
+                self.process_item(item, local_dir)
+        except HTTPError as e:
+            if e.response.status_code == 403:
+                print(
+                    "GitHub API rate limit exceeded. Please wait before trying again."
+                )
+                self.progress_bar.close()  # Ensure the progress bar is cleaned up properly
+            else:
+                print(f"Failed to fetch directory contents due to an HTTP error: {e}")
+
+class Setup:
+    def __init__(self):
+        """ Initialize the object.
+
+        Raises:
+            OSError: If there is an error in creating the pattern directory.
+        """
+
+        self.config_directory = os.path.expanduser("~/.config/fabric")
+        self.pattern_directory = os.path.join(self.config_directory, "patterns")
+        os.makedirs(self.pattern_directory, exist_ok=True)
+        self.env_file = os.path.join(self.config_directory, ".env")
+
+    def api_key(self, api_key):
+        """ Set the OpenAI API key in the environment file.
+
+        Args:
+            api_key (str): The API key to be set.
+
+        Returns:
+            None
+
+        Raises:
+            OSError: If the environment file cannot be created or accessed.
+        """
+
+        if not os.path.exists(self.env_file):
+            with open(self.env_file, "w") as f:
+                f.write(f"OPENAI_API_KEY={api_key}")
+            print(f"OpenAI API key set to {api_key}")
+
+    def patterns(self):
+        """ Method to update patterns and exit the system.
+
+        Returns:
+            None
+        """
+
+        Update()
+        sys.exit()
+
+    def run(self):
+        """ Execute the Fabric program.
+
+        This method prompts the user for their OpenAI API key, sets the API key in the Fabric object, and then calls the patterns method.
+
+        Returns:
+            None
+        """
+
+        print("Welcome to Fabric. Let's get started.")
+        apikey = input("Please enter your OpenAI API key\n")
+        self.api_key(apikey.strip())
+        self.patterns()
+
+
+class Transcribe:
+    @staticmethod
+    def youtube(video_id):
+        """
+        This method gets the transcription
+        of a YouTube video designated by the video_id.
+
+        Input:
+            the video ID specifying a YouTube video;
+            for example, for the URL https://www.youtube.com/watch?v=vF-MQmVxnCs&t=306s
+            the video ID is vF-MQmVxnCs
+
+        Output:
+            a transcript for the video
+
+        Raises:
+            an exception and prints the error
+        """
+        try:
+            transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
+            transcript = ""
+            for segment in transcript_list:
+                transcript += segment["text"] + " "
+            return transcript.strip()
+        except Exception as e:
+            print("Error:", e)
+            return None
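+
+# Example usage (hypothetical video ID):
+#   transcript = Transcribe.youtube("vF-MQmVxnCs")
+#   if transcript:
+#       print(transcript)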
+
+
diff --git a/client/gui/.gitignore b/client/gui/.gitignore
new file mode 100644
index 0000000..d93463d
--- /dev/null
+++ b/client/gui/.gitignore
@@ -0,0 +1,3 @@
+node_modules/
+dist/
+build/
diff --git a/client/gui/README.md b/client/gui/README.md
new file mode 100644
index 0000000..4bd2346
--- /dev/null
+++ b/client/gui/README.md
@@ -0,0 +1,21 @@
+Fabric is not just a tool; it's a transformative step towards integrating the power of GPT prompts into your digital life. With Fabric, you have the ability to create a personal API that brings advanced GPT capabilities into various aspects of your digital environment. Whether you're looking to incorporate powerful GPT prompts into command line operations or extend their functionality to a wider network through a personal API, Fabric is designed to seamlessly blend with your digital ecosystem. This tool is all about augmenting your digital interactions, enhancing productivity, and enabling a more intelligent, GPT-powered experience in every aspect of your online presence.
+
+## Features
+
+1. Text Analysis: Easily extract summaries from texts.
+2. Clipboard Integration: Conveniently copy responses to the clipboard.
+3. File Output: Save responses to files for later reference.
+4. Pattern Module: Utilize specific modules for different types of analysis.
+5. Server Mode: Operate the tool in server mode for expanded capabilities.
+6. Remote & Standalone Modes: Choose between remote and standalone operations.
+
+## Installation
+
+1. Install dependencies:
+   `npm install`
+2. Start the application:
+   `npm start`
+
+## Contributing
+
+We welcome contributions to Fabric! For details on our code of conduct and the process for submitting pull requests, please read the CONTRIBUTING.md.
diff --git a/client/gui/chatgpt.js b/client/gui/chatgpt.js
new file mode 100644
index 0000000..1fe7c7f
--- /dev/null
+++ b/client/gui/chatgpt.js
@@ -0,0 +1,45 @@
+const { OpenAI } = require("openai");
+require("dotenv").config({
+ path: require("os").homedir() + "/.config/fabric/.env",
+});
+
+let openaiClient = null;
+
+// Initialize the OpenAI client once and reuse it on subsequent calls
+function getOpenAIClient() {
+  if (openaiClient) {
+    return openaiClient;
+  }
+  if (!process.env.OPENAI_API_KEY) {
+    throw new Error(
+      "The OPENAI_API_KEY environment variable is missing or empty."
+    );
+  }
+  openaiClient = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
+  return openaiClient;
+}
+
+async function queryOpenAI(system, user, callback) {
+  const openai = getOpenAIClient(); // Ensure the client is initialized here
+  const messages = [
+    { role: "system", content: system },
+    { role: "user", content: user },
+  ];
+  try {
+    const stream = await openai.chat.completions.create({
+      model: "gpt-4-1106-preview", // Adjust the model as necessary.
+      messages: messages,
+      temperature: 0.0,
+      top_p: 1,
+      frequency_penalty: 0.1,
+      presence_penalty: 0.1,
+      stream: true,
+    });
+
+    for await (const chunk of stream) {
+      const message = chunk.choices[0]?.delta?.content || "";
+      callback(message); // Process each chunk of data
+    }
+  } catch (error) {
+    console.error("Error querying OpenAI:", error);
+    callback("Error querying OpenAI. Please try again.");
+  }
+}
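+
+// Example usage (hypothetical; assumes `articleText` holds the input text):
+//   queryOpenAI("You are a summarizer.", articleText, (chunk) =>
+//     process.stdout.write(chunk)
+//   );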
+
+module.exports = { queryOpenAI };
diff --git a/client/gui/index.html b/client/gui/index.html
new file mode 100644
index 0000000..2a18b30
--- /dev/null
+++ b/client/gui/index.html
@@ -0,0 +1,70 @@
+
+
+