@@ -1,6 +1,7 @@
 import requests
 import os
 from openai import OpenAI
+import asyncio
 import pyperclip
 import sys
 import platform
@@ -15,7 +16,7 @@ env_file = os.path.join(config_directory, ".env")
 
 
 class Standalone:
-    def __init__(self, args, pattern="", env_file="~/.config/fabric/.env"):
+    def __init__(self, args, pattern="", env_file="~/.config/fabric/.env", local=False):
         """ Initialize the class with the provided arguments and environment file.
 
         Args:
@@ -44,10 +45,24 @@ class Standalone:
         except FileNotFoundError:
             print("No API key found. Use the --apikey option to set the key")
             sys.exit()
+        self.local = local
         self.config_pattern_directory = config_directory
         self.pattern = pattern
         self.args = args
         self.model = args.model
+        if self.local:
+            if self.args.model == 'gpt-4-turbo-preview':
+                self.args.model = 'llama2'
+
+    async def localChat(self, messages):
+        from ollama import AsyncClient
+        response = await AsyncClient().chat(model=self.args.model, messages=messages)
+        print(response['message']['content'])
+
+    async def localStream(self, messages):
+        from ollama import AsyncClient
+        async for part in await AsyncClient().chat(model=self.args.model, messages=messages, stream=True):
+            print(part['message']['content'], end='', flush=True)
 
     def streamMessage(self, input_data: str, context=""):
         """ Stream a message and handle exceptions.
@@ -87,6 +102,9 @@ class Standalone:
         else:
             messages = [user_message]
         try:
-            stream = self.client.chat.completions.create(
-                model=self.model,
-                messages=messages,
+            if self.local:
+                asyncio.run(self.localStream(messages))
+            else:
+                stream = self.client.chat.completions.create(
+                    model=self.model,
+                    messages=messages,
@@ -153,6 +171,9 @@ class Standalone:
         else:
             messages = [user_message]
         try:
-            response = self.client.chat.completions.create(
-                model=self.model,
-                messages=messages,
+            if self.local:
+                asyncio.run(self.localChat(messages))
+            else:
+                response = self.client.chat.completions.create(
+                    model=self.model,
+                    messages=messages,
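
For reviewers who want to try the new code path in isolation: below is a minimal, self-contained sketch of the pattern this patch introduces, using the same ollama AsyncClient calls as localChat/localStream. It assumes the ollama Python package is installed and that a llama2 model has already been pulled locally (ollama pull llama2); the function names, the __main__ guard, and the message contents are illustrative only, not part of the patch.

    import asyncio
    from ollama import AsyncClient

    async def local_chat(model, messages):
        # Non-streaming: await the full reply, then print it once
        # (mirrors Standalone.localChat above).
        response = await AsyncClient().chat(model=model, messages=messages)
        print(response['message']['content'])

    async def local_stream(model, messages):
        # Streaming: chat(..., stream=True) yields partial responses;
        # print each chunk as it arrives (mirrors Standalone.localStream).
        async for part in await AsyncClient().chat(model=model, messages=messages, stream=True):
            print(part['message']['content'], end='', flush=True)

    if __name__ == '__main__':
        msgs = [{'role': 'user', 'content': 'Say hello in one sentence.'}]
        # Synchronous callers bridge into the async client with asyncio.run(),
        # just as streamMessage and sendMessage do in the patch.
        asyncio.run(local_stream('llama2', msgs))

One design note: asyncio.run() spins up a fresh event loop on every call, which is fine for a one-shot CLI like this; a long-lived caller would want to reuse a single loop (or a single AsyncClient instance) instead.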