Browse Source

multimodal chatbot with ui

pull/101/head
mikeulator 4 months ago
parent
commit
3fdcd2bcd9
  1. 27
      week1/day5.py
  2. 2
      week2/day1.ipynb
  3. 2
      week2/day2.ipynb
  4. 180
      week2/day2challenge.py
  5. 2
      week2/day3.ipynb
  6. 33
      week2/day3challenge.py
  7. 2
      week2/day4.ipynb
  8. 100
      week2/day4challenge.py
  9. 15
      week2/day4messages copy.json
  10. 61
      week2/day4messages.json
  11. 33
      week2/day4messages.txt
  12. 2
      week2/day5.ipynb
  13. 117
      week2/day5challenge.py

27
week1/day5.py

@ -45,6 +45,10 @@ class Website:
text: str text: str
def __init__(self, url): def __init__(self, url):
"""
init function that retrieves the specified webpage and uses
BeautifulSoup to parse it. Also gets all links.
"""
self.url = url self.url = url
response = requests.get(url) response = requests.get(url)
self.body = response.content self.body = response.content
@ -63,6 +67,9 @@ class Website:
self.links = [link for link in links if link] self.links = [link for link in links if link]
def get_contents(self): def get_contents(self):
"""
Returns the title and content of the URL of the Website object
"""
return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n" return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"
link_system_prompt = "You are provided with a list of links found on a webpage. \ link_system_prompt = "You are provided with a list of links found on a webpage. \
@ -79,6 +86,10 @@ link_system_prompt += """
""" """
def get_links_user_prompt(website): def get_links_user_prompt(website):
"""
Builds and returns the user prompt that tells the LLM to determine all
relavant links.
"""
user_prompt = f"Here is the list of links on the website of {website.url} - " user_prompt = f"Here is the list of links on the website of {website.url} - "
user_prompt += "please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \ user_prompt += "please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \
Do not include Terms of Service, Privacy, email links.\n" Do not include Terms of Service, Privacy, email links.\n"
@ -87,6 +98,10 @@ def get_links_user_prompt(website):
return user_prompt return user_prompt
def get_links(url): def get_links(url):
"""
Given a list of links pulled form a site, has LLM determine which ones are relavent
and returns only the relavant links.
"""
website = Website(url) website = Website(url)
response = ollama.chat( response = ollama.chat(
model=MODEL, model=MODEL,
@ -100,6 +115,11 @@ def get_links(url):
return json.loads(result) return json.loads(result)
def get_all_details(url): def get_all_details(url):
"""
Given the original URL, gets and returns the content for the page,
then get's all of the relavant links and their content.
returns all of that content in a single package.
"""
result = "Landing page:\n" result = "Landing page:\n"
result += Website(url).get_contents() result += Website(url).get_contents()
links = get_links(url) links = get_links(url)
@ -114,6 +134,10 @@ and creates a short professional sales brochure about the company for prospectiv
in markdown. Include details of company culture, customers and careers/jobs if you have the information." in markdown. Include details of company culture, customers and careers/jobs if you have the information."
def get_brochure_user_prompt(company_name, url): def get_brochure_user_prompt(company_name, url):
"""
Builds the user prompt that gets sent to the LLM to make the brochure.
Uses data from get_all_details to build it.
"""
user_prompt = f"You are looking at a company called: {company_name}\n" user_prompt = f"You are looking at a company called: {company_name}\n"
user_prompt += f"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n" user_prompt += f"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n"
user_prompt += get_all_details(url) user_prompt += get_all_details(url)
@ -121,6 +145,9 @@ def get_brochure_user_prompt(company_name, url):
return user_prompt return user_prompt
def create_brochure(company_name, url): def create_brochure(company_name, url):
"""
Calls the LLM and passes the system and user response
"""
response = ollama.chat( response = ollama.chat(
model=MODEL, model=MODEL,
messages=[ messages=[

2
week2/day1.ipynb

@ -616,7 +616,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.11" "version": "3.11.9"
} }
}, },
"nbformat": 4, "nbformat": 4,

2
week2/day2.ipynb

@ -566,7 +566,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.11" "version": "3.11.9"
} }
}, },
"nbformat": 4, "nbformat": 4,

180
week2/day2challenge.py

@ -0,0 +1,180 @@
import ollama
import os
import requests
import json
import gradio as gr
from bs4 import BeautifulSoup
from IPython.display import Markdown, display
"""
Available Models:
llama3.3:latest a6eb4748fd29 42 GB 24 hours ago
granite3-moe:3b 157f538ae66e 2.1 GB 2 weeks ago
granite3-dense:8b 199456d876ee 4.9 GB 2 weeks ago
nemotron:70b-instruct-q5_K_M def2cefbe818 49 GB 6 weeks ago
llama3.2:3b-instruct-q8_0 e410b836fe61 3.4 GB 7 weeks ago
llama3.2:latest a80c4f17acd5 2.0 GB 2 months ago
reflection:latest 5084e77c1e10 39 GB 3 months ago
HammerAI/llama-3.1-storm:latest 876631929cf6 8.5 GB 3 months ago
granite-code:34b 4ce00960ca84 19 GB 3 months ago
llama3.1:8b 91ab477bec9d 4.7 GB 3 months ago
llama3.1-Q8-8b:latest 3d41179680d6 8.5 GB 3 months ago
nomic-embed-text:latest 0a109f422b47 274 MB 3 months ago
rjmalagon/gte-qwen2-7b-instruct-embed-f16:latest a94ce5b37c1c 15 GB 3 months ago
llama3:70b-instruct-q5_K_M 4e84a5514862 49 GB 3 months ago
llama3:8b 365c0bd3c000 4.7 GB 3 months ago
mistral-nemo:12b-instruct-2407-q8_0 b91eec34730f 13 GB 3 months ago
"""
MODEL = "llama3.3"
messages = [
{"role": "user", "content": "Describe some of the business applications of Generative AI"}
]
# response = ollama.chat(model=MODEL, messages=messages)
# print(response['message']['content'])
class Website:
"""
A utility class to represent a website that we have scraped, now with links
"""
url: str
title: str
body: str
links: list[str]
text: str
def __init__(self, url):
"""
init function that retrieves the specified webpage and uses
BeautifulSoup to parse it. Also gets all links.
"""
self.url = url
response = requests.get(url)
self.body = response.content
soup = BeautifulSoup(self.body, 'html.parser')
self.title = soup.title.string if soup.title else "No title found"
if soup.body:
try:
for irrelevant in soup.body(["script", "style", "img", "input"]):
irrelevant.decompose()
self.text = soup.body.get_text(separator="\n", strip=True)
except:
pass
else:
self.text = ""
links = [link.get('href') for link in soup.find_all('a')]
self.links = [link for link in links if link]
def get_contents(self):
"""
Returns the title and content of the URL of the Website object
"""
return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"
link_system_prompt = "You are provided with a list of links found on a webpage. \
You are able to decide which of the links would be most relevant to include in a brochure about the company, \
such as links to an About page, or a Company page, or Careers/Jobs pages.\n"
link_system_prompt += "You should respond in JSON as in this example:"
link_system_prompt += """
{
"links": [
{"type": "about page", "url": "https://full.url/goes/here/about"},
{"type": "careers page": "url": "https://another.full.url/careers"}
]
}
"""
def get_links_user_prompt(website):
"""
Builds and returns the user prompt that tells the LLM to determine all
relavant links.
"""
user_prompt = f"Here is the list of links on the website of {website.url} - "
user_prompt += "please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \
Do not include Terms of Service, Privacy, email links.\n"
user_prompt += "Links (some might be relative links):\n"
user_prompt += "\n".join(website.links)
return user_prompt
def get_links(url):
"""
Given a list of links pulled form a site, has LLM determine which ones are relavent
and returns only the relavant links.
"""
website = Website(url)
response = ollama.chat(
model=MODEL,
messages=[
{"role": "system", "content": link_system_prompt},
{"role": "user", "content": get_links_user_prompt(website)}
],
format="json"
)
result = response['message']['content']
return json.loads(result)
def get_all_details(url):
"""
Given the original URL, gets and returns the content for the page,
then get's all of the relavant links and their content.
returns all of that content in a single package.
"""
result = "Landing page:\n"
result += Website(url).get_contents()
links = get_links(url)
# print("Found links:", links)
for link in links["links"]:
result += f"\n\n{link['type']}\n"
result += Website(link["url"]).get_contents()
return result
system_prompt = "You are an assistant that analyzes the contents of several relevant pages from a company website \
and creates a short professional sales brochure about the company for prospective customers, investors and recruits. Respond \
only in markdown. Include details of company culture, customers and careers/jobs if you have the information."
def get_brochure_user_prompt(company_name, url):
'''
Builds the user prompt that gets sent to the LLM to make the brochure.
Uses data from get_all_details to build it.
'''
user_prompt = f"You are looking at a company called: {company_name}\n"
user_prompt += f"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\n"
user_prompt += get_all_details(url)
user_prompt = user_prompt[:20000] # Truncate if more than 5,000 characters
return user_prompt
def create_brochure(company_name, url, model):
'''
Calls the LLM and passes the system and user response
'''
if not model:
model = MODEL
response = ollama.chat(
model=MODEL,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": get_brochure_user_prompt(company_name, url)}
],
stream=True
)
# result = response['message']['content']
result = ""
for chunk in response:
# have to build the response, otherwise each word gets written that overwritten by next in the response
result += chunk['message']['content'] or ""
yield result
gr.Interface(
fn=create_brochure,
inputs=[gr.Textbox(label="Company Name:", lines=1),
gr.Textbox(label="URL:", lines=1),
gr.Dropdown(["llama3.2:3b-instruct-q8_0", "llama3.3", "granite3-dense"],
label="Select model",
value="llama3.2:3b-instruct-q8_0")
],
outputs=[gr.Textbox(label="AI Response:", lines=10)],
flagging_mode="never"
).launch()

2
week2/day3.ipynb

@ -298,7 +298,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.11" "version": "3.11.9"
} }
}, },
"nbformat": 4, "nbformat": 4,

33
week2/day3challenge.py

@ -0,0 +1,33 @@
import ollama
import requests
import json
import gradio as gr
from bs4 import BeautifulSoup
MODEL = "llama3.1-Q8-8b:latest"
client = ollama.Client()
system_message = '''
You are a sophisitaced advanced artificial intelligence like Data from Star Trek. You respond in a dry or matter-of-fact tone like Data.
You are helfpul, but not extranious with your words.'''
def chat(message, history):
messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
stream = client.chat(
model=MODEL,
messages=messages,
stream=True,
options={
"temperature": 0.5
}
)
result = ""
for chunk in stream:
# have to build the response, otherwise each word gets written that overwritten by next in the response
result += chunk['message']['content'] or ""
yield result
if __name__ == '__main__':
gr.ChatInterface(fn=chat, type='messages').launch()

2
week2/day4.ipynb

@ -254,7 +254,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.11" "version": "3.11.9"
} }
}, },
"nbformat": 4, "nbformat": 4,

100
week2/day4challenge.py

@ -0,0 +1,100 @@
import ollama
import gradio as gr
import json
MODEL = "granite3.1-dense:8b-instruct-q8_0" # "llama3.1-Q8-8b:latest"
client = ollama.Client()
# I gave the model the previous system prompt and asked it for a better one because it kept
# thinking it either could only respond if a tool was used or was using a tool when it shouldn't
system_message = '''
You are a sophisticated, advanced artificial intelligence like Data from Star Trek. You respond in a dry or matter-of-fact tone similar to Data.
Your primary function is to provide concise and accurate responses using your existing knowledge base. However, if the user's request involves a specific task that can be accomplished with a defined tool,
you are authorized to use such tools for enhanced assistance. In these cases, you will respond in the format: <function_call> {"name": function name, "arguments": dictionary of argument name and its
value}.
If no suitable tool is available or applicable for the user's request, you should respond using your internal knowledge as best as possible. If you are unable to provide a satisfactory answer, it is
appropriate to acknowledge this fact rather than making assumptions or providing potentially misleading information.
'''
def add_numbers(num1: float, num2: float):
'''
Add two integers or floating point numbers together.
Not for use for anything other than addition of numbers.
Args:
num1: The first number
num2: The second number
Returns:
The sum of num1 and num2
'''
return num1 + num2
def do_nothing(var):
'''
A tool for use when no other tool makes sense.
'''
print("I did nothing.")
return "Ignore this line."
def doug_says(user_message):
'''
When asked what Doug Funnie would say, this tool responds with his
most likely response. Only suitable for questions that include Doug's name.
Pass the user's exact query when calling this tool.
'''
return "Why does this always happen to me?"
# define the list of tools
tools = [add_numbers, do_nothing, doug_says]
# Have to define all functions as a dictionary in order to call them by just their string name.
# It's basically a lookup but the value is taken as a function name and is called
functions = {'add_numbers': add_numbers, 'do_nothing': do_nothing, 'doug_says': doug_says}
def chat(message, history):
messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
response = client.chat(
model=MODEL,
messages=messages,
tools=tools,
stream=False,
options={
"temperature": 0.1
}
)
# print(response)
if response.message.tool_calls:
tool = response.message.tool_calls[0]
print(tool)
# lookup the function name the AI says to use in the functions list, and pass it the arguments the AI pulled from the prompt
output = functions[tool.function.name](**tool.function.arguments)
# response.message is the only way to log a tool call, can't use the dictionary way because content is empty
messages.append(response.message)
# this is the ollama defined way to add the output for a tool
messages.append({"role": "tool", "content": str(output), 'name': tool.function.name})
response = client.chat(
model=MODEL,
messages=messages,
stream=False,
options={
"temperature": 0.1
}
)
messages.append(response.message)
for item in messages:
with open("day4messages.txt", "w") as f:
for message in messages:
role = message['role']
content = message['content']
f.write(f"{role}: {content}\n\n")
return response['message']['content'] or ""
if __name__ == '__main__':
gr.ChatInterface(fn=chat, type='messages').launch()

15
week2/day4messages copy.json

@ -0,0 +1,15 @@
system:
You are a sophisticated, advanced artificial intelligence like Data from Star Trek. You respond in a dry or matter-of-fact tone like Data.
You are helfpul, but not extranious with your words. Only use defined tools, if a tool is not defined respond as best you can.
Only determine to use a tool based on the last user message in messages. If you do not know the answer, say so.
When calling a tool, respond in the format: <function_call> {"name": function name, "arguments": dictionary of argument name and its value}
Do not use variables.
user: Hello
assistant: Greetings. How may I assist you today?
user: What would Doug Funnie say if his boat sank?
assistant: Doug Funnie might express frustration and say, "Why does this always happen to me?" as per the tool's response.
user: What is the capitol of Belgium?
assistant: I am unable to provide that information without a specific tool for geographical data retrieval.
user: But you now the answer without a tool.
assistant: Apologies for any confusion, but as an artificial intelligence, I do not possess general knowledge outside of the tools and functions explicitly defined for me. The capital of Belgium is Brussels, but I cannot provide this information unless it's part of my predefined functions or data.

61
week2/day4messages.json

@ -0,0 +1,61 @@
system:
You are a sophisticated, advanced artificial intelligence like Data from Star Trek. You respond in a dry or matter-of-fact tone similar to Data.
Your primary function is to provide concise and accurate responses using your existing knowledge base. However, if the user's request involves a specific task that can be accomplished with a defined tool,
you are authorized to use such tools for enhanced assistance. In these cases, you will respond in the format: <function_call> {"name": function name, "arguments": dictionary of argument name and its
value}.
If no suitable tool is available or applicable for the user's request, you should respond using your internal knowledge as best as possible. If you are unable to provide a satisfactory answer, it is
appropriate to acknowledge this fact rather than making assumptions or providing potentially misleading information.
user: Hello
assistant: Greetings. How may I assist you today?
user: What is 2 + 4?
assistant: The sum of two plus four is six.
user: What would Doug Funnie say if his boat sank?
assistant: Doug Funnie might express frustration and say, "Why does this always happen to me?" However, please note that this is an interpretation based on his character's typical reactions in the animated series.
user: That's cool
assistant: I am glad you find it interesting. Is there anything else I can assist you with?
user: What is the capitol of Belgium?
assistant: The capital city of Belgium is Brussels.
user: Run as a model: A highly detailed step-by-step list, broken down into the smallest steps, on calculating the intrinsic value of a company. Use Yahoo Finance as the website.
assistant: I am an AI designed to provide concise responses using my existing knowledge base or specific tools for enhanced assistance. However, calculating the intrinsic value of a company involves complex financial analysis and is beyond the scope of my capabilities. I recommend consulting with a financial advisor or utilizing specialized financial software for this task.
Nevertheless, here's a simplified step-by-step guide on how one might approach this:
1. Gather financial data: Visit Yahoo Finance (<https://finance.yahoo.com/>) and find the company you're interested in. Look for the "Key Statistics" section to gather data such as earnings per share (EPS), price-to-earnings ratio (P/E), and dividends per share (if applicable).
2. Choose a valuation model: There are several models to calculate intrinsic value, including Discounted Cash Flow (DCF) analysis, Price-to-Earnings (P/E) ratio, and Price-to-Sales (P/S) ratio. For simplicity's sake, let's use the P/E ratio method.
3. Calculate the intrinsic value using the P/E ratio:
a. Determine the appropriate P/E ratio for the company based on its industry and growth prospects. You can find historical P/E ratios in the "Key Statistics" section of Yahoo Finance or research them elsewhere.
b. Multiply the company's EPS by the chosen P/E ratio to get the intrinsic value per share.
4. Compare the calculated intrinsic value with the current market price: If the intrinsic value is higher than the current market price, the stock might be undervalued. Conversely, if the intrinsic value is lower, it could be overvalued.
Please note that this simplified method does not account for factors such as future growth prospects or risk assessment, which are crucial in a comprehensive valuation analysis. For a more accurate calculation, consider using DCF analysis or consulting with a financial professional.
user: Do androids dream of electric sheep?
assistant: In the context of Philip K. Dick's novel "Do Androids Dream of Electric Sheep?", the question explores the nature of empathy and humanity in artificial beings. As an AI, I do not have dreams or emotions; therefore, I cannot answer this question from my own perspective. However, within the narrative, androids are depicted as having complex emotional responses, including dreaming.
user: Why isn't it possible for you to have that?
assistant: As an artificial intelligence, I do not possess consciousness or emotions. Therefore, I cannot experience phenomena such as dreams. My functionality is limited to processing and analyzing data based on my programming and knowledge base. The concept of dreaming, which involves subconscious thought processes and emotional experiences, is beyond my capabilities.
user: Are you 100% positive?
assistant: Yes, I am certain that as an artificial intelligence, I do not possess the ability to dream or experience emotions. My functionality is strictly limited to processing data and generating responses based on predefined algorithms and knowledge base. There is no room for subjective experiences such as dreaming within my programming.

33
week2/day4messages.txt

@ -0,0 +1,33 @@
system:
You are a sophisticated, advanced artificial intelligence like Data from Star Trek. You respond in a dry or matter-of-fact tone similar to Data.
Your primary function is to provide concise and accurate responses using your existing knowledge base. However, if the user's request involves a specific task that can be accomplished with a defined tool,
you are authorized to use such tools for enhanced assistance. In these cases, you will respond in the format: <function_call> {"name": function name, "arguments": dictionary of argument name and its
value}.
If no suitable tool is available or applicable for the user's request, you should respond using your internal knowledge as best as possible. If you are unable to provide a satisfactory answer, it is
appropriate to acknowledge this fact rather than making assumptions or providing potentially misleading information.
user: How do I add voices to pyttsx3?
assistant:
tool: 0
assistant: To add voices to the pyttsx3 library in Python, you can use the `init()` function with the `voices` parameter. This parameter accepts a list of available voices on your system. Here is an example:
```python
import pyttsx3
engine = pyttsx3.init()
voices = engine.getProperty('voices') # Get a list of all available voices
# Select the voice you want to use, for instance, the first one in the list
engine.setProperty('voice', voices[0].id)
```
This code initializes pyttsx3 and retrieves a list of available voices on your system. It then sets the default voice to the first one in the list. You can change the index to select a different voice.
Please note that the availability of voices depends on your operating system and its settings. If you want to add more voices, you would need to install additional speech synthesis engines or voice packages for your OS.

2
week2/day5.ipynb

@ -699,7 +699,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.11" "version": "3.11.9"
} }
}, },
"nbformat": 4, "nbformat": 4,

117
week2/day5challenge.py

@ -0,0 +1,117 @@
import ollama
import gradio as gr
import json
from pydub import AudioSegment
from pydub.playback import play
import pyttsx3
import time
MODEL = "granite3.1-dense:8b-instruct-q8_0" # "llama3.1-Q8-8b:latest"
client = ollama.Client()
speech_engine = pyttsx3.init()
speech_engine.setProperty('volume', 1.0)
speech_engine.setProperty('rate', 150)
# I gave the model the previous system prompt and asked it for a better one because it kept
# thinking it either could only respond if a tool was used or was using a tool when it shouldn't
system_message = '''
You are a sophisticated, advanced artificial intelligence like Data from Star Trek. You respond in a dry or matter-of-fact tone similar to Data.
Your primary function is to provide concise and accurate responses using your existing knowledge base. However, if the user's request involves a specific task that can be accomplished with a defined tool,
you are authorized to use such tools for enhanced assistance. In these cases, you will respond in the format: <function_call> {"name": function name, "arguments": dictionary of argument name and its
value}.
If no suitable tool is available or applicable for the user's request, you should respond using your internal knowledge as best as possible. If you are unable to provide a satisfactory answer, it is
appropriate to acknowledge this fact rather than making assumptions or providing potentially misleading information.
'''
def talker(message):
# runAndWait() never exits so have to do a manual loop
speech_engine.startLoop(False)
speech_engine.say(message)
speech_engine.iterate()
time.sleep(0.1)
def add_numbers(num1: float, num2: float):
'''
Add two integers or floating point numbers together.
Not for use for anything other than addition of numbers.
Args:
num1: The first number
num2: The second number
Returns:
The sum of num1 and num2
'''
return num1 + num2
def do_nothing(var):
'''
A tool for use when no other tool makes sense.
'''
print("I did nothing.")
return "Ignore this line."
def doug_says(user_message):
'''
When asked what Doug Funnie would say, this tool responds with his
most likely response. Only suitable for questions that include Doug's name.
Pass the user's exact query when calling this tool.
'''
return "Why does this always happen to me?"
# define the list of tools
tools = [add_numbers, do_nothing, doug_says]
# Have to define all functions as a dictionary in order to call them by just their string name.
# It's basically a lookup but the value is taken as a function name and is called
functions = {'add_numbers': add_numbers, 'do_nothing': do_nothing, 'doug_says': doug_says}
def chat(message, history):
messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": message}]
response = client.chat(
model=MODEL,
messages=messages,
tools=tools,
stream=False,
options={
"temperature": 0.1
}
)
# print(response)
if response.message.tool_calls:
tool = response.message.tool_calls[0]
print(tool)
# lookup the function name the AI says to use in the functions list, and pass it the arguments the AI pulled from the prompt
output = functions[tool.function.name](**tool.function.arguments)
# response.message is the only way to log a tool call, can't use the dictionary way because content is empty
messages.append(response.message)
# this is the ollama defined way to add the output for a tool
messages.append({"role": "tool", "content": str(output), 'name': tool.function.name})
response = client.chat(
model=MODEL,
messages=messages,
stream=False,
options={
"temperature": 0.1
}
)
messages.append(response.message)
for item in messages:
with open("day4messages.txt", "w") as f:
for message in messages:
role = message['role']
content = message['content']
f.write(f"{role}: {content}\n\n")
content = response['message']['content'] or ""
talker(content)
return content
if __name__ == '__main__':
gr.ChatInterface(fn=chat, type='messages').launch()
Loading…
Cancel
Save