From the Udemy course on LLM engineering.
https://www.udemy.com/course/llm-engineering-master-ai-and-large-language-models
import requests
import json
import ollama


def get_downloaded_models():
    # Ask the local Ollama server which models have already been pulled
    models_raw = requests.get("http://localhost:11434/api/tags").content
    models_dict = json.loads(models_raw)
    models = [model["name"] for model in models_dict["models"]]
    return models


def get_ollama_response(model, prompt, translate_from, translate_to, options):
    def get_system_prompt():
        # The system prompt is kept in a separate file so it can be edited
        # without touching the code
        with open('system_prompt.txt', 'r') as file:
            system_prompt = file.read()
        return system_prompt

    system_prompt = get_system_prompt()
    user_prompt = f"Translate from {translate_from} to {translate_to}: {prompt}"
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    # Stream the translation back chunk by chunk so the caller can show partial output
    response = ollama.chat(model, messages, options=options, stream=True)
    for chunk in response:
        yield chunk["message"]["content"]
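
A minimal usage sketch, not part of the original file: it assumes an Ollama server is running on localhost:11434, that a model has been pulled locally (the name "llama3.2" below is illustrative), and that system_prompt.txt exists next to the script.

# Usage sketch -- model name, languages, and options are illustrative assumptions.
if __name__ == "__main__":
    print("Available models:", get_downloaded_models())

    stream = get_ollama_response(
        model="llama3.2",
        prompt="Where is the nearest train station?",
        translate_from="English",
        translate_to="German",
        options={"temperature": 0.3},
    )
    # The function is a generator, so print the translated text as it streams in
    for token in stream:
        print(token, end="", flush=True)
    print()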