|
|
# --- Modal deployment configuration -----------------------------------------
# NOTE(review): this region was a mangled diff hunk (stray `@@` header and each
# assignment duplicated old/new). Reconstructed here as clean source, keeping
# the newer side of each hunk pair (the version that adds the model-name
# comment and, further down, the function timeout).

# Container image: Debian slim base with the HF inference stack installed.
# bitsandbytes + accelerate are present for quantized loading on a small GPU.
image = Image.debian_slim().pip_install("torch", "transformers", "bitsandbytes", "accelerate")

# Hugging Face token, injected from the Modal secret store — required to pull
# the gated Llama 3.1 weights.
secrets = [modal.Secret.from_name("hf-secret")]

# GPU type requested for the inference function.
GPU = "T4"

# Model to serve; the commented alternative is a small ungated model useful
# for smoke-testing without HF access approval.
MODEL_NAME = "meta-llama/Meta-Llama-3.1-8B"  # "google/gemma-2-2b"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.function(image=image, secrets=secrets, gpu=GPU) |
|
|
|
@app.function(image=image, secrets=secrets, gpu=GPU, timeout=1800) |
|
|
|
def generate(prompt: str) -> str: |
|
|
|
def generate(prompt: str) -> str: |
|
|
|
import os |
|
|
|
import os |
|
|
|
import torch |
|
|
|
import torch |
|
|
|