From 19300655ddaeb1287a2ecf427cf64c0766e8a999 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 17 May 2024 00:31:32 -0400
Subject: [PATCH] Don't automatically switch to lowvram mode on GPUs with low
 memory.

---
 comfy/model_management.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 7b54b256..21ae8d29 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -119,10 +119,6 @@ def get_total_memory(dev=None, torch_total_too=False):
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
 total_ram = psutil.virtual_memory().total / (1024 * 1024)
 logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
-if not args.normalvram and not args.cpu:
-    if lowvram_available and total_vram <= 4096:
-        logging.warning("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
-        set_vram_to = VRAMState.LOW_VRAM
 
 try:
     OOM_EXCEPTION = torch.cuda.OutOfMemoryError
@@ -451,9 +447,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False):
             model_size = loaded_model.model_memory_required(torch_dev)
             current_free_mem = get_free_memory(torch_dev)
             lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
-            if model_size > (current_free_mem - inference_memory): #only switch to lowvram if really necessary
-                vram_set_state = VRAMState.LOW_VRAM
-            else:
+            if model_size <= (current_free_mem - inference_memory): #only switch to lowvram if really necessary
                 lowvram_model_memory = 0
 
             if vram_set_state == VRAMState.NO_VRAM:
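
For readers skimming the patch, a minimal standalone sketch of the decision logic the diff leaves in place follows. The function pick_lowvram_budget and the standalone framing are illustrative assumptions, not part of ComfyUI; the variable names and the 64 MB / 1 GB / 1.3 constants mirror the diff, and reading the 1.3 divisor as a safety margin is itself an assumption.

    # Sketch of the post-patch lowvram decision (hypothetical helper; names
    # model_size, current_free_mem, inference_memory mirror the diff).
    MB = 1024 * 1024

    def pick_lowvram_budget(model_size, current_free_mem, inference_memory):
        # Budget for weights in lowvram mode: at least 64 MB, otherwise the
        # free VRAM minus ~1 GB of headroom, scaled down by 1.3 (assumed to
        # be a safety margin against underestimated usage).
        lowvram_model_memory = int(max(64 * MB, (current_free_mem - 1024 * MB) / 1.3))
        if model_size <= (current_free_mem - inference_memory):
            # Only switch to lowvram if really necessary: the model fits
            # alongside the inference reserve, so 0 disables the budget.
            lowvram_model_memory = 0
        return lowvram_model_memory

    # Example: a 3 GB model with 8 GB free and 1 GB reserved for inference
    # fits, so no lowvram budget is applied.
    assert pick_lowvram_budget(3 * 1024 * MB, 8 * 1024 * MB, 1024 * MB) == 0

Note the inversion in the second hunk: before the patch, an oversized model forced vram_set_state to LOW_VRAM; after it, the code only zeroes the budget when the model fits and otherwise leaves the configured VRAM state untouched, which is what stops the automatic lowvram switch the subject line describes.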