From a252963f956a7d76344e3f0ce24b1047480a25af Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 23 Dec 2023 04:25:06 -0500
Subject: [PATCH] --disable-smart-memory now unloads everything like it did originally.

---
 comfy/model_management.py | 4 ++++
 execution.py              | 2 ++
 2 files changed, 6 insertions(+)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 61c967f6..3adc4270 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -754,6 +754,10 @@ def soft_empty_cache(force=False):
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
 
+def unload_all_models():
+    free_memory(1e30, get_torch_device())
+
+
 def resolve_lowvram_weight(weight, model, key): #TODO: remove
     return weight
 
diff --git a/execution.py b/execution.py
index 7db1f095..7ad17131 100644
--- a/execution.py
+++ b/execution.py
@@ -382,6 +382,8 @@ class PromptExecutor:
             for x in executed:
                 self.old_prompt[x] = copy.deepcopy(prompt[x])
             self.server.last_node_id = None
+            if comfy.model_management.DISABLE_SMART_MEMORY:
+                comfy.model_management.unload_all_models()