
Move code to empty gpu cache to model_management.py

pull/517/head
comfyanonymous, 2 years ago
commit deb2b93e79
Changed files:
  1. comfy/model_management.py (9 changes)
  2. execution.py (9 changes)

comfy/model_management.py

@@ -307,6 +307,15 @@ def should_use_fp16():
     return True

+def soft_empty_cache():
+    global xpu_available
+    if xpu_available:
+        torch.xpu.empty_cache()
+    elif torch.cuda.is_available():
+        if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+
 #TODO: might be cleaner to put this somewhere else
 import threading

execution.py

@@ -10,7 +10,7 @@ import gc
 import torch
 import nodes

-from model_management import xpu_available
+import comfy.model_management

 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
@@ -204,12 +204,7 @@ class PromptExecutor:
         self.server.send_sync("executing", { "node": None }, self.server.client_id)

         gc.collect()
-        if torch.cuda.is_available():
-            if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
-                torch.cuda.empty_cache()
-                torch.cuda.ipc_collect()
-        elif xpu_available:
-            torch.xpu.empty_cache()
+        comfy.model_management.soft_empty_cache()

 def validate_inputs(prompt, item):
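
For context, a minimal sketch of how downstream code can release the GPU cache through the relocated helper after this commit. The wrapper run_heavy_inference and its arguments are hypothetical and only illustrate the call pattern; it assumes the code runs inside a ComfyUI checkout where comfy.model_management imports the same way execution.py now does.

import gc

import comfy.model_management


def run_heavy_inference(model, latents):
    # Hypothetical caller, not part of this commit: run the model, then clean up.
    try:
        return model(latents)
    finally:
        # Mirror the executor's cleanup order: collect Python garbage first,
        # then let soft_empty_cache() pick the right backend (CUDA or XPU).
        gc.collect()
        comfy.model_management.soft_empty_cache()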
