Browse Source

Low vram mode for controlnets.

pull/14/head
comfyanonymous 2 years ago
parent
commit
d66415c021
  1. 19
      comfy/model_management.py
  2. 2
      comfy/sd.py

19
comfy/model_management.py

@ -102,6 +102,12 @@ def load_model_gpu(model):
def load_controlnet_gpu(models): def load_controlnet_gpu(models):
global current_gpu_controlnets global current_gpu_controlnets
global vram_state
if vram_state == LOW_VRAM or vram_state == NO_VRAM:
#don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
return
for m in current_gpu_controlnets: for m in current_gpu_controlnets:
if m not in models: if m not in models:
m.cpu() m.cpu()
@ -111,6 +117,19 @@ def load_controlnet_gpu(models):
current_gpu_controlnets.append(m.cuda()) current_gpu_controlnets.append(m.cuda())
def load_if_low_vram(model):
    """Move *model* onto the GPU, but only in a low/no-VRAM mode.

    In LOW_VRAM/NO_VRAM modes controlnets are kept on the CPU and moved to
    the GPU just before they run; in every other mode the model is returned
    untouched (it is assumed to already live where it should).
    """
    global vram_state
    # Membership test over the two memory-constrained states.
    if vram_state in (LOW_VRAM, NO_VRAM):
        return model.cuda()
    return model
def unload_if_low_vram(model):
    """Move *model* back to the CPU, but only in a low/no-VRAM mode.

    Counterpart to ``load_if_low_vram``: after a controlnet has run in a
    memory-constrained mode it is evicted from the GPU immediately; in every
    other mode the model is returned unchanged.
    """
    global vram_state
    # Membership test over the two memory-constrained states.
    if vram_state in (LOW_VRAM, NO_VRAM):
        return model.cpu()
    return model
def get_free_memory(): def get_free_memory():
dev = torch.cuda.current_device() dev = torch.cuda.current_device()
stats = torch.cuda.memory_stats(dev) stats = torch.cuda.memory_stats(dev)

2
comfy/sd.py

@ -349,7 +349,9 @@ class ControlNet:
precision_scope = contextlib.nullcontext precision_scope = contextlib.nullcontext
with precision_scope(self.device): with precision_scope(self.device):
self.control_model = model_management.load_if_low_vram(self.control_model)
control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt) control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt)
self.control_model = model_management.unload_if_low_vram(self.control_model)
out = [] out = []
autocast_enabled = torch.is_autocast_enabled() autocast_enabled = torch.is_autocast_enabled()
for x in control: for x in control:

Loading…
Cancel
Save