Browse Source

Leave text_encoder on the CPU when it can handle it.

pull/830/head
comfyanonymous 1 year ago
parent
commit
3b6fe51c1d
Changed files:
  1. comfy/model_management.py — 9 lines changed
  2. comfy/sd.py — 5 lines changed

9
comfy/model_management.py

@@ -333,14 +333,19 @@ def unet_offload_device():
     return torch.device("cpu")

 def text_encoder_offload_device():
-    if args.gpu_only:
+    if args.gpu_only or vram_state == VRAMState.SHARED:
         return get_torch_device()
     else:
         return torch.device("cpu")

 def text_encoder_device():
-    if vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.SHARED or vram_state == VRAMState.NORMAL_VRAM:
+    if args.gpu_only or vram_state == VRAMState.SHARED:
         return get_torch_device()
+    elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
+        if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
+            return get_torch_device()
+        else:
+            return torch.device("cpu")
     else:
         return torch.device("cpu")

5
comfy/sd.py

@@ -533,8 +533,9 @@ class CLIP:
         load_device = model_management.text_encoder_device()
         offload_device = model_management.text_encoder_offload_device()
         self.cond_stage_model = clip(**(params))
-        if model_management.should_use_fp16(load_device):
-            self.cond_stage_model.half()
+        #TODO: make sure this doesn't have a quality loss before enabling.
+        # if model_management.should_use_fp16(load_device):
+        #     self.cond_stage_model.half()
         self.cond_stage_model = self.cond_stage_model.to()

Loading…
Cancel
Save