diff --git a/comfy/model_management.py b/comfy/model_management.py
index 0babdc13..ecbcabb0 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -333,14 +333,19 @@ def unet_offload_device():
         return torch.device("cpu")
 
 def text_encoder_offload_device():
-    if args.gpu_only:
+    if args.gpu_only or vram_state == VRAMState.SHARED:
         return get_torch_device()
     else:
         return torch.device("cpu")
 
 def text_encoder_device():
-    if vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.SHARED or vram_state == VRAMState.NORMAL_VRAM:
+    if args.gpu_only or vram_state == VRAMState.SHARED:
         return get_torch_device()
+    elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
+        if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
+            return get_torch_device()
+        else:
+            return torch.device("cpu")
     else:
         return torch.device("cpu")
 
diff --git a/comfy/sd.py b/comfy/sd.py
index 5eef51b3..08d68c5f 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -533,8 +533,9 @@ class CLIP:
         load_device = model_management.text_encoder_device()
         offload_device = model_management.text_encoder_offload_device()
         self.cond_stage_model = clip(**(params))
-        if model_management.should_use_fp16(load_device):
-            self.cond_stage_model.half()
+        #TODO: make sure this doesn't have a quality loss before enabling.
+        # if model_management.should_use_fp16(load_device):
+        #     self.cond_stage_model.half()
 
         self.cond_stage_model = self.cond_stage_model.to()
 
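Below is a minimal standalone sketch of the device-selection heuristic the first hunk introduces. The VRAMState enum, args object, and get_torch_device() here are simplified stand-ins for ComfyUI's real globals, not the actual objects; only the branch structure and the 8-thread cutoff are taken directly from the patch. The second hunk is independent of this logic: it keeps the text encoder in fp32 by commenting out the .half() cast until the quality impact of fp16 has been verified.

# sketch.py -- illustrative only; VRAMState, args, and get_torch_device
# are simplified stand-ins for ComfyUI's globals.
from enum import Enum
from types import SimpleNamespace

import torch

class VRAMState(Enum):
    NORMAL_VRAM = 0
    HIGH_VRAM = 1
    SHARED = 2  # GPU and CPU share the same memory pool

args = SimpleNamespace(gpu_only=False)  # stand-in for the parsed CLI args
vram_state = VRAMState.NORMAL_VRAM      # stand-in for the detected VRAM state

def get_torch_device():
    # stand-in: the real function also handles mps, xpu, directml, etc.
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")

def text_encoder_device():
    if args.gpu_only or vram_state == VRAMState.SHARED:
        return get_torch_device()
    elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
        # Fewer than 8 torch threads is treated as a slow CPU, so moving
        # the text encoder to the GPU pays for the transfer; a fast CPU
        # runs it in place and skips the device shuffling entirely.
        if torch.get_num_threads() < 8:
            return get_torch_device()
        return torch.device("cpu")
    else:
        return torch.device("cpu")

if __name__ == "__main__":
    print(text_encoder_device())  # cpu on a fast CPU, the GPU device otherwise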