From 4b0239066daa0529bc18a1c932d4e8cd148b5ab5 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 2 Feb 2024 10:02:49 -0500
Subject: [PATCH] Always use fp16 for the text encoders.

---
 comfy/model_management.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index e12146d1..cbaa8087 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -546,10 +546,8 @@ def text_encoder_dtype(device=None):
     if is_device_cpu(device):
         return torch.float16
 
-    if should_use_fp16(device, prioritize_performance=False):
-        return torch.float16
-    else:
-        return torch.float32
+    return torch.float16
+
 
 def intermediate_device():
     if args.gpu_only:
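
Net effect of the hunk above, as a minimal standalone sketch: after the patch, text_encoder_dtype() returns torch.float16 for non-CPU devices as well, instead of falling back to torch.float32 when should_use_fp16(device, prioritize_performance=False) is False. The is_device_cpu() stub below is a hypothetical stand-in added only so the sketch runs on its own; the real helper lives elsewhere in comfy/model_management.py, and any branches of the function above the hunk are not reflected here.

import torch

def is_device_cpu(device):
    # Hypothetical stand-in for the helper used in comfy/model_management.py,
    # included only to make this sketch self-contained.
    return device is not None and getattr(device, "type", str(device)) == "cpu"

def text_encoder_dtype(device=None):
    # Post-patch behavior: the should_use_fp16() branch is gone, so fp16 is
    # returned unconditionally (the CPU check is now redundant but harmless).
    if is_device_cpu(device):
        return torch.float16
    return torch.float16

print(text_encoder_dtype(torch.device("cpu")))   # torch.float16
print(text_encoder_dtype(torch.device("cuda")))  # torch.float16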