From 4a77fcd6ab01d69e18c384faa29ae1c3d02237f3 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 31 Jul 2023 00:08:54 -0400
Subject: [PATCH] Only shift text encoder to vram when CPU cores are under 8.

---
 comfy/model_management.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 75f3b38a..0ffca06d 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -364,7 +364,8 @@ def text_encoder_device():
     if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
-        if torch.get_num_threads() < 4: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
+        #NOTE: on a Ryzen 5 7600X with 4080 it's faster to shift to GPU
+        if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
             return get_torch_device()
         else:
             return torch.device("cpu")
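
For context, below is a minimal standalone sketch of the heuristic this patch changes: shift the text encoder to the GPU only when torch reports fewer CPU threads than a threshold (raised here from 4 to 8), otherwise leave it on the CPU. The function name pick_text_encoder_device and the threshold parameter are illustrative and not part of ComfyUI's API; the real text_encoder_device() also consults args.gpu_only and vram_state, which are omitted here.

import torch

def pick_text_encoder_device(threshold: int = 8) -> torch.device:
    # Sketch of the patched heuristic: with few CPU threads, running the text
    # encoder on the CPU is likely slower than shifting it to VRAM, so prefer
    # the GPU when one is available.
    if torch.cuda.is_available() and torch.get_num_threads() < threshold:
        return torch.device("cuda")
    # Enough CPU threads (or no GPU): keep the text encoder on the CPU.
    return torch.device("cpu")

if __name__ == "__main__":
    print("torch CPU threads:", torch.get_num_threads())
    print("text encoder device:", pick_text_encoder_device())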