
Don't init the CLIP model when the checkpoint has no CLIP weights.

Branch: pull/944/merge
comfyanonymous committed 9 months ago
commit 38b7ac6e26
1 changed file: comfy/sd.py (5 lines changed)
@@ -470,10 +470,13 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         w = WeightsLoader()
         clip_target = model_config.clip_target()
         if clip_target is not None:
+            sd = model_config.process_clip_state_dict(sd)
+            if any(k.startswith('cond_stage_model.') for k in sd):
                 clip = CLIP(clip_target, embedding_directory=embedding_directory)
                 w.cond_stage_model = clip.cond_stage_model
-            sd = model_config.process_clip_state_dict(sd)
                 load_model_weights(w, sd)
+            else:
+                print("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")
     left_over = sd.keys()
     if len(left_over) > 0:
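Reduced to a standalone sketch, the change gates text-encoder construction on whether the processed state dict actually contains keys under the cond_stage_model. prefix. The helper name, the build_clip callable, and the toy state-dict keys below are hypothetical illustrations, not ComfyUI's API:

    def maybe_build_text_encoder(state_dict, build_clip):
        # Hypothetical helper: only build the CLIP/text encoder when the
        # checkpoint actually carries weights for it, i.e. when at least
        # one key starts with the 'cond_stage_model.' prefix.
        if any(k.startswith('cond_stage_model.') for k in state_dict):
            clip = build_clip()
            # ...load the matching 'cond_stage_model.*' weights into clip here...
            return clip
        print("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")
        return None

    # Toy state dicts with made-up keys, exercising both paths.
    sd_unet_only = {"model.diffusion_model.input_blocks.0.0.weight": None}
    sd_with_clip = {"cond_stage_model.transformer.text_model.embeddings.token_embedding.weight": None}

    assert maybe_build_text_encoder(sd_unet_only, build_clip=object) is None
    assert maybe_build_text_encoder(sd_with_clip, build_clip=object) is not None

A checkpoint that ships only UNet (and possibly VAE) weights therefore skips CLIP initialization entirely instead of instantiating an encoder that would never receive weights.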
