Browse Source

Make return_projected_pooled settable from the __init__

pull/2909/head
comfyanonymous 9 months ago
parent
commit
8ac69f62e5
  1. 4
      comfy/sd1_clip.py

4
comfy/sd1_clip.py

@@ -67,7 +67,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
]
def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=comfy.clip_model.CLIPTextModel,
special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, enable_attention_masks=False): # clip-vit-base-patch32
special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, enable_attention_masks=False, return_projected_pooled=True): # clip-vit-base-patch32
super().__init__()
assert layer in self.LAYERS
@@ -91,7 +91,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
self.enable_attention_masks = enable_attention_masks
self.layer_norm_hidden_state = layer_norm_hidden_state
self.return_projected_pooled = True
self.return_projected_pooled = return_projected_pooled
if layer == "hidden":
assert layer_idx is not None

Loading…
Cancel
Save