@@ -67,7 +67,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
     ]
     def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77,
                  freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=comfy.clip_model.CLIPTextModel,
-                 special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, enable_attention_masks=False):  # clip-vit-base-patch32
+                 special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True, enable_attention_masks=False, return_projected_pooled=True):  # clip-vit-base-patch32
         super().__init__()
         assert layer in self.LAYERS
 
@@ -91,7 +91,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
         self.enable_attention_masks = enable_attention_masks
 
         self.layer_norm_hidden_state = layer_norm_hidden_state
-        self.return_projected_pooled = True
+        self.return_projected_pooled = return_projected_pooled
 
         if layer == "hidden":
             assert layer_idx is not None
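
For context, a minimal usage sketch of the new flag, assuming ComfyUI's comfy/sd1_clip.py module layout; only the parameter name return_projected_pooled and its default come from the diff above, the surrounding construction details are illustrative:

# Illustrative sketch, not part of the diff: SDClipModel is defined in
# ComfyUI's comfy/sd1_clip.py; construction arguments here are assumptions.
from comfy.sd1_clip import SDClipModel

# Default keeps the previously hardcoded behavior (return_projected_pooled=True):
# per the attribute name, the pooled text embedding is returned after the
# CLIP text projection.
clip = SDClipModel(layer="hidden", layer_idx=-2)

# New: callers can opt out and receive the unprojected pooled output instead.
clip_raw = SDClipModel(layer="hidden", layer_idx=-2, return_projected_pooled=False)

Threading the value through the constructor instead of hardcoding True lets callers that expect the raw pooled state opt out without subclassing, while every existing call site keeps its old behavior.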