
A tiny bit of reorganizing.

pull/44/head
comfyanonymous 2 years ago
parent commit 7ec1dd25a2
  1. comfy_extras/clip_vision_config.json (23 additions)
  2. nodes.py (24 changes: 12 additions, 12 deletions)

comfy_extras/clip_vision_config.json

@@ -0,0 +1,23 @@
+{
+  "_name_or_path": "openai/clip-vit-large-patch14",
+  "architectures": [
+    "CLIPVisionModel"
+  ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 1024,
+  "image_size": 224,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 14,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.24.0"
+}
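
This file mirrors the Hugging Face transformers config schema for the openai/clip-vit-large-patch14 vision tower, so the model skeleton can be built from the bundled JSON instead of fetching configuration from the hub. A minimal sketch of how such a file can be consumed (illustrative only, assuming transformers is installed; this is not the loading code in this commit):

    from transformers import CLIPVisionConfig, CLIPVisionModel

    # Build the vision tower from the bundled JSON config. This yields a
    # randomly initialized model; the actual checkpoint weights are loaded
    # separately.
    config = CLIPVisionConfig.from_json_file("comfy_extras/clip_vision_config.json")
    model = CLIPVisionModel(config)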

nodes.py

@@ -395,10 +395,10 @@ class CLIPVisionEncode:
         return {"required": { "clip_vision": ("CLIP_VISION",),
                               "image": ("IMAGE",)
                              }}
-    RETURN_TYPES = ("CLIP_VISION_EMBED",)
+    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
     FUNCTION = "encode"
 
-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"
 
     def encode(self, clip_vision, image):
         output = clip_vision.encode_image(image)
@@ -425,16 +425,16 @@ class StyleModelLoader:
 class StyleModelApply:
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": {"clip_vision_embed": ("CLIP_VISION_EMBED", ),
+        return {"required": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                              "style_model": ("STYLE_MODEL", )
                             }}
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "apply_stylemodel"
 
-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"
 
-    def apply_stylemodel(self, clip_vision_embed, style_model):
-        c = style_model.get_cond(clip_vision_embed)
+    def apply_stylemodel(self, clip_vision_output, style_model):
+        c = style_model.get_cond(clip_vision_output)
         return ([[c, {}]], )
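
The rename is mechanical: the socket type CLIP_VISION_EMBED becomes CLIP_VISION_OUTPUT, so CLIPVisionEncode still feeds StyleModelApply directly. A hypothetical wiring, mirroring how the executor would chain the two nodes (variable names are illustrative, and encode is assumed to return a one-tuple, as its single RETURN_TYPES entry suggests):

    # Hypothetical direct invocation; in ComfyUI the graph executor does this.
    (vision_output,) = CLIPVisionEncode().encode(clip_vision, image)
    (conditioning,) = StyleModelApply().apply_stylemodel(vision_output, style_model)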
@@ -445,7 +445,7 @@ class ConditioningAppend:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "append"
 
-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"
 
     def append(self, conditioning_to, conditioning_from):
         c = []
@@ -504,7 +504,7 @@ class LatentRotate:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "rotate"
 
-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"
 
     def rotate(self, samples, rotation):
         s = samples.copy()
@@ -528,7 +528,7 @@ class LatentFlip:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "flip"
 
-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"
 
     def flip(self, samples, flip_method):
         s = samples.copy()
@@ -593,7 +593,7 @@ class LatentCrop:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "crop"
 
-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"
 
     def crop(self, samples, width, height, x, y):
         s = samples.copy()
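
A slash in a CATEGORY string nests the node one level deeper in the add-node menu, which is the convention this reorganization leans on. A minimal sketch of the pattern (LatentExample is hypothetical, not part of this commit):

    class LatentExample:  # hypothetical node, for illustration only
        @classmethod
        def INPUT_TYPES(s):
            return {"required": {"samples": ("LATENT",)}}
        RETURN_TYPES = ("LATENT",)
        FUNCTION = "passthrough"
        CATEGORY = "latent/transform"  # appears under latent > transform

        def passthrough(self, samples):
            return (samples,)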
@@ -951,8 +951,6 @@ NODE_CLASS_MAPPINGS = {
     "LatentCrop": LatentCrop,
     "LoraLoader": LoraLoader,
     "CLIPLoader": CLIPLoader,
-    "StyleModelLoader": StyleModelLoader,
-    "CLIPVisionLoader": CLIPVisionLoader,
     "CLIPVisionEncode": CLIPVisionEncode,
     "StyleModelApply":StyleModelApply,
     "ConditioningAppend":ConditioningAppend,
@@ -960,6 +958,8 @@ NODE_CLASS_MAPPINGS = {
     "ControlNetLoader": ControlNetLoader,
     "DiffControlNetLoader": DiffControlNetLoader,
     "T2IAdapterLoader": T2IAdapterLoader,
+    "StyleModelLoader": StyleModelLoader,
+    "CLIPVisionLoader": CLIPVisionLoader,
     "VAEDecodeTiled": VAEDecodeTiled,
 }
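
NODE_CLASS_MAPPINGS is the name-to-class registry that node names are resolved through, so moving the StyleModelLoader and CLIPVisionLoader entries next to the other loaders only regroups the source; dict lookups are order-independent and unaffected. A hypothetical resolution step:

    # Hypothetical lookup, mirroring how a node name is resolved to its class.
    node_class = NODE_CLASS_MAPPINGS["StyleModelLoader"]
    node = node_class()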
