@@ -395,10 +395,10 @@ class CLIPVisionEncode:
         return {"required": { "clip_vision": ("CLIP_VISION",),
                              "image": ("IMAGE",)
                             }}
-    RETURN_TYPES = ("CLIP_VISION_EMBED",)
+    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
     FUNCTION = "encode"

-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"

     def encode(self, clip_vision, image):
         output = clip_vision.encode_image(image)
@@ -425,16 +425,16 @@ class StyleModelLoader:
 class StyleModelApply:
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": {"clip_vision_embed": ("CLIP_VISION_EMBED", ),
+        return {"required": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                              "style_model": ("STYLE_MODEL", )
                             }}
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "apply_stylemodel"

-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"

-    def apply_stylemodel(self, clip_vision_embed, style_model):
-        c = style_model.get_cond(clip_vision_embed)
+    def apply_stylemodel(self, clip_vision_output, style_model):
+        c = style_model.get_cond(clip_vision_output)
         return ([[c, {}]], )

@@ -445,7 +445,7 @@ class ConditioningAppend:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "append"

-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"

     def append(self, conditioning_to, conditioning_from):
         c = []
@@ -504,7 +504,7 @@ class LatentRotate:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "rotate"

-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"

     def rotate(self, samples, rotation):
         s = samples.copy()
@@ -528,7 +528,7 @@ class LatentFlip:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "flip"

-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"

     def flip(self, samples, flip_method):
         s = samples.copy()
@@ -593,7 +593,7 @@ class LatentCrop:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "crop"

-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"

     def crop(self, samples, width, height, x, y):
         s = samples.copy()
@@ -951,8 +951,6 @@ NODE_CLASS_MAPPINGS = {
     "LatentCrop": LatentCrop,
     "LoraLoader": LoraLoader,
     "CLIPLoader": CLIPLoader,
-    "StyleModelLoader": StyleModelLoader,
-    "CLIPVisionLoader": CLIPVisionLoader,
     "CLIPVisionEncode": CLIPVisionEncode,
     "StyleModelApply":StyleModelApply,
     "ConditioningAppend":ConditioningAppend,
@@ -960,6 +958,8 @@ NODE_CLASS_MAPPINGS = {
     "ControlNetLoader": ControlNetLoader,
     "DiffControlNetLoader": DiffControlNetLoader,
     "T2IAdapterLoader": T2IAdapterLoader,
+    "StyleModelLoader": StyleModelLoader,
+    "CLIPVisionLoader": CLIPVisionLoader,
     "VAEDecodeTiled": VAEDecodeTiled,
 }
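
Net effect of the diff: the socket type CLIP_VISION_EMBED is renamed to CLIP_VISION_OUTPUT end to end (CLIPVisionEncode's return type plus StyleModelApply's input and parameter names), the style-model nodes move to the conditioning/style_model submenu and the latent transforms to latent/transform (a "/" in CATEGORY nests the node in a submenu of the add-node menu), and the StyleModelLoader/CLIPVisionLoader registrations are regrouped next to the other loaders in NODE_CLASS_MAPPINGS. The sketch below traces the renamed value through the two touched methods; it is a minimal stand-alone illustration, with FakeClipVision and FakeStyleModel as hypothetical stubs for the objects the real loader nodes produce, not ComfyUI code.

# Minimal sketch of the renamed data flow; FakeClipVision/FakeStyleModel
# are hypothetical stubs, not ComfyUI classes.
class FakeClipVision:
    def encode_image(self, image):
        # This return value is what now travels over a CLIP_VISION_OUTPUT
        # socket (previously typed CLIP_VISION_EMBED).
        return {"image_embeds": [0.1, 0.2, 0.3]}

class FakeStyleModel:
    def get_cond(self, clip_vision_output):
        # StyleModelApply hands the whole encoder output to the style model,
        # matching the renamed parameter in the diff.
        return clip_vision_output["image_embeds"]

clip_vision = FakeClipVision()
style_model = FakeStyleModel()

# Same sequence as CLIPVisionEncode.encode() -> StyleModelApply.apply_stylemodel():
clip_vision_output = clip_vision.encode_image(image="pixels")
c = style_model.get_cond(clip_vision_output)
conditioning = [[c, {}]]    # the [[cond, options_dict]] shape the node returns
print(conditioning)         # [[[0.1, 0.2, 0.3], {}]]

One caveat worth noting for custom-node authors: since link types are matched by name, any third-party node that still declares CLIP_VISION_EMBED sockets will no longer connect to these nodes after this change.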