|
|
|
@@ -1059,6 +1059,43 @@ class ImagePadForOutpaint:
|
|
|
|
|
|
|
|
|
|
return (new_image, mask) |
|
|
|
|
|
|
|
|
|
class ImageToMask:
    """Node that pulls one color channel out of an image and emits it as a mask."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "channel": (["red", "green", "blue"],),
            }
        }

    CATEGORY = "image"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "image_to_mask"

    def image_to_mask(self, image, channel):
        # Translate the channel name into an index along the last (channel) axis.
        channel_index = ("red", "green", "blue").index(channel)
        # NOTE(review): only the first image of the batch is converted (image[0]);
        # the resulting mask is 2-D (height, width).
        mask = image[0][:, :, channel_index]
        return (mask,)
|
|
|
|
|
|
|
|
|
class MaskToImage:
    """Node that broadcasts a single-channel mask into a 3-channel RGB image."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
            }
        }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "mask_to_image"

    def mask_to_image(self, mask):
        # Add a leading batch axis and a trailing channel axis, then broadcast
        # the single channel to three (RGB) without copying data.
        # NOTE(review): assumes mask is 2-D (height, width) — TODO confirm;
        # expand() yields a non-contiguous view sharing the mask's storage.
        batched = mask.unsqueeze(0).unsqueeze(-1)
        return (batched.expand(-1, -1, -1, 3),)
|
|
|
|
|
|
|
|
|
NODE_CLASS_MAPPINGS = { |
|
|
|
|
"KSampler": KSampler, |
|
|
|
@@ -1102,6 +1139,8 @@ NODE_CLASS_MAPPINGS = {
|
|
|
|
|
"unCLIPCheckpointLoader": unCLIPCheckpointLoader, |
|
|
|
|
"CheckpointLoader": CheckpointLoader, |
|
|
|
|
"DiffusersLoader": DiffusersLoader, |
|
|
|
|
"ImageToMask": ImageToMask, |
|
|
|
|
"MaskToImage": MaskToImage, |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
NODE_DISPLAY_NAME_MAPPINGS = { |
|
|
|
@@ -1147,6 +1186,8 @@ NODE_DISPLAY_NAME_MAPPINGS = {
|
|
|
|
|
"ImageUpscaleWithModel": "Upscale Image (using Model)", |
|
|
|
|
"ImageInvert": "Invert Image", |
|
|
|
|
"ImagePadForOutpaint": "Pad Image for Outpainting", |
|
|
|
|
"ImageToMask": "Convert Image to Mask", |
|
|
|
|
"MaskToImage": "Convert Mask to Image", |
|
|
|
|
# _for_testing |
|
|
|
|
"VAEDecodeTiled": "VAE Decode (Tiled)", |
|
|
|
|
"VAEEncodeTiled": "VAE Encode (Tiled)", |
|
|
|
|