@@ -359,6 +359,62 @@ class VAEEncodeForInpaint:
 
         return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
 
+class InpaintModelConditioning:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"positive": ("CONDITIONING", ),
+                             "negative": ("CONDITIONING", ),
+                             "vae": ("VAE", ),
+                             "pixels": ("IMAGE", ),
+                             "mask": ("MASK", ),
+                             }}
+
+    RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
+    RETURN_NAMES = ("positive", "negative", "latent")
+    FUNCTION = "encode"
+
+    CATEGORY = "conditioning/inpaint"
+
+    def encode(self, positive, negative, pixels, vae, mask):
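+        # round spatial dims down to multiples of 8 (the VAE's spatial downscale factor)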
+        x = (pixels.shape[1] // 8) * 8
+        y = (pixels.shape[2] // 8) * 8
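+        # resize the mask to pixel resolution so it can be applied per-pixel below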
+        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
+
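+        # keep the uncropped pixels for the output latent; center-crop a working copy
+        # (and the mask) when the dims are not already multiples of 8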
+        orig_pixels = pixels
+        pixels = orig_pixels.clone()
+        if pixels.shape[1] != x or pixels.shape[2] != y:
+            x_offset = (pixels.shape[1] % 8) // 2
+            y_offset = (pixels.shape[2] % 8) // 2
+            pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
+            mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]
+
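+        # neutralize masked pixels to 0.5 gray: shift to [-0.5, 0.5], zero the
+        # masked region via (1 - mask), then shift back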
+        m = (1.0 - mask.round()).squeeze(1)
+        for i in range(3):
+            pixels[:,:,:,i] -= 0.5
+            pixels[:,:,:,i] *= m
+            pixels[:,:,:,i] += 0.5
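+        # encode twice: the grayed-out pixels become the model's concat input,
+        # the untouched pixels become the starting latent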
+        concat_latent = vae.encode(pixels)
+        orig_latent = vae.encode(orig_pixels)
+
+        out_latent = {}
+
+        out_latent["samples"] = orig_latent
+        out_latent["noise_mask"] = mask
+
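+        # attach the concat latent and resized mask to every conditioning entry,
+        # positive and negative alike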
+        out = []
+        for conditioning in [positive, negative]:
+            c = []
+            for t in conditioning:
+                d = t[1].copy()
+                d["concat_latent_image"] = concat_latent
+                d["concat_mask"] = mask
+                n = [t[0], d]
+                c.append(n)
+            out.append(c)
+        return (out[0], out[1], out_latent)
+
+
 class SaveLatent:
     def __init__(self):
         self.output_dir = folder_paths.get_output_directory()
 
@@ -1628,10 +1684,11 @@ class ImagePadForOutpaint:
     def expand_image(self, image, left, top, right, bottom, feathering):
         d1, d2, d3, d4 = image.size()
 
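+        # pad with neutral 0.5 gray instead of black, matching the masked-pixel
+        # convention used by InpaintModelConditioning above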
-        new_image = torch.zeros(
+        new_image = torch.ones(
             (d1, d2 + top + bottom, d3 + left + right, d4),
             dtype=torch.float32,
-        )
+        ) * 0.5
 
         new_image[:, top:top + d2, left:left + d3, :] = image
 
         mask = torch.ones(
@@ -1723,6 +1780,7 @@ NODE_CLASS_MAPPINGS = {
     "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
     "GLIGENLoader": GLIGENLoader,
     "GLIGENTextBoxApply": GLIGENTextBoxApply,
+    "InpaintModelConditioning": InpaintModelConditioning,
 
     "CheckpointLoader": CheckpointLoader,
     "DiffusersLoader": DiffusersLoader,
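
# A minimal usage sketch of the node added above, assuming the existing
# common_ksampler helper in nodes.py; the variable names are illustrative,
# not taken from this diff.
#
# positive, negative, latent = InpaintModelConditioning().encode(
#     positive, negative, pixels, vae, mask)
# samples = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler,
#                           positive, negative, latent, denoise=1.0)[0]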