import torch

import nodes
import comfy.utils

class SD_4XUpscale_Conditioning:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "images": ("IMAGE",),
                              "positive": ("CONDITIONING",),
                              "negative": ("CONDITIONING",),
                              "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                              }}
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")

    FUNCTION = "encode"

    CATEGORY = "conditioning/upscale_diffusion"

    def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
        # Images are [batch, height, width, channels]; derive the target
        # output size from the requested scale ratio.
        width = max(1, round(images.shape[-2] * scale_ratio))
        height = max(1, round(images.shape[-3] * scale_ratio))

        # Move channels first, rescale pixel values from [0, 1] to [-1, 1],
        # and resize to a quarter of the target size: the 4x upscale model
        # takes the low-resolution image at latent resolution.
        pixels = comfy.utils.common_upscale((images.movedim(-1, 1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center")

        out_cp = []
        out_cn = []

        # Attach the quarter-resolution image and the noise augmentation
        # strength to a copy of every conditioning entry.
        for t in positive:
            n = [t[0], t[1].copy()]
            n[1]['concat_image'] = pixels
            n[1]['noise_augmentation'] = noise_augmentation
            out_cp.append(n)

        for t in negative:
            n = [t[0], t[1].copy()]
            n[1]['concat_image'] = pixels
            n[1]['noise_augmentation'] = noise_augmentation
            out_cn.append(n)

        # Start from an empty latent at a quarter of the target resolution,
        # matching the x4 upscaler's latent space.
        latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
        return (out_cp, out_cn, {"samples": latent})

NODE_CLASS_MAPPINGS = {
    "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
}
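
# Usage sketch (an assumption, not part of this module): in a ComfyUI graph
# the two conditioning outputs and the latent from this node would typically
# feed a KSampler running the SD 4x upscale model. Called directly, it looks
# roughly like this, where `images`, `positive`, and `negative` are
# hypothetical inputs that would come from upstream image-loading and
# text-encoding nodes:
#
#   positive_out, negative_out, latent_out = SD_4XUpscale_Conditioning().encode(
#       images, positive, negative, scale_ratio=4.0, noise_augmentation=0.0)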