Browse Source
This is an old model. Load the checkpoint like a regular one and use the new SD_4XUpscale_Conditioning node. (branch: pull/2447/head)
comfyanonymous
11 months ago
6 changed files with 103 additions and 1 deletions
@ -0,0 +1,45 @@
|
||||
import torch |
||||
import nodes |
||||
import comfy.utils |
||||
|
||||
class SD_4XUpscale_Conditioning:
    """ComfyUI node that prepares conditioning for the SD 4x upscale model.

    Downscales the input images to latent resolution, attaches them as a
    'concat_image' on copies of both the positive and negative conditioning,
    and returns an empty (zero) latent sized for the upscaled output.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "images": ("IMAGE",),
                              "positive": ("CONDITIONING",),
                              "negative": ("CONDITIONING",),
                              "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                              # "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}), #TODO
                              }}
    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")

    FUNCTION = "encode"

    CATEGORY = "conditioning/upscale_diffusion"

    def _attach_pixels(self, conditioning, pixels):
        # Copy each conditioning entry's options dict (so the caller's
        # conditioning is not mutated) and attach the scaled image.
        result = []
        for entry in conditioning:
            options = entry[1].copy()
            options['concat_image'] = pixels
            result.append([entry[0], options])
        return result

    def encode(self, images, positive, negative, scale_ratio):
        # Target pixel size after scaling; images are NHWC, so shape[-2]
        # is width and shape[-3] is height. Clamp each to at least 1 px.
        width = max(1, round(images.shape[-2] * scale_ratio))
        height = max(1, round(images.shape[-3] * scale_ratio))

        # Move channels first, map [0, 1] pixels to [-1, 1], then resize
        # to latent resolution (1/4 of pixel resolution in each axis).
        normalized = (images.movedim(-1,1) * 2.0) - 1.0
        pixels = comfy.utils.common_upscale(normalized, width // 4, height // 4, "bilinear", "center")

        out_cp = self._attach_pixels(positive, pixels)
        out_cn = self._attach_pixels(negative, pixels)

        # Zero latent at the upscaled target resolution (4 latent channels).
        latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
        return (out_cp, out_cn, {"samples":latent})
||||
|
||||
# Registry consumed by ComfyUI: maps the node's public name to its class.
NODE_CLASS_MAPPINGS = {"SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning}
Loading…
Reference in new issue