diff --git a/comfy/ldm/models/diffusion/ddpm.py b/comfy/ldm/models/diffusion/ddpm.py
index d1ecd81f..802034c7 100644
--- a/comfy/ldm/models/diffusion/ddpm.py
+++ b/comfy/ldm/models/diffusion/ddpm.py
@@ -81,7 +81,7 @@ class DDPM(torch.nn.Module):
         super().__init__()
         assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
         self.parameterization = parameterization
-        # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
         self.cond_stage_model = None
         self.clip_denoised = clip_denoised
         self.log_every_t = log_every_t
diff --git a/comfy/sd.py b/comfy/sd.py
index 50e1c2fc..50d81f77 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -266,6 +266,7 @@ class CLIP:
         self.cond_stage_model = clip(**(params))
         self.tokenizer = tokenizer(embedding_directory=embedding_directory)
         self.patcher = ModelPatcher(self.cond_stage_model)
+        self.layer_idx = -1

     def clone(self):
         n = CLIP(no_init=True)
@@ -273,6 +274,7 @@ class CLIP:
         n.patcher = self.patcher.clone()
         n.cond_stage_model = self.cond_stage_model
         n.tokenizer = self.tokenizer
+        n.layer_idx = self.layer_idx
         return n

     def load_from_state_dict(self, sd):
@@ -282,9 +284,10 @@ class CLIP:
         return self.patcher.add_patches(patches, strength)

     def clip_layer(self, layer_idx):
-        return self.cond_stage_model.clip_layer(layer_idx)
+        self.layer_idx = layer_idx

     def encode(self, text):
+        self.cond_stage_model.clip_layer(self.layer_idx)
         tokens = self.tokenizer.tokenize_with_weights(text)
         try:
             self.patcher.patch_model()
@@ -744,15 +747,13 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
     else:
         unet_config["num_heads"] = 8 #SD1.x

+    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
+        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
+        out = sd[k]
+        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
+            sd_config["parameterization"] = 'v'
     model = instantiate_from_config(model_config)
     model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

-    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
-        cond = torch.zeros((1, 2, unet_config["context_dim"]), device="cpu")
-        x_in = torch.rand((1, unet_config["in_channels"], 8, 8), device="cpu", generator=torch.manual_seed(1))
-        out = model.apply_model(x_in, torch.tensor([999], device="cpu"), cond)
-        if out.mean() < -0.6: #mean of eps should be ~0 and mean of v prediction should be ~-1
-            model.parameterization = 'v'
-
     return (ModelPatcher(model), clip, vae)
diff --git a/nodes.py b/nodes.py
index 8168b14e..30eafb35 100644
--- a/nodes.py
+++ b/nodes.py
@@ -220,6 +220,22 @@ class CheckpointLoaderSimple:
         out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
         return out

+class CLIPSetLastLayer:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "clip": ("CLIP", ),
+                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
+                              }}
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "set_last_layer"
+
+    CATEGORY = "conditioning"
+
+    def set_last_layer(self, clip, stop_at_clip_layer):
+        clip = clip.clone()
+        clip.clip_layer(stop_at_clip_layer)
+        return (clip,)
+
 class LoraLoader:
     models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
     lora_dir = os.path.join(models_dir, "loras")
@@ -829,6 +845,7 @@ NODE_CLASS_MAPPINGS = {
     "KSampler": KSampler,
     "CheckpointLoader": CheckpointLoader,
     "CLIPTextEncode": CLIPTextEncode,
+    "CLIPSetLastLayer": CLIPSetLastLayer,
     "VAEDecode": VAEDecode,
     "VAEEncode": VAEEncode,
     "VAEEncodeForInpaint": VAEEncodeForInpaint,
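Note on the detection change in `load_checkpoint_guess_config`: instead of instantiating the model and running `apply_model` on dummy inputs to see whether the output mean looks like eps (~0) or v prediction (~-1), the loader now inspects a single weight tensor before the model is built. The heuristic can be reproduced on its own; the following is a minimal sketch, assuming a locally available SD2.x checkpoint (the path is a placeholder) and that the weights sit under a `state_dict` key, which not every checkpoint file does:

```python
import torch

# Placeholder path; point this at a real SD2.x checkpoint to try it.
ckpt = torch.load("models/checkpoints/sd2-checkpoint.ckpt", map_location="cpu")
sd = ckpt.get("state_dict", ckpt)  # some files store weights at the top level

# Same key and threshold as the diff: the bias of the last output block's
# first transformer norm spreads wider in v-prediction checkpoints.
k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
parameterization = "v" if torch.std(sd[k], unbiased=False) > 0.09 else "eps"
print(parameterization)
```

Because the check runs on the raw state dict, the parameterization can be passed into `instantiate_from_config` via `sd_config` rather than patched onto the model after a trial forward pass.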
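On the `CLIP` wrapper changes: `clone()` shares `cond_stage_model` between copies, so having `clip_layer` mutate the encoder directly would leak the layer setting across clones. The diff instead records `layer_idx` per wrapper and applies it inside `encode()`, which is what lets `CLIPSetLastLayer.set_last_layer` safely do `clone()` followed by `clip_layer()`. A self-contained toy mimic of the pattern (class names here are illustrative, not ComfyUI's):

```python
# Toy mimic of the deferred layer-index pattern in comfy/sd.py's CLIP wrapper.
class ToyEncoder:
    def clip_layer(self, idx):
        self.stop_at = idx  # the real encoder truncates its layer stack here

class ToyCLIP:
    def __init__(self, encoder=None):
        self.cond_stage_model = encoder or ToyEncoder()
        self.layer_idx = -1

    def clone(self):
        n = ToyCLIP(self.cond_stage_model)  # encoder is shared, as in the diff
        n.layer_idx = self.layer_idx        # ...but the layer choice is per-clone
        return n

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx          # record only; nothing mutated yet

    def encode(self, text):
        # Apply this wrapper's choice right before encoding, so clones with
        # different settings can share one encoder without clobbering each other.
        self.cond_stage_model.clip_layer(self.layer_idx)
        return (text, self.layer_idx)

a = ToyCLIP()
b = a.clone()
b.clip_layer(-2)
print(a.encode("x"))  # ('x', -1)
print(b.encode("x"))  # ('x', -2)
```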