@@ -183,7 +183,7 @@ class VAE:
         steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap)
         pbar = comfy.utils.ProgressBar(steps)

-        encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float()
+        encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).sample().float()
         samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
         samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
         samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar)
@@ -202,7 +202,7 @@ class VAE:
             pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
             for x in range(0, samples_in.shape[0], batch_number):
                 samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
-                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()
+                pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).cpu().float() + 1.0) / 2.0, min=0.0, max=1.0)
         except model_management.OOM_EXCEPTION as e:
             print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
             pixel_samples = self.decode_tiled_(samples_in)
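Both hunks move the same kind of normalization to the other side of the dtype/device conversion: on the encode path the `2. * a - 1.` scaling now runs before the cast to `self.vae_dtype`, and on the decode path the `+ 1.0`, `/ 2.0` and clamp now run after the result has been moved to the CPU as float32. The sketch below is a minimal, self-contained illustration of that ordering difference; the tensor names and the `float16` dtype are stand-ins, and the `.to(self.device)` moves from the real code are omitted, so this is not ComfyUI code itself.

```python
import torch

vae_dtype = torch.float16                              # hypothetical VAE compute dtype
pixels = torch.rand(1, 3, 64, 64)                      # fake [0, 1] image batch, float32
decoded = torch.randn(1, 3, 64, 64).to(vae_dtype)      # fake decoder output in vae_dtype

# Encode path
enc_old = 2. * pixels.to(vae_dtype) - 1.               # old order: cast first, scale/shift in fp16
enc_new = (2. * pixels - 1.).to(vae_dtype)             # new order: scale/shift in fp32, then cast

# Decode path
dec_old = torch.clamp((decoded + 1.0) / 2.0, min=0.0, max=1.0).cpu().float()   # old: post-process in fp16, then cast
dec_new = torch.clamp((decoded.cpu().float() + 1.0) / 2.0, min=0.0, max=1.0)   # new: cast to fp32 on CPU, then post-process

# The two orderings differ only by fp16 rounding of the arithmetic.
print((enc_old.float() - enc_new.float()).abs().max())
print((dec_old - dec_new).abs().max())
```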