
NaN-safe JSON serialization

json.dumps() produces nonstandard JSON when the data contains NaN: it emits the
bare tokens NaN, Infinity and -Infinity, which JavaScript's JSON.parse() refuses
to load as "JSON".

At minimum, the is_changed key can end up holding NaN (or another non-finite value)
if a node returns NaN from its IS_CHANGED function.

Fortunately, json.loads() can map these constants to None via parse_constant, so
round-tripping the data once is an easy fix.
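
To illustrate the round trip (a minimal standalone sketch; the sample data is made up,
only dump_json mirrors the helper added below in comfy/utils.py):

import json
import math

def dump_json(data):
    # Roundtrip once: parse_constant maps NaN, Infinity and -Infinity to None,
    # then a strict dump (allow_nan=False) guarantees standard JSON output
    good_data = json.loads(json.dumps(data), parse_constant=lambda x: None)
    return json.dumps(good_data, allow_nan=False)

data = {"is_changed": [math.nan], "scale": math.inf}  # hypothetical node output
print(json.dumps(data))  # {"is_changed": [NaN], "scale": Infinity}  <- JSON.parse() rejects this
print(dump_json(data))   # {"is_changed": [null], "scale": null}     <- valid JSON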
pull/3042/head
asagi4 committed 8 months ago
commit 68adca5318
  1. comfy/utils.py (8 changed lines)
  2. comfy_extras/nodes_images.py (10 changed lines)
  3. comfy_extras/nodes_model_merging.py (13 changed lines)
  4. nodes.py (9 changed lines)

comfy/utils.py (8 changed lines)

@@ -6,6 +6,14 @@ import safetensors.torch
import numpy as np
from PIL import Image
import logging
+import json
+
+def dump_json(data):
+    # Roundtrip once to get rid of NaN, Infinity and -Infinity
+    good_data = json.loads(json.dumps(data), parse_constant=lambda x: None)
+    return json.dumps(good_data, allow_nan=False)
+
def load_torch_file(ckpt, safe_load=False, device=None):
    if device is None:
comfy_extras/nodes_images.py (10 changed lines)

@@ -1,12 +1,12 @@
import nodes
import folder_paths
from comfy.cli_args import args
+import comfy.utils

from PIL import Image
from PIL.PngImagePlugin import PngInfo

import numpy as np
-import json
import os

MAX_RESOLUTION = nodes.MAX_RESOLUTION
@@ -109,11 +109,11 @@ class SaveAnimatedWEBP:
        metadata = pil_images[0].getexif()
        if not args.disable_metadata:
            if prompt is not None:
-                metadata[0x0110] = "prompt:{}".format(json.dumps(prompt))
+                metadata[0x0110] = "prompt:{}".format(comfy.utils.dump_json(prompt))
            if extra_pnginfo is not None:
                inital_exif = 0x010f
                for x in extra_pnginfo:
-                    metadata[inital_exif] = "{}:{}".format(x, json.dumps(extra_pnginfo[x]))
+                    metadata[inital_exif] = "{}:{}".format(x, comfy.utils.dump_json(extra_pnginfo[x]))
                    inital_exif -= 1

        if num_frames == 0:
@@ -171,10 +171,10 @@ class SaveAnimatedPNG:
        if not args.disable_metadata:
            metadata = PngInfo()
            if prompt is not None:
-                metadata.add(b"comf", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True)
+                metadata.add(b"comf", "prompt".encode("latin-1", "strict") + b"\0" + comfy.utils.dump_json(prompt).encode("latin-1", "strict"), after_idat=True)
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
-                    metadata.add(b"comf", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True)
+                    metadata.add(b"comf", x.encode("latin-1", "strict") + b"\0" + comfy.utils.dump_json(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True)

        file = f"{filename}_{counter:05}_.png"
        pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:])

comfy_extras/nodes_model_merging.py (13 changed lines)

@@ -4,7 +4,6 @@ import comfy.model_base
import comfy.model_management
import folder_paths
-import json
import os

from comfy.cli_args import args
@@ -167,7 +166,7 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi
    full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, output_dir)
    prompt_info = ""
    if prompt is not None:
-        prompt_info = json.dumps(prompt)
+        prompt_info = comfy.utils.dump_json(prompt)

    metadata = {}
@@ -198,7 +197,7 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi
        metadata["prompt"] = prompt_info
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
-                metadata[x] = json.dumps(extra_pnginfo[x])
+                metadata[x] = comfy.utils.dump_json(extra_pnginfo[x])

    output_checkpoint = f"{filename}_{counter:05}_.safetensors"
    output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
@@ -244,14 +243,14 @@ class CLIPSave:
    def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None):
        prompt_info = ""
        if prompt is not None:
-            prompt_info = json.dumps(prompt)
+            prompt_info = comfy.utils.dump_json(prompt)

        metadata = {}
        if not args.disable_metadata:
            metadata["prompt"] = prompt_info
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
-                    metadata[x] = json.dumps(extra_pnginfo[x])
+                    metadata[x] = comfy.utils.dump_json(extra_pnginfo[x])

        comfy.model_management.load_models_gpu([clip.load_model()])
        clip_sd = clip.get_sd()
@@ -301,14 +300,14 @@ class VAESave:
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
        prompt_info = ""
        if prompt is not None:
-            prompt_info = json.dumps(prompt)
+            prompt_info = comfy.utils.dump_json(prompt)

        metadata = {}
        if not args.disable_metadata:
            metadata["prompt"] = prompt_info
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
-                    metadata[x] = json.dumps(extra_pnginfo[x])
+                    metadata[x] = comfy.utils.dump_json(extra_pnginfo[x])

        output_checkpoint = f"{filename}_{counter:05}_.safetensors"
        output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

nodes.py (9 changed lines)

@@ -2,7 +2,6 @@ import torch
import os
import sys
-import json
import hashlib
import traceback
import math
@@ -447,14 +446,14 @@ class SaveLatent:
        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
-            prompt_info = json.dumps(prompt)
+            prompt_info = comfy.utils.dump_json(prompt)

        metadata = None
        if not args.disable_metadata:
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
-                    metadata[x] = json.dumps(extra_pnginfo[x])
+                    metadata[x] = comfy.utils.dump_json(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"
@@ -1435,10 +1434,10 @@ class SaveImage:
        if not args.disable_metadata:
            metadata = PngInfo()
            if prompt is not None:
-                metadata.add_text("prompt", json.dumps(prompt))
+                metadata.add_text("prompt", comfy.utils.dump_json(prompt))
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
-                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+                    metadata.add_text(x, comfy.utils.dump_json(extra_pnginfo[x]))

        filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
        file = f"{filename_with_batch_num}_{counter:05}_.png"
