diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 757fc245..353bb51e 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -129,7 +129,7 @@ if args.disable_auto_launch:
     args.auto_launch = False
 
 import logging
-logging_level = logging.WARNING
+logging_level = logging.INFO
 if args.verbose:
     logging_level = logging.DEBUG
 
diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index a5e7b23f..1a72412b 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -432,7 +432,7 @@ def load_controlnet(ckpt_path, model=None):
         logging.warning("missing controlnet keys: {}".format(missing))
 
     if len(unexpected) > 0:
-        logging.info("unexpected controlnet keys: {}".format(unexpected))
+        logging.debug("unexpected controlnet keys: {}".format(unexpected))
 
     global_average_pooling = False
     filename = os.path.splitext(ckpt_path)[0]
@@ -545,6 +545,6 @@ def load_t2i_adapter(t2i_data):
         logging.warning("t2i missing {}".format(missing))
 
     if len(unexpected) > 0:
-        logging.info("t2i unexpected {}".format(unexpected))
+        logging.debug("t2i unexpected {}".format(unexpected))
 
     return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)
diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py
index 18398cb3..08018c54 100644
--- a/comfy/diffusers_convert.py
+++ b/comfy/diffusers_convert.py
@@ -178,7 +178,7 @@ def convert_vae_state_dict(vae_state_dict):
     for k, v in new_state_dict.items():
         for weight_name in weights_to_convert:
             if f"mid.attn_1.{weight_name}.weight" in k:
-                logging.info(f"Reshaping {k} for SD format")
+                logging.debug(f"Reshaping {k} for SD format")
                 new_state_dict[k] = reshape_weight_for_sd(v)
     return new_state_dict
 
diff --git a/comfy/model_base.py b/comfy/model_base.py
index a2514ca5..5da71e63 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -67,8 +67,8 @@ class BaseModel(torch.nn.Module):
         if self.adm_channels is None:
             self.adm_channels = 0
         self.inpaint_model = False
-        logging.warning("model_type {}".format(model_type.name))
-        logging.info("adm {}".format(self.adm_channels))
+        logging.info("model_type {}".format(model_type.name))
+        logging.debug("adm {}".format(self.adm_channels))
 
     def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs):
         sigma = t
diff --git a/comfy/model_management.py b/comfy/model_management.py
index dd262e26..2f0a0a62 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -30,7 +30,7 @@ lowvram_available = True
 xpu_available = False
 
 if args.deterministic:
-    logging.warning("Using deterministic algorithms for pytorch")
+    logging.info("Using deterministic algorithms for pytorch")
     torch.use_deterministic_algorithms(True, warn_only=True)
 
 directml_enabled = False
@@ -42,7 +42,7 @@ if args.directml is not None:
         directml_device = torch_directml.device()
     else:
         directml_device = torch_directml.device(device_index)
-    logging.warning("Using directml with device: {}".format(torch_directml.device_name(device_index)))
+    logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index)))
     # torch_directml.disable_tiled_resources(True)
     lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.
 
@@ -118,7 +118,7 @@ def get_total_memory(dev=None, torch_total_too=False):
 
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
 total_ram = psutil.virtual_memory().total / (1024 * 1024)
-logging.warning("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
+logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
 if not args.normalvram and not args.cpu:
     if lowvram_available and total_vram <= 4096:
         logging.warning("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
@@ -144,7 +144,7 @@ else:
             pass
         try:
             XFORMERS_VERSION = xformers.version.__version__
-            logging.warning("xformers version: {}".format(XFORMERS_VERSION))
+            logging.info("xformers version: {}".format(XFORMERS_VERSION))
             if XFORMERS_VERSION.startswith("0.0.18"):
                 logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
                 logging.warning("Please downgrade or upgrade xformers to a different version.\n")
@@ -212,11 +212,11 @@ elif args.highvram or args.gpu_only:
 FORCE_FP32 = False
 FORCE_FP16 = False
 if args.force_fp32:
-    logging.warning("Forcing FP32, if this improves things please report it.")
+    logging.info("Forcing FP32, if this improves things please report it.")
     FORCE_FP32 = True
 
 if args.force_fp16:
-    logging.warning("Forcing FP16.")
+    logging.info("Forcing FP16.")
     FORCE_FP16 = True
 
 if lowvram_available:
@@ -230,12 +230,12 @@ if cpu_state != CPUState.GPU:
 if cpu_state == CPUState.MPS:
     vram_state = VRAMState.SHARED
 
-logging.warning(f"Set vram state to: {vram_state.name}")
+logging.info(f"Set vram state to: {vram_state.name}")
 
 DISABLE_SMART_MEMORY = args.disable_smart_memory
 
 if DISABLE_SMART_MEMORY:
-    logging.warning("Disabling smart memory management")
+    logging.info("Disabling smart memory management")
 
 def get_torch_device_name(device):
     if hasattr(device, 'type'):
@@ -253,11 +253,11 @@ def get_torch_device_name(device):
         return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
 
 try:
-    logging.warning("Device: {}".format(get_torch_device_name(get_torch_device())))
+    logging.info("Device: {}".format(get_torch_device_name(get_torch_device())))
 except:
     logging.warning("Could not pick default device.")
 
-logging.warning("VAE dtype: {}".format(VAE_DTYPE))
+logging.info("VAE dtype: {}".format(VAE_DTYPE))
 
 current_loaded_models = []
 
@@ -300,7 +300,7 @@ class LoadedModel:
             raise e
 
         if lowvram_model_memory > 0:
-            logging.warning("loading in lowvram mode {}".format(lowvram_model_memory/(1024 * 1024)))
+            logging.info("loading in lowvram mode {}".format(lowvram_model_memory/(1024 * 1024)))
             mem_counter = 0
             for m in self.real_model.modules():
                 if hasattr(m, "comfy_cast_weights"):
@@ -347,7 +347,7 @@ def unload_model_clones(model):
             to_unload = [i] + to_unload
 
     for i in to_unload:
-        logging.warning("unload clone {}".format(i))
+        logging.debug("unload clone {}".format(i))
         current_loaded_models.pop(i).model_unload()
 
 def free_memory(memory_required, device, keep_loaded=[]):
@@ -389,7 +389,7 @@ def load_models_gpu(models, memory_required=0):
             models_already_loaded.append(loaded_model)
         else:
             if hasattr(x, "model"):
-                logging.warning(f"Requested to load {x.model.__class__.__name__}")
+                logging.info(f"Requested to load {x.model.__class__.__name__}")
             models_to_load.append(loaded_model)
 
     if len(models_to_load) == 0:
@@ -399,7 +399,7 @@ def load_models_gpu(models, memory_required=0):
                 free_memory(extra_mem, d, models_already_loaded)
         return
 
-    logging.warning(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
+    logging.info(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
 
     total_memory_required = {}
     for loaded_model in models_to_load:
diff --git a/comfy/sd.py b/comfy/sd.py
index 3e4b9e47..85821120 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -229,7 +229,7 @@ class VAE:
             logging.warning("Missing VAE keys {}".format(m))
 
         if len(u) > 0:
-            logging.info("Leftover VAE keys {}".format(u))
+            logging.debug("Leftover VAE keys {}".format(u))
 
         if device is None:
             device = model_management.vae_device()
@@ -397,7 +397,7 @@ def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DI
             logging.warning("clip missing: {}".format(m))
 
         if len(u) > 0:
-            logging.info("clip unexpected: {}".format(u))
+            logging.debug("clip unexpected: {}".format(u))
     return clip
 
 def load_gligen(ckpt_path):
@@ -538,18 +538,18 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
                     logging.warning("clip missing: {}".format(m))
 
                 if len(u) > 0:
-                    logging.info("clip unexpected {}:".format(u))
+                    logging.debug("clip unexpected {}:".format(u))
             else:
                 logging.warning("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")
 
     left_over = sd.keys()
     if len(left_over) > 0:
-        logging.info("left over keys: {}".format(left_over))
+        logging.debug("left over keys: {}".format(left_over))
 
     if output_model:
         model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
         if inital_load_device != torch.device("cpu"):
-            logging.warning("loaded straight to GPU")
+            logging.info("loaded straight to GPU")
             model_management.load_model_gpu(model_patcher)
 
     return (model_patcher, clip, vae, clipvision)
@@ -589,7 +589,7 @@ def load_unet_state_dict(sd): #load unet in diffusers format
     model.load_model_weights(new_sd, "")
     left_over = sd.keys()
     if len(left_over) > 0:
-        logging.warning("left over keys in unet: {}".format(left_over))
+        logging.info("left over keys in unet: {}".format(left_over))
     return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device)
 
 def load_unet(unet_path):
diff --git a/comfy/utils.py b/comfy/utils.py
index 8caecd86..ab47b8f2 100644
--- a/comfy/utils.py
+++ b/comfy/utils.py
@@ -22,7 +22,7 @@ def load_torch_file(ckpt, safe_load=False, device=None):
         else:
             pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
         if "global_step" in pl_sd:
-            logging.info(f"Global Step: {pl_sd['global_step']}")
+            logging.debug(f"Global Step: {pl_sd['global_step']}")
         if "state_dict" in pl_sd:
             sd = pl_sd["state_dict"]
         else:
diff --git a/nodes.py b/nodes.py
index ca14677d..e2f35dc2 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1925,14 +1925,14 @@ def load_custom_nodes():
             node_import_times.append((time.perf_counter() - time_before, module_path, success))
 
     if len(node_import_times) > 0:
-        logging.warning("\nImport times for custom nodes:")
+        logging.info("\nImport times for custom nodes:")
         for n in sorted(node_import_times):
             if n[2]:
                 import_message = ""
             else:
                 import_message = " (IMPORT FAILED)"
-            logging.warning("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
-        logging.warning("")
+            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
+        logging.info("")
 
 def init_custom_nodes():
     extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
diff --git a/server.py b/server.py
index 7b4f910e..d748a47d 100644
--- a/server.py
+++ b/server.py
@@ -17,6 +17,7 @@ from io import BytesIO
 
 import aiohttp
 from aiohttp import web
+import logging
 
 import mimetypes
 from comfy.cli_args import args
@@ -33,7 +34,7 @@ async def send_socket_catch_exception(function, message):
     try:
         await function(message)
     except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError) as err:
-        print("send error:", err)
+        logging.warning("send error: {}".format(err))
 
 @web.middleware
 async def cache_control(request: web.Request, handler):
@@ -111,7 +112,7 @@ class PromptServer():
 
                 async for msg in ws:
                     if msg.type == aiohttp.WSMsgType.ERROR:
-                        print('ws connection closed with exception %s' % ws.exception())
+                        logging.warning('ws connection closed with exception %s' % ws.exception())
             finally:
                 self.sockets.pop(sid, None)
             return ws
@@ -446,7 +447,7 @@ class PromptServer():
 
         @routes.post("/prompt")
         async def post_prompt(request):
-            print("got prompt")
+            logging.info("got prompt")
             resp_code = 200
             out_string = ""
             json_data = await request.json()
@@ -478,7 +479,7 @@ class PromptServer():
                     response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
                     return web.json_response(response)
                 else:
-                    print("invalid prompt:", valid[1])
+                    logging.warning("invalid prompt: {}".format(valid[1]))
                     return web.json_response({"error": valid[1], "node_errors": valid[3]}, status=400)
             else:
                 return web.json_response({"error": "no prompt", "node_errors": []}, status=400)
@@ -626,8 +627,8 @@ class PromptServer():
         await site.start()
 
         if verbose:
-            print("Starting server\n")
-            print("To see the GUI go to: http://{}:{}".format(address, port))
+            logging.info("Starting server\n")
+            logging.info("To see the GUI go to: http://{}:{}".format(address, port))
         if call_on_start is not None:
             call_on_start(address, port)
 
@@ -639,7 +640,7 @@ class PromptServer():
             try:
                 json_data = handler(json_data)
             except Exception as e:
-                print(f"[ERROR] An error occurred during the on_prompt_handler processing")
+                logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
                 traceback.print_exc()
 
         return json_data
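
Reviewer note (not part of the diff): the net effect is that routine startup/status messages now log at INFO, which is visible by default since cli_args.py lowers the default level from WARNING to INFO, while the noisy key-mismatch dumps drop to DEBUG and only appear with --verbose. The minimal sketch below reproduces that level scheme; the basicConfig call and the sample log lines are illustrative assumptions, since the diff does not show where the level is actually applied.

```python
# Illustrative sketch of the --verbose / logging_level pattern from comfy/cli_args.py above.
# The basicConfig call and the sample messages are assumptions, not lines from this diff.
import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true", help="enable debug logging")
args = parser.parse_args([])  # pass ["--verbose"] to also see the DEBUG lines

logging_level = logging.INFO       # new default: INFO messages are visible
if args.verbose:
    logging_level = logging.DEBUG  # --verbose additionally surfaces key-mismatch dumps

logging.basicConfig(format="%(message)s", level=logging_level)

logging.warning("missing controlnet keys: []")           # always shown
logging.info("Total VRAM 8192 MB, total RAM 32768 MB")   # shown by default now
logging.debug("unexpected controlnet keys: []")          # hidden unless --verbose
```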