Browse Source

Pull some small changes from the other repo.

pull/1727/head
comfyanonymous 1 year ago
parent
commit
20d3852aa1
  1. 5
      comfy/model_management.py
  2. 4
      comfy/utils.py
  3. 3
      comfy_extras/nodes_custom_sampler.py
  4. 19
      execution.py
  5. 2
      folder_paths.py
  6. 2
      nodes.py

5
comfy/model_management.py

@@ -354,6 +354,8 @@ def load_models_gpu(models, memory_required=0):
current_loaded_models.insert(0, current_loaded_models.pop(index))
models_already_loaded.append(loaded_model)
else:
if hasattr(x, "model"):
print(f"Requested to load {x.model.__class__.__name__}")
models_to_load.append(loaded_model)
if len(models_to_load) == 0:
@@ -363,7 +365,7 @@ def load_models_gpu(models, memory_required=0):
free_memory(extra_mem, d, models_already_loaded)
return
print("loading new")
print(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
total_memory_required = {}
for loaded_model in models_to_load:
@@ -405,7 +407,6 @@ def load_model_gpu(model):
def cleanup_models():
to_delete = []
for i in range(len(current_loaded_models)):
print(sys.getrefcount(current_loaded_models[i].model))
if sys.getrefcount(current_loaded_models[i].model) <= 2:
to_delete = [i] + to_delete

4
comfy/utils.py

@@ -408,6 +408,10 @@ def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_am
output[b:b+1] = out/out_div
return output
PROGRESS_BAR_ENABLED = True
def set_progress_bar_enabled(enabled):
global PROGRESS_BAR_ENABLED
PROGRESS_BAR_ENABLED = enabled
PROGRESS_BAR_HOOK = None
def set_progress_bar_global_hook(function):

3
comfy_extras/nodes_custom_sampler.py

@@ -3,6 +3,7 @@ import comfy.sample
from comfy.k_diffusion import sampling as k_diffusion_sampling
import latent_preview
import torch
import comfy.utils
class BasicScheduler:
@@ -219,7 +220,7 @@ class SamplerCustom:
x0_output = {}
callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output)
disable_pbar = False
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed)
out = latent.copy()

19
execution.py

@@ -2,6 +2,7 @@ import os
import sys
import copy
import json
import logging
import threading
import heapq
import traceback
@@ -156,7 +157,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute
if server.client_id is not None:
server.send_sync("executed", { "node": unique_id, "output": output_ui, "prompt_id": prompt_id }, server.client_id)
except comfy.model_management.InterruptProcessingException as iex:
print("Processing interrupted")
logging.info("Processing interrupted")
# skip formatting inputs/outputs
error_details = {
@@ -177,8 +178,8 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute
for node_id, node_outputs in outputs.items():
output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs]
print("!!! Exception during processing !!!")
print(traceback.format_exc())
logging.error("!!! Exception during processing !!!")
logging.error(traceback.format_exc())
error_details = {
"node_id": unique_id,
@@ -636,11 +637,11 @@ def validate_prompt(prompt):
if valid is True:
good_outputs.add(o)
else:
print(f"Failed to validate prompt for output {o}:")
logging.error(f"Failed to validate prompt for output {o}:")
if len(reasons) > 0:
print("* (prompt):")
logging.error("* (prompt):")
for reason in reasons:
print(f" - {reason['message']}: {reason['details']}")
logging.error(f" - {reason['message']}: {reason['details']}")
errors += [(o, reasons)]
for node_id, result in validated.items():
valid = result[0]
@@ -656,11 +657,11 @@ def validate_prompt(prompt):
"dependent_outputs": [],
"class_type": class_type
}
print(f"* {class_type} {node_id}:")
logging.error(f"* {class_type} {node_id}:")
for reason in reasons:
print(f" - {reason['message']}: {reason['details']}")
logging.error(f" - {reason['message']}: {reason['details']}")
node_errors[node_id]["dependent_outputs"].append(o)
print("Output will be ignored")
logging.error("Output will be ignored")
if len(good_outputs) == 0:
errors_list = []

2
folder_paths.py

@@ -29,6 +29,8 @@ folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes
folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions)
folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")

2
nodes.py

@@ -1202,7 +1202,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
noise_mask = latent["noise_mask"]
callback = latent_preview.prepare_callback(model, steps)
disable_pbar = False
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)

Loading…
Cancel
Save