@@ -5,6 +5,7 @@ import sys
import json
import hashlib
import traceback
import math
from PIL import Image
from PIL.PngImagePlugin import PngInfo
@@ -16,6 +17,7 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "co
import comfy.diffusers_convert
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
@@ -58,14 +60,44 @@ class ConditioningCombine:
    def combine(self, conditioning_1, conditioning_2):
        return (conditioning_1 + conditioning_2, )

class ConditioningAverage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ),
                             "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"
    CATEGORY = "conditioning"
    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        out = []
        if len(conditioning_from) > 1:
            print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")
        cond_from = conditioning_from[0][0]
        for i in range(len(conditioning_to)):
            t1 = conditioning_to[i][0]
            t0 = cond_from[:, :t1.shape[1]]
            if t0.shape[1] < t1.shape[1]:
                t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1)
            tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
            n = [tw, conditioning_to[i][1].copy()]
            out.append(n)
        return (out, )

class ConditioningSetArea:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                             "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
@@ -79,11 +111,41 @@ class ConditioningSetArea:
            n = [t[0], t[1].copy()]
            n[1]['area'] = (height // 8, width // 8, y // 8, x // 8)
            n[1]['strength'] = strength
            n[1]['set_area_to_bounds'] = False
            n[1]['min_sigma'] = min_sigma
            n[1]['max_sigma'] = max_sigma
            c.append(n)
        return (c, )

class ConditioningSetMask:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING", ),
                             "mask": ("MASK", ),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"
    CATEGORY = "conditioning"
    def append(self, conditioning, mask, set_cond_area, strength):
        c = []
        set_area_to_bounds = False
        if set_cond_area != "default":
            set_area_to_bounds = True
        if len(mask.shape) < 3:
            mask = mask.unsqueeze(0)
        for t in conditioning:
            n = [t[0], t[1].copy()]
            _, h, w = mask.shape
            n[1]['mask'] = mask
            n[1]['set_area_to_bounds'] = set_area_to_bounds
            n[1]['mask_strength'] = strength
            c.append(n)
        return (c, )

class VAEDecode:
    def __init__(self, device="cpu"):
        self.device = device
@@ -126,16 +188,21 @@ class VAEEncode:
    CATEGORY = "latent"
    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
    @staticmethod
    def vae_encode_crop_pixels(pixels):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:, :x, :y, :]
        t = vae.encode(pixels[:,:,:,:3])
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels
    def encode(self, vae, pixels):
        pixels = self.vae_encode_crop_pixels(pixels)
        t = vae.encode(pixels[:,:,:,:3])
        return ({"samples": t}, )

class VAEEncodeTiled:
    def __init__(self, device="cpu"):
        self.device = device
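# A small sketch of the centered crop that vae_encode_crop_pixels now applies, assuming a
# [batch, height, width, channels] image tensor (the dummy shape below is illustrative):
import torch

def crop_to_multiple_of_8(pixels):
    h = (pixels.shape[1] // 8) * 8
    w = (pixels.shape[2] // 8) * 8
    if pixels.shape[1] != h or pixels.shape[2] != w:
        # Split the leftover pixels evenly so the crop stays centered.
        h_off = (pixels.shape[1] % 8) // 2
        w_off = (pixels.shape[2] % 8) // 2
        pixels = pixels[:, h_off:h + h_off, w_off:w + w_off, :]
    return pixels

print(crop_to_multiple_of_8(torch.zeros(1, 517, 701, 3)).shape)  # torch.Size([1, 512, 696, 3])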
@@ -149,46 +216,51 @@ class VAEEncodeTiled:
    CATEGORY = "_for_testing"
    def encode(self, vae, pixels):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:, :x, :y, :]
        pixels = VAEEncode.vae_encode_crop_pixels(pixels)
        t = vae.encode_tiled(pixels[:,:,:,:3])
        return ({"samples": t}, )

class VAEEncodeForInpaint:
    def __init__(self, device="cpu"):
        self.device = device
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", )}}
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"
    CATEGORY = "latent/inpaint"
    def encode(self, vae, pixels, mask):
        x = (pixels.shape[1] // 64) * 64
        y = (pixels.shape[2] // 64) * 64
        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]
    def encode(self, vae, pixels, mask, grow_mask_by=6):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
        pixels = pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            pixels = pixels[:, :x, :y, :]
            mask = mask[:x, :y]
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
            mask = mask[:, :, x_offset:x + x_offset, y_offset:y + y_offset]
        #grow mask by a few pixels to keep things seamless in latent space
        kernel_tensor = torch.ones((1, 1, 6, 6))
        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
        m = (1.0 - mask.round())
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)
            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)
        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:,:,:,i] -= 0.5
            pixels[:,:,:,i] *= m
            pixels[:,:,:,i] += 0.5
        t = vae.encode(pixels)
        return ({"samples": t, "noise_mask": (mask_erosion[0][:x, :y].round())}, )
        return ({"samples": t, "noise_mask": (mask_erosion[:, :, :x, :y].round())}, )

class CheckpointLoader:
    @classmethod
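# A standalone sketch of the "grow_mask_by" dilation used above: convolving a rounded
# mask with an all-ones kernel and clamping to [0, 1] expands the masked region by a
# few pixels (assumes torch; the mask below is a made-up example):
import math
import torch

def grow_mask(mask, grow_by=6):
    # mask: [batch, 1, height, width] with values in [0, 1]
    if grow_by == 0:
        return mask
    kernel = torch.ones((1, 1, grow_by, grow_by))
    padding = math.ceil((grow_by - 1) / 2)
    return torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel, padding=padding), 0, 1)

m = torch.zeros(1, 1, 64, 64)
m[:, :, 24:40, 24:40] = 1.0
print(grow_mask(m).sum() > m.sum())  # tensor(True): the grown mask covers more pixels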
@@ -542,8 +614,8 @@ class EmptyLatentImage:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
        return {"required": {"width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"
@@ -581,8 +653,8 @@ class LatentUpscale:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                             "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"
@@ -684,8 +756,8 @@ class LatentCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                             "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}
@@ -710,16 +782,6 @@ class LatentCrop:
        new_width = width // 8
        to_x = new_width + x
        to_y = new_height + y
        def enforce_image_dim(d, to_d, max_d):
            if to_d > max_d:
                leftover = (to_d - max_d) % 8
                to_d = max_d
                d -= leftover
            return (d, to_d)
        #make sure size is always multiple of 64
        x, to_x = enforce_image_dim(x, to_x, samples.shape[3])
        y, to_y = enforce_image_dim(y, to_y, samples.shape[2])
        s['samples'] = samples[:, :, y:to_y, x:to_x]
        return (s,)
@@ -739,79 +801,27 @@ class SetLatentNoiseMask:
        s["noise_mask"] = mask
        return (s,)

def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    latent_image = latent["samples"]
    noise_mask = None
    device = comfy.model_management.get_torch_device()
    latent_image = latent["samples"]
    if disable_noise:
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        batch_index = 0
        if "batch_index" in latent:
            batch_index = latent["batch_index"]
        generator = torch.manual_seed(seed)
        for i in range(batch_index):
            noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
        skip = latent["batch_index"] if "batch_index" in latent else 0
        noise = comfy.sample.prepare_noise(latent_image, seed, skip)
    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent['noise_mask']
        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
        noise_mask = noise_mask.round()
        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
        noise_mask = torch.cat([noise_mask] * noise.shape[0])
        noise_mask = noise_mask.to(device)
    real_model = None
    comfy.model_management.load_model_gpu(model)
    real_model = model.model
    noise = noise.to(device)
    latent_image = latent_image.to(device)
    positive_copy = []
    negative_copy = []
    control_nets = []
    def get_models(cond):
        models = []
        for c in cond:
            if 'control' in c[1]:
                models += [c[1]['control']]
            if 'gligen' in c[1]:
                models += [c[1]['gligen'][1]]
        return models
    for p in positive:
        t = p[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        positive_copy += [[t] + p[1:]]
    for n in negative:
        t = n[0]
        if t.shape[0] < noise.shape[0]:
            t = torch.cat([t] * noise.shape[0])
        t = t.to(device)
        negative_copy += [[t] + n[1:]]
    models = get_models(positive) + get_models(negative)
    comfy.model_management.load_controlnet_gpu(models)
    if sampler_name in comfy.samplers.KSampler.SAMPLERS:
        sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
    else:
        #other samplers
        pass
        noise_mask = latent["noise_mask"]
    samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
    samples = samples.cpu()
    for m in models:
        m.cleanup()
    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x):
        pbar.update_absolute(step + 1)
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback)
    out = latent.copy()
    out["samples"] = samples
    return (out, )
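# A rough sketch of the seeding idea the removed inline loop implemented and that the
# comfy.sample.prepare_noise call now encapsulates: draw and discard `skip` noise tensors
# from a seeded generator so a latent with a given batch_index gets the same noise it
# would have received inside a larger batch (the latent shape below is illustrative):
import torch

def noise_for_batch_index(latent_image, seed, skip):
    generator = torch.manual_seed(seed)
    for _ in range(skip):
        torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype,
                    layout=latent_image.layout, generator=generator, device="cpu")
    return torch.randn(latent_image.size(), dtype=latent_image.dtype,
                       layout=latent_image.layout, generator=generator, device="cpu")

latent = torch.zeros(1, 4, 64, 64)
n0 = noise_for_batch_index(latent, seed=42, skip=0)
n1 = noise_for_batch_index(latent, seed=42, skip=1)
print(torch.allclose(n0, n1))  # False: index 1 gets the "second" draw for this seed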
@@ -974,8 +984,7 @@ class LoadImage:
    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
@@ -989,20 +998,27 @@ class LoadImage:
    @classmethod
    def IS_CHANGED(s, image):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()
    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)
        return True

class LoadImageMask:
    _color_channels = ["alpha", "red", "green", "blue"]
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        return {"required":
                    {"image": (sorted(os.listdir(input_dir)), ),
                     "channel": (["alpha", "red", "green", "blue"], ),}
                     "channel": (s._color_channels, ),}
                }
    CATEGORY = "mask"
@@ -1010,8 +1026,7 @@ class LoadImageMask:
    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        image_path = folder_paths.get_annotated_filepath(image)
        i = Image.open(image_path)
        if i.getbands() != ("R", "G", "B", "A"):
            i = i.convert("RGBA")
@@ -1028,13 +1043,22 @@ class LoadImageMask:
    @classmethod
    def IS_CHANGED(s, image, channel):
        input_dir = folder_paths.get_input_directory()
        image_path = os.path.join(input_dir, image)
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()
    @classmethod
    def VALIDATE_INPUTS(s, image, channel):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)
        if channel not in s._color_channels:
            return "Invalid color channel: {}".format(channel)
        return True

class ImageScale:
    upscale_methods = ["nearest-exact", "bilinear", "area"]
    crop_methods = ["disabled", "center"]
@@ -1079,10 +1103,10 @@ class ImagePadForOutpaint:
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 64}),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }
@@ -1154,8 +1178,10 @@ NODE_CLASS_MAPPINGS = {
    "ImageScale": ImageScale,
    "ImageInvert": ImageInvert,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "ConditioningAverage": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
@@ -1204,7 +1230,9 @@ NODE_DISPLAY_NAME_MAPPINGS = {
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",