# Silence xformers' noisy "A matching Triton is not available" warning:
# drop any record whose message contains that phrase. All other records
# from the "xformers" logger pass through unchanged.
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
if __name__ == "__main__":
    # Manual, human-readable summary of the supported CLI flags, shown when
    # --help is passed. Kept in sync with the parser.add_argument() calls
    # registered elsewhere in this file.
    if '--help' in sys.argv:
        print()
        print("Valid Command line Arguments:")
        print("\t--listen [ip]\t\t\tListen on ip or 0.0.0.0 if none given so the UI can be accessed from other computers.")
        print("\t--port 8188\t\t\tSet the listen port.")
        print()
        print("\t--extra-model-paths-config file.yaml\tload an extra_model_paths.yaml file.")
        print("\t--output-directory path/to/output\tSet the ComfyUI output directory.")
        print()
        print()
        print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n")
        print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.")
        print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.")
        # Consistency fix: the flags below are registered on the argument
        # parser in this file but were missing from the manual help text.
        print("\t--cuda-device 0\t\t\tSet the id of the cuda device this instance will use.")
        print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
        print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.")
        print("\t--lowvram\t\t\tSplit the unet in parts to use less vram.")
        # NOTE(review): the original falls through after printing help;
        # consider sys.exit(0) here -- confirm against the rest of the file.
# --- Network options ---------------------------------------------------
parser.add_argument(
    "--listen", type=str, default="127.0.0.1",
    help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.")
parser.add_argument(
    "--port", type=int, default=8188,
    help="Set the listen port.")

# --- Path configuration ------------------------------------------------
parser.add_argument(
    "--extra-model-paths-config", type=str, default=None,
    help="Load an extra_model_paths.yaml file.")
parser.add_argument(
    "--output-directory", type=str, default=None,
    help="Set the ComfyUI output directory.")

# --- Attention implementation toggles ----------------------------------
parser.add_argument(
    "--dont-upcast-attention", action="store_true",
    help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
parser.add_argument(
    "--use-split-cross-attention", action="store_true",
    help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.")
parser.add_argument(
    "--use-pytorch-cross-attention", action="store_true",
    help="Use the new pytorch 2.0 cross attention function.")

# --- Device / VRAM management ------------------------------------------
parser.add_argument(
    "--cuda-device", type=int, default=None,
    help="Set the id of the cuda device this instance will use.")
parser.add_argument(
    "--highvram", action="store_true",
    help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
parser.add_argument(
    "--normalvram", action="store_true",
    help="Used to force normal vram use if lowvram gets automatically enabled.")
parser.add_argument(
    "--lowvram", action="store_true",
    help="Split the unet in parts to use less vram.")