From 2f9d6a97ec7e3cb25beb13a320da8ec4573355d3 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 17 Dec 2023 16:59:21 -0500
Subject: [PATCH] Add --deterministic option to make pytorch use deterministic
 algorithms.

---
 comfy/cli_args.py         | 2 +-
 comfy/model_management.py | 4 ++++
 main.py                   | 4 ++++
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index d9c8668f..8de0adb5 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -102,7 +102,7 @@ vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for e
 
 parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
 
-
+parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")
 
 parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
 parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
diff --git a/comfy/model_management.py b/comfy/model_management.py
index b6a9471b..23f39c98 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -28,6 +28,10 @@ total_vram = 0
 lowvram_available = True
 xpu_available = False
 
+if args.deterministic:
+    print("Using deterministic algorithms for pytorch")
+    torch.use_deterministic_algorithms(True, warn_only=True)
+
 directml_enabled = False
 if args.directml is not None:
     import torch_directml
diff --git a/main.py b/main.py
index 1f9c5f44..f6aeceed 100644
--- a/main.py
+++ b/main.py
@@ -64,6 +64,10 @@ if __name__ == "__main__":
         os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
         print("Set cuda device to:", args.cuda_device)
 
+    if args.deterministic:
+        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
+            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
+
     import cuda_malloc
 
 import comfy.utils
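
Reviewer note, not part of the patch: a minimal standalone sketch of what the two additions combine to do at runtime, assuming a CUDA build of PyTorch. The CUBLAS_WORKSPACE_CONFIG variable must be exported before cuBLAS is first used, which is why the patch sets it in main.py ahead of the CUDA-touching imports; warn_only=True keeps ops without a deterministic implementation working and only emits a warning.

import os

# Mirror main.py: set the workspace config before anything initializes cuBLAS.
os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")

import torch

# Mirror model_management.py: request deterministic algorithms, warning
# (rather than raising) when an op has no deterministic implementation.
torch.use_deterministic_algorithms(True, warn_only=True)

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.randn(8, 8, device=device)
print(x @ x)  # matmul now routes through deterministic kernels where available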