
Cleanup.

pull/796/merge
comfyanonymous committed 1 year ago
commit 103c487a89
  1. comfy/ldm/modules/attention.py (9 changes)
  2. main.py (4 changes)

comfy/ldm/modules/attention.py (9 changes)

@@ -16,11 +16,14 @@ if model_management.xformers_enabled():
     import xformers
     import xformers.ops
 
+from comfy.cli_args import args
 # CrossAttn precision handling
-import os
-_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
+if args.dont_upcast_attention:
+    print("disabling upcasting of attention")
+    _ATTN_PRECISION = "fp16"
+else:
+    _ATTN_PRECISION = "fp32"
 
-from comfy.cli_args import args
 
 def exists(val):
     return val is not None
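
What the change does: the attention precision used to be chosen through an ATTN_PRECISION environment variable set by main.py; it is now derived directly from comfy.cli_args when attention.py is imported. Downstream, _ATTN_PRECISION decides whether queries and keys are upcast to float32 before the similarity matmul so the fp16 scores do not overflow. A minimal sketch of that pattern, with illustrative function and argument names rather than the repository's exact code:

import torch
from torch import einsum

def attention_scores(q, k, scale, attn_precision="fp32"):
    # "fp32": upcast q and k so the similarity matmul runs in float32 and
    # cannot overflow before the softmax; "fp16" (the dont_upcast_attention
    # path) keeps the inputs as they are for speed.
    if attn_precision == "fp32":
        q, k = q.float(), k.float()
    return einsum("b i d, b j d -> b i j", q, k) * scale

In the new module-level code above, the equivalent call would simply pass attn_precision=_ATTN_PRECISION.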

main.py (4 changes)

@@ -14,10 +14,6 @@ if os.name == "nt":
     logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
 
 if __name__ == "__main__":
-    if args.dont_upcast_attention:
-        print("disabling upcasting of attention")
-        os.environ['ATTN_PRECISION'] = "fp16"
-
     if args.cuda_device is not None:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
         print("Set cuda device to:", args.cuda_device)
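
With the environment-variable hop removed, main.py no longer needs to know about attention precision at all; attention.py reads the option straight from comfy.cli_args. For orientation, a sketch of how such a flag is typically declared with argparse; comfy/cli_args.py is not part of this diff, so everything here beyond the dont_upcast_attention destination name is an assumption:

import argparse

parser = argparse.ArgumentParser()
# argparse maps "--dont-upcast-attention" to args.dont_upcast_attention,
# defaulting to False unless the flag is passed on the command line.
parser.add_argument("--dont-upcast-attention", action="store_true",
                    help="Disable upcasting of attention to fp32.")
args = parser.parse_args([])  # empty list so the sketch runs standalone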
