diff --git a/comfy/model_management.py b/comfy/model_management.py index 8c859d3f..ced94cc0 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -12,15 +12,22 @@ total_vram = 0 total_vram_available_mb = -1 import sys +import psutil set_vram_to = NORMAL_VRAM try: import torch total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) - if total_vram <= 4096 and not "--normalvram" in sys.argv: - print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram") - set_vram_to = LOW_VRAM + total_ram = psutil.virtual_memory().total / (1024 * 1024) + forced_normal_vram = "--normalvram" in sys.argv + if not forced_normal_vram: + if total_vram <= 4096: + print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram") + set_vram_to = LOW_VRAM + elif total_vram > total_ram * 1.2 and total_vram > 14336: + print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram") + set_vram_to = HIGH_VRAM except: pass diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index d9779a80..ddc05e6d 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -170,7 +170,7 @@ "\n", "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", "\n", - "!python main.py --highvram --dont-print-server" + "!python main.py --dont-print-server" ], "metadata": { "id": "hhhhhhhhhh"