Browse Source

Enable highvram automatically when vram >> ram

pull/38/head
comfyanonymous 2 years ago
parent
commit
86721d5158
  1. 9
      comfy/model_management.py
  2. 2
      notebooks/comfyui_colab.ipynb

9
comfy/model_management.py

@@ -12,15 +12,22 @@ total_vram = 0
 total_vram_available_mb = -1
 import sys
+import psutil
 set_vram_to = NORMAL_VRAM
 try:
     import torch
     total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
-    if total_vram <= 4096 and not "--normalvram" in sys.argv:
-        print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
-        set_vram_to = LOW_VRAM
+    total_ram = psutil.virtual_memory().total / (1024 * 1024)
+    forced_normal_vram = "--normalvram" in sys.argv
+    if not forced_normal_vram:
+        if total_vram <= 4096:
+            print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
+            set_vram_to = LOW_VRAM
+        elif total_vram > total_ram * 1.2 and total_vram > 14336:
+            print("Enabling highvram mode because your GPU has more vram than your computer has ram. If you don't want this use: --normalvram")
+            vram_state = HIGH_VRAM
 except:
     pass

2
notebooks/comfyui_colab.ipynb

@@ -170,7 +170,7 @@
 "\n",
 "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
 "\n",
-"!python main.py --highvram --dont-print-server"
+"!python main.py --dont-print-server"
 ],
 "metadata": {
 "id": "hhhhhhhhhh"

Loading…
Cancel
Save