Browse Source

Support releases all unoccupied cached memory from XPU

pull/512/head
藍+85CD 2 years ago
parent
commit
d63705d919
  1. 4
      execution.py

4
execution.py

@@ -10,6 +10,8 @@ import gc
import torch
import nodes
from model_management import xpu_available
def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
valid_inputs = class_def.INPUT_TYPES()
input_data_all = {}
@@ -206,6 +208,8 @@ class PromptExecutor:
if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
elif xpu_available:
torch.xpu.empty_cache()
def validate_inputs(prompt, item):

Loading…
Cancel
Save