From d63705d9199b6905a2a94b2a6180795d34427f65 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com>
Date: Sat, 15 Apr 2023 15:50:51 +0800
Subject: [PATCH] Support releasing all unoccupied cached memory from XPU

---
 execution.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/execution.py b/execution.py
index 79c9a3ac..9d9ca5f6 100644
--- a/execution.py
+++ b/execution.py
@@ -10,6 +10,8 @@ import gc
 import torch
 import nodes
 
+from model_management import xpu_available
+
 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
     input_data_all = {}
@@ -206,6 +208,8 @@ class PromptExecutor:
         if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
+        elif xpu_available:
+            torch.xpu.empty_cache()
 
 
 def validate_inputs(prompt, item):
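
Note for readers without the full tree: the sketch below shows, in one
self-contained file, the device-aware cache release this patch wires up.
The xpu_available probe approximates what model_management is assumed to
export (detection via Intel's PyTorch extension), and the soft_empty_cache
name is illustrative only; neither is part of this diff.

import gc

import torch

# Assumption: model_management detects XPU roughly like this. Importing
# Intel's PyTorch extension registers the torch.xpu backend, after which
# torch.xpu.is_available() reports whether an XPU device is usable.
try:
    import intel_extension_for_pytorch  # noqa: F401
    xpu_available = torch.xpu.is_available()
except ImportError:
    xpu_available = False

def soft_empty_cache():
    """Release unoccupied cached device memory (illustrative helper)."""
    gc.collect()
    if torch.version.cuda:
        # torch.version.cuda is None on ROCm builds, where emptying the
        # cache reportedly hurts performance, so gate this on CUDA.
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    elif xpu_available:
        # The XPU backend exposes the same cache-release entry point,
        # which is exactly what the hunk in PromptExecutor calls.
        torch.xpu.empty_cache()

if __name__ == "__main__":
    soft_empty_cache()

The design mirrors the patched PromptExecutor: garbage-collect first so
freed tensors return to the allocator cache, then hand that cache back to
the driver on whichever backend is actually present.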