From f509c6fe21179db585372ec4443c80179fa6c659 Mon Sep 17 00:00:00 2001
From: Simon Lui <502929+simonlui@users.noreply.github.com>
Date: Sun, 12 May 2024 03:36:30 -0700
Subject: [PATCH] Fix Intel GPU memory allocation accuracy and documentation
 update. (#3459)

* Change calculation of memory total to be more accurate; allocated is
  actually smaller than reserved.

* Update README.md install documentation for Intel GPUs.
---
 README.md                 | 11 ++++++++++-
 comfy/model_management.py |  6 +++---
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 2636ce14..312468a9 100644
--- a/README.md
+++ b/README.md
@@ -136,7 +136,16 @@ After this you should have everything installed and can proceed to running Comfy
 
 ### Others:
 
-#### [Intel Arc](https://github.com/comfyanonymous/ComfyUI/discussions/476)
+#### Intel GPUs
+
+Intel GPU support is available for all Intel GPUs supported by Intel's Extension for PyTorch (IPEX), with the support requirements listed on the [Installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu) page. Choose your platform and installation method and follow the instructions. The steps are as follows:
+
+1. If needed, start by installing the drivers or kernel (the listed version or newer) from the IPEX Installation page linked above for Windows and Linux.
+1. Follow the instructions to install [Intel's oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html) for your platform.
+1. Install the IPEX packages using the instructions provided on the Installation page for your platform.
+1. Follow the [ComfyUI manual installation](#manual-install-windows-linux) instructions for Windows and Linux, then run ComfyUI normally as described above once everything is installed.
+
+Additional discussion and help can be found [here](https://github.com/comfyanonymous/ComfyUI/discussions/476).
 
 #### Apple Mac silicon
 
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 3d01e8a2..7b54b256 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -102,8 +102,8 @@ def get_total_memory(dev=None, torch_total_too=False):
     elif is_intel_xpu():
         stats = torch.xpu.memory_stats(dev)
         mem_reserved = stats['reserved_bytes.all.current']
-        mem_total = torch.xpu.get_device_properties(dev).total_memory
         mem_total_torch = mem_reserved
+        mem_total = torch.xpu.get_device_properties(dev).total_memory
     else:
         stats = torch.cuda.memory_stats(dev)
         mem_reserved = stats['reserved_bytes.all.current']
@@ -701,10 +701,10 @@ def get_free_memory(dev=None, torch_free_too=False):
     elif is_intel_xpu():
         stats = torch.xpu.memory_stats(dev)
         mem_active = stats['active_bytes.all.current']
-        mem_allocated = stats['allocated_bytes.all.current']
         mem_reserved = stats['reserved_bytes.all.current']
         mem_free_torch = mem_reserved - mem_active
-        mem_free_total = torch.xpu.get_device_properties(dev).total_memory - mem_allocated
+        mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved
+        mem_free_total = mem_free_xpu + mem_free_torch
     else:
         stats = torch.cuda.memory_stats(dev)
         mem_active = stats['active_bytes.all.current']
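
For context, a minimal sketch (not part of the patch) of the free-memory accounting this change switches to: free memory is what the driver has not yet handed to the PyTorch caching allocator, plus what the allocator has reserved but is not actively using, since allocated bytes understate what the allocator actually holds. It assumes intel_extension_for_pytorch is installed so the `torch.xpu` backend exists; `xpu_free_memory` is a hypothetical helper name, and the real logic lives in `get_free_memory()` in comfy/model_management.py.

```python
# Minimal sketch, not part of the patch: mirrors the revised XPU free-memory math.
# Assumes intel_extension_for_pytorch is installed so torch.xpu is available;
# xpu_free_memory is a hypothetical helper name used only for illustration.
import torch
import intel_extension_for_pytorch as ipex  # noqa: F401  (registers the torch.xpu backend)

def xpu_free_memory(dev):
    stats = torch.xpu.memory_stats(dev)
    mem_active = stats['active_bytes.all.current']       # bytes in live allocations
    mem_reserved = stats['reserved_bytes.all.current']    # bytes held by the caching allocator
    mem_free_torch = mem_reserved - mem_active            # reusable inside the allocator
    mem_free_xpu = torch.xpu.get_device_properties(dev).total_memory - mem_reserved
    return mem_free_xpu + mem_free_torch                  # device-free plus allocator-free

if torch.xpu.is_available():
    dev = torch.device("xpu:0")
    print(f"Approx. free XPU memory: {xpu_free_memory(dev) / (1024 ** 3):.2f} GiB")
```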