Add oneAPI device selector and some other minor changes.

Simon Lui 2024-12-19 01:28:44 -08:00
parent 9f4b181ab3
commit afde678e3c
4 changed files with 20 additions and 6 deletions

View File

@@ -179,10 +179,17 @@ After this you should have everything installed and can proceed to running Comfy
#### Intel GPUs
Intel GPU support is available for all Intel GPUs supported by Intel's Extension for Pytorch (IPEX) with the support requirements listed in the [Installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu) page. Choose your platform and method of install and follow the instructions. The steps are as follows:
Intel GPU support is available starting with Pytorch 2.5, which can be installed using the following command (a quick way to verify the install is shown after the steps below):
1. Start by installing the drivers or kernel listed in the Installation page of IPEX linked above (or newer) for Windows and Linux, if needed.
1. Follow the instructions to install [Intel's oneAPI Basekit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html) for your platform.
```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/test/xpu```
This is the command to install the Pytorch nightly build instead, which might have performance improvements:
```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/xpu```
However, to get the most speed, installing Intel's Extension for Pytorch (IPEX) is highly recommended. The [Installation](https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=gpu) page allows you to interactively choose your platform and method of install and will then output instructions for that specific combination. Generally though, the steps for installing IPEX are as follows:
1. Start by installing the drivers and other software packages listed in the Installation page of IPEX linked above for Windows and Linux, if needed.
1. Install the packages for IPEX using the instructions provided in the Installation page for your platform.
1. Follow the [ComfyUI manual installation](#manual-install-windows-linux) instructions for Windows and Linux and run ComfyUI normally as described above after everything is installed.
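Whichever install path you choose, a quick way to confirm that the XPU backend is usable is to query it from Python. This is only an illustrative check, not something in the repository:

```python
import torch

# On a working install this prints True and a device count of at least 1;
# the hasattr() guard catches builds without any XPU backend at all.
if hasattr(torch, "xpu"):
    print(torch.xpu.is_available())
    print(torch.xpu.device_count())
else:
    print("torch was built without XPU support")
```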

View File

@@ -84,7 +84,8 @@ parser.add_argument("--force-channels-last", action="store_true", help="Force ch
parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.")
parser.add_argument("--oneapi-device", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
class LatentPreviewMethod(enum.Enum):
    NoPreviews = "none"
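The new option is a plain string that is passed through untouched. A minimal standalone sketch of such a flag (not ComfyUI's actual parser; the value shown is simply the standard oneAPI backend:index selector syntax):

```python
import argparse

parser = argparse.ArgumentParser()
# Mirrors the option added above: the selector string is stored as-is and is
# later exported as the ONEAPI_DEVICE_SELECTOR environment variable.
parser.add_argument("--oneapi-device-selector", type=str, default=None,
                    metavar="SELECTOR_STRING",
                    help="Sets the oneAPI device(s) this instance will use.")

args = parser.parse_args(["--oneapi-device-selector", "level_zero:0"])
print(args.oneapi_device_selector)  # -> level_zero:0
```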

View File

@@ -75,7 +75,7 @@ if args.directml is not None:
try:
    import intel_extension_for_pytorch as ipex
    _ = torch.xpu.device_count()
    xpu_available = torch.xpu.is_available()
    xpu_available = xpu_available or torch.xpu.is_available()
except:
    xpu_available = xpu_available or (hasattr(torch, "xpu") and torch.xpu.is_available())
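The change keeps any previously computed xpu_available value instead of overwriting it, and the except branch covers installs without IPEX by probing the native torch.xpu backend. A self-contained sketch of the same detection pattern, written as a function purely for illustration:

```python
import torch

def detect_xpu(previous: bool = False) -> bool:
    """Return True if an Intel XPU device is usable, preferring IPEX when installed."""
    try:
        import intel_extension_for_pytorch as ipex  # noqa: F401 - registers the XPU backend on older torch
        _ = torch.xpu.device_count()
        return previous or torch.xpu.is_available()
    except Exception:
        # No IPEX (or it failed to load): fall back to the native XPU backend.
        return previous or (hasattr(torch, "xpu") and torch.xpu.is_available())

print(detect_xpu())
```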
@@ -214,12 +214,14 @@ if is_intel_xpu():
if args.cpu_vae:
    VAE_DTYPES = [torch.float32]

if ENABLE_PYTORCH_ATTENTION:
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)
    if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
        torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)

if args.lowvram:
    set_vram_to = VRAMState.LOW_VRAM
    lowvram_available = True
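torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp() only exists in recent Pytorch releases, which is what the version check above guards; note that indexing torch_version[2] reads a single character, so it assumes a single-digit minor version. A small standalone sketch of the same guard, written defensively for illustration:

```python
import torch

torch_version = torch.version.__version__  # e.g. "2.5.1"

try:
    # The setter below is only present in recent Pytorch releases, so gate it
    # on the major/minor version characters the same way the code above does.
    if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
        torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
except (IndexError, ValueError, AttributeError):
    pass  # unparsable version string or API not available; keep the default
```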

View File

@@ -114,6 +114,10 @@ if __name__ == "__main__":
        os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.oneapi_device_selector is not None:
        os.environ['ONEAPI_DEVICE_SELECTOR'] = args.oneapi_device_selector
        logging.info("Set oneapi device selector to: {}".format(args.oneapi_device_selector))

    if args.deterministic:
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
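Like the CUDA/HIP handling just above it, the new block simply exports the selector string so the SYCL runtime only exposes the requested devices; selector strings use the standard oneAPI backend:index syntax (for example level_zero:0 for the first Level Zero GPU). A hypothetical standalone illustration, not taken from the repository:

```python
import os

# Hypothetical example: limit this process to the first Level Zero GPU.
# The variable should be set before the SYCL runtime enumerates devices,
# which is presumably why main.py exports it early, next to the CUDA/HIP variables.
os.environ["ONEAPI_DEVICE_SELECTOR"] = "level_zero:0"

import torch  # imported only after the selector is in place

if hasattr(torch, "xpu"):
    print("visible XPU devices:", torch.xpu.device_count())
```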