From 1cd6cd608086a8ff8789b747b8d4f8b9273e576e Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 14 Feb 2025 05:42:14 -0500
Subject: [PATCH] Disable pytorch attention in VAE for AMD.

---
 comfy/ldm/modules/diffusionmodules/model.py | 2 +-
 comfy/model_management.py                   | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 3bf83a7e2..8162742cf 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled():
+    elif model_management.pytorch_attention_enabled_vae():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
diff --git a/comfy/model_management.py b/comfy/model_management.py
index dd8a2a28f..fb924f432 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -912,6 +912,11 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
+def pytorch_attention_enabled_vae():
+    if is_amd():
+        return False # enabling pytorch attention on AMD currently causes crash when doing high res
+    return pytorch_attention_enabled()
+
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
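
A minimal standalone sketch of the gate this patch adds: the VAE asks a VAE-specific helper instead of the global flag, so on AMD it falls back to the non-pytorch attention path even when pytorch attention is enabled globally. The function names mirror comfy/model_management.py; the is_amd() body below is an illustrative assumption (a ROCm/HIP build of PyTorch is treated as AMD), not the module's actual implementation.

    # sketch.py -- illustrative, not the ComfyUI module itself
    import torch

    ENABLE_PYTORCH_ATTENTION = True  # normally derived from command-line flags / device checks

    def is_amd():
        # Assumption for this sketch: a ROCm (HIP) build of PyTorch implies an AMD GPU.
        return torch.version.hip is not None

    def pytorch_attention_enabled():
        return ENABLE_PYTORCH_ATTENTION

    def pytorch_attention_enabled_vae():
        # The VAE-specific gate added by the patch: even with pytorch attention
        # enabled globally, the VAE avoids it on AMD because high-resolution
        # decodes currently crash there.
        if is_amd():
            return False
        return pytorch_attention_enabled()

    if __name__ == "__main__":
        print("pytorch attention (global):", pytorch_attention_enabled())
        print("pytorch attention (VAE):   ", pytorch_attention_enabled_vae())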