diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 3bf83a7e2..8162742cf 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled():
+    elif model_management.pytorch_attention_enabled_vae():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
diff --git a/comfy/model_management.py b/comfy/model_management.py
index dd8a2a28f..fb924f432 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -912,6 +912,11 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
+def pytorch_attention_enabled_vae():
+    if is_amd():
+        return False # enabling pytorch attention on AMD currently causes crash when doing high res
+    return pytorch_attention_enabled()
+
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
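
For context, here is a minimal standalone sketch of the backend-selection behavior this change produces. The helper names mirror the ComfyUI functions touched by the diff, but the bodies of `is_amd` and `xformers_enabled_vae` are hypothetical stand-ins hard-coded for illustration; the real implementations query the torch device and xformers availability.

```python
# Sketch of the VAE attention-backend fallback after this change.
# is_amd() / xformers_enabled_vae() are stubs, not the real detection logic.
import logging

ENABLE_PYTORCH_ATTENTION = True

def is_amd():
    return True  # stub: pretend we are on an AMD GPU

def xformers_enabled_vae():
    return False  # stub: pretend xformers is unavailable

def pytorch_attention_enabled():
    return ENABLE_PYTORCH_ATTENTION

def pytorch_attention_enabled_vae():
    # The new guard: never use pytorch attention in the VAE on AMD,
    # even when it is enabled globally.
    if is_amd():
        return False
    return pytorch_attention_enabled()

def vae_attention():
    if xformers_enabled_vae():
        logging.info("Using xformers attention in VAE")
        return "xformers_attention"
    elif pytorch_attention_enabled_vae():
        logging.info("Using pytorch attention in VAE")
        return "pytorch_attention"
    else:
        # On AMD this branch is now taken, avoiding the high-res crash.
        logging.info("Using fallback attention in VAE")
        return "normal_attention"

print(vae_attention())  # with the stubs above: "normal_attention"
```

The design point: the global `pytorch_attention_enabled()` flag is left untouched for the UNet, and only the VAE path gains the AMD-specific override, so the workaround is scoped to where the crash was observed.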