Disable pytorch attention in VAE for AMD.

comfyanonymous 2025-02-14 05:42:14 -05:00
parent d7b4bf21a2
commit 1cd6cd6080
2 changed files with 6 additions and 1 deletion

comfy/ldm/modules/diffusionmodules/model.py

@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled():
+    elif model_management.pytorch_attention_enabled_vae():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
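
For reference, the "pytorch attention" branch selected here is the fused torch.nn.functional.scaled_dot_product_attention kernel, which is what the new per-VAE check disables on AMD. A minimal sketch of that style of backend (the wrapper and tensor shapes are assumptions for illustration, not ComfyUI's exact code):

import torch

def pytorch_attention(q, k, v):
    # q, k, v: (batch, heads, tokens, head_dim). Fused SDPA kernel --
    # the path this commit stops using for VAE attention on AMD GPUs.
    return torch.nn.functional.scaled_dot_product_attention(q, k, v)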

comfy/model_management.py

@@ -912,6 +912,11 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION

+def pytorch_attention_enabled_vae():
+    if is_amd():
+        return False # enabling pytorch attention on AMD currently causes crash when doing high res
+    return pytorch_attention_enabled()
+
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
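
The effect of the new helper, as a self-contained sketch that runs anywhere; is_amd() here is a simplified stand-in for ComfyUI's real helper (torch.version.hip is only set on ROCm builds of PyTorch, so it serves as an AMD check):

import torch

ENABLE_PYTORCH_ATTENTION = True

def is_amd():
    # torch.version.hip is a version string on ROCm builds, None otherwise
    return torch.version.hip is not None

def pytorch_attention_enabled():
    return ENABLE_PYTORCH_ATTENTION

def pytorch_attention_enabled_vae():
    # VAE-specific gate: high-resolution VAE decodes currently crash with
    # pytorch attention on AMD, so report it as disabled there.
    if is_amd():
        return False
    return pytorch_attention_enabled()

print(pytorch_attention_enabled_vae())  # False on ROCm builds, True elsewhere

Callers like vae_attention() then fall through to the next backend in their preference order, so AMD users get a non-fused attention path in the VAE while attention elsewhere in the model is unaffected.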