From 62a5b4ee88d1834ce9993c10f487bfa3cc5ad39e Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 14 Mar 2025 03:21:16 -0400
Subject: [PATCH] Update attention.py

---
 comfy/ldm/modules/attention.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 7d2e7bbe..3e5089a6 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -544,7 +544,7 @@ def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
             causal=False,
         ).transpose(1, 2)
     except Exception as e:
-        logging.warning("Flash Attention failed, using default SDPA: {e}")
+        logging.warning(f"Flash Attention failed, using default SDPA: {e}")
        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    if not skip_output_reshape:
        out = (
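
For context, a minimal self-contained sketch (not ComfyUI code) of the bug this patch fixes: without the f prefix, logging.warning emits the literal text "{e}" instead of the interpolated exception message.

    import logging

    logging.basicConfig(level=logging.WARNING)

    try:
        # Simulate the Flash Attention call failing, mirroring the patched except block.
        raise RuntimeError("flash_attn not installed")
    except Exception as e:
        # Before the patch: plain string, so "{e}" is logged verbatim.
        logging.warning("Flash Attention failed, using default SDPA: {e}")
        # After the patch: f-string, so the exception message is interpolated.
        logging.warning(f"Flash Attention failed, using default SDPA: {e}")

An equivalent alternative would be logging's lazy %-style formatting, logging.warning("Flash Attention failed, using default SDPA: %s", e), which defers string formatting until the record is actually emitted; the patch simply adds the missing f prefix to keep the existing style.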