"""Math helpers for the Flux model: rotary position embeddings (RoPE) and an
attention wrapper. The wrapper accepts an optional attention mask, which the
optimized_attention backends (PyTorch SDPA or xformers) expand to a compatible
shape."""

import torch
from einops import rearrange
from torch import Tensor

from comfy.ldm.modules.attention import optimized_attention
import comfy.model_management


def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor:
    # Rotate q and k with the precomputed positional frequencies before attention.
    q, k = apply_rope(q, k, pe)

    # q, k, v arrive already split into heads as (batch, heads, seq_len, head_dim),
    # hence skip_reshape=True; the optional mask is forwarded to the attention backend.
    heads = q.shape[1]
    x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask)
    return x
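

# A minimal usage sketch (an illustration, not part of the original module): the
# mask is passed straight through to optimized_attention, which expands masks of
# various shapes to whatever the PyTorch SDPA or xformers backend needs. The helper
# name and the padding scenario below are hypothetical.
def _attention_with_padding_mask(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, valid_len: int) -> Tensor:
    # Boolean mask broadcastable to (batch, heads, q_len, k_len); True marks key
    # positions that may be attended to, blocking everything past valid_len.
    mask = torch.zeros(1, 1, q.shape[2], k.shape[2], dtype=torch.bool, device=q.device)
    mask[..., :valid_len] = True
    return attention(q, k, v, pe, mask=mask)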


def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    # Build rotary frequencies: for positions of shape (batch, n) the result has
    # shape (batch, n, dim // 2, 2, 2), one 2x2 rotation matrix per frequency band.
    assert dim % 2 == 0
    # MPS and some Intel XPU devices do not support float64, so compute on the CPU there.
    if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu():
        device = torch.device("cpu")
    else:
        device = pos.device

    scale = torch.linspace(0, (dim - 2) / dim, steps=dim // 2, dtype=torch.float64, device=device)
    omega = 1.0 / (theta**scale)
    out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega)
    # Stack [cos, -sin, sin, cos] and reshape the last axis into 2x2 rotation matrices.
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
    return out.to(dtype=torch.float32, device=pos.device)
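

# A hypothetical sketch (assumption, not part of the original module) of how
# per-axis frequencies are typically assembled for Flux-style multi-axis ids:
# rope() is applied to each axis separately, the results are concatenated along
# the frequency dimension, and a broadcast head axis is added. This mirrors the
# EmbedND module used elsewhere in the codebase; the names below are illustrative.
def _multi_axis_rope_example(ids: Tensor, axes_dim: list, theta: int = 10000) -> Tensor:
    # ids: (batch, seq_len, n_axes); returns (batch, 1, seq_len, sum(axes_dim) // 2, 2, 2).
    n_axes = ids.shape[-1]
    emb = torch.cat([rope(ids[..., i], axes_dim[i], theta) for i in range(n_axes)], dim=-3)
    return emb.unsqueeze(1)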


def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor):
    # View the last dimension of q and k as pairs and apply the 2x2 rotations from
    # rope(); the math is done in float32 and cast back to the input dtype afterwards.
    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
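

# A minimal end-to-end sketch (an illustration, not part of the original module):
# build frequencies for a single positional axis with rope(), then run attention().
# Flux itself derives positions from multi-axis ids; the single torch.arange axis
# and all sizes below are assumptions for demonstration only.
if __name__ == "__main__":
    batch, heads, seq_len, head_dim = 1, 4, 16, 64

    q = torch.randn(batch, heads, seq_len, head_dim)
    k = torch.randn(batch, heads, seq_len, head_dim)
    v = torch.randn(batch, heads, seq_len, head_dim)

    # Positions for one axis; unsqueeze(1) adds the broadcast head dimension so the
    # frequencies line up with the (batch, heads, seq_len, head_dim) tensors above.
    pos = torch.arange(seq_len, dtype=torch.float32).unsqueeze(0)  # (batch, seq_len)
    pe = rope(pos, head_dim, 10000).unsqueeze(1)  # (batch, 1, seq_len, head_dim // 2, 2, 2)

    out = attention(q, k, v, pe)
    print(out.shape)  # expected: (batch, seq_len, heads * head_dim)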