Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-25 15:55:18 +00:00)
pytorch xpu should be flash or mem efficient attention?
Commit: b1fd26fe9e
Parent: 20447e9ec9
@@ -693,6 +693,8 @@ def pytorch_attention_flash_attention():
     #TODO: more reliable way of checking for flash attention?
     if is_nvidia(): #pytorch flash attention only works on Nvidia
         return True
+    if is_intel_xpu():
+        return True
     return False
 
 def force_upcast_attention_dtype():
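For context, below is a minimal sketch of how pytorch_attention_flash_attention() reads after this change. The is_nvidia() and is_intel_xpu() helpers already exist in ComfyUI's model management code; their bodies here are assumptions for illustration only and are not part of this diff.

import torch

def is_nvidia():
    # Assumed helper: True when PyTorch is running on an NVIDIA CUDA device.
    return torch.cuda.is_available() and torch.version.cuda is not None

def is_intel_xpu():
    # Assumed helper: True when an Intel XPU backend is available to PyTorch.
    return hasattr(torch, "xpu") and torch.xpu.is_available()

def pytorch_attention_flash_attention():
    # After this commit, PyTorch attention is treated as flash / memory-efficient
    # on Intel XPU devices as well as on NVIDIA GPUs.
    #TODO: more reliable way of checking for flash attention?
    if is_nvidia(): #pytorch flash attention only works on Nvidia
        return True
    if is_intel_xpu():
        return True
    return False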