Don't use is_bf16_supported to check for fp16 support.

comfyanonymous 2024-02-04 20:53:35 -05:00
parent 24129d78e6
commit 66e28ef45c


@@ -722,10 +722,13 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
     if is_intel_xpu():
         return True

-    if torch.cuda.is_bf16_supported():
+    if torch.version.hip:
         return True

     props = torch.cuda.get_device_properties("cuda")
+    if props.major >= 8:
+        return True
+
     if props.major < 6:
         return False
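
For reference, a minimal standalone sketch of the check this patch converges on, assuming a CUDA or ROCm build of PyTorch. The function name fp16_ok and the final fall-through are illustrative only; the real should_use_fp16 also takes model size and performance heuristics into account, which are not shown here.

```python
import torch

def fp16_ok(device="cuda"):
    # Illustrative sketch (not ComfyUI's actual function): decide fp16 usability
    # from the device's compute capability instead of torch.cuda.is_bf16_supported().
    if not torch.cuda.is_available():
        return False
    if torch.version.hip:              # ROCm build: treat fp16 as usable
        return True
    props = torch.cuda.get_device_properties(device)
    if props.major >= 8:               # Ampere (sm_80) and newer
        return True
    if props.major < 6:                # pre-Pascal cards lack usable fp16
        return False
    # Pascal/Volta/Turing: the real code applies further heuristics here;
    # this sketch simply allows fp16.
    return True
```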