mix_ascend_bf16_infer_err

zhoufan2956 2025-02-12 17:13:10 +08:00
parent d9f0fcdb0c
commit a321f9ddf3


@@ -1082,6 +1082,9 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
     if is_intel_xpu():
         return True
 
+    if is_ascend_npu():
+        return True
+
     props = torch.cuda.get_device_properties(device)
     if props.major >= 8:
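
The hunk makes should_use_bf16() return True as soon as an Ascend NPU backend is detected, mirroring the existing Intel XPU branch, so the CUDA-only torch.cuda.get_device_properties() probe below is never reached on Ascend hardware. As a rough illustration of the kind of check such a helper typically performs, below is a minimal sketch of an is_ascend_npu() detector built on the torch_npu adapter; this is an assumption for illustration only, and the actual helper in this repository may be implemented differently.

import torch

def is_ascend_npu():
    # Hypothetical sketch, not the repository's actual helper:
    # the Ascend backend ships as the torch_npu adapter, which
    # registers the torch.npu namespace when it imports successfully.
    try:
        import torch_npu  # noqa: F401  (assumed package name)
    except ImportError:
        return False
    return torch.npu.is_available()

With a check like this in place, should_use_bf16() can short-circuit before any CUDA-specific device-property query, which is what the added early return in the diff accomplishes.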