Fix typo.

Let me know if this slows things down on 2000 series and below.
This commit is contained in:
comfyanonymous 2025-02-18 07:28:33 -05:00
parent 31e54b7052
commit b07258cef2

View File

@@ -1121,7 +1121,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
         bf16_works = torch.cuda.is_bf16_supported()
-        if bf16_works or manual_cast:
+        if bf16_works and manual_cast:
             free_model_memory = maximum_vram_for_weights(device)
             if (not prioritize_performance) or model_params * 4 > free_model_memory:
                 return True