From b07258cef2ec53e2f76ef9ae73682ca1aa08a9b1 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 18 Feb 2025 07:28:33 -0500
Subject: [PATCH] Fix typo.

Let me know if this slows things down on 2000 series and below.
---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 9252afab1..9ff63f35d 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1121,7 +1121,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma

     bf16_works = torch.cuda.is_bf16_supported()

-    if bf16_works or manual_cast:
+    if bf16_works and manual_cast:
         free_model_memory = maximum_vram_for_weights(device)
         if (not prioritize_performance) or model_params * 4 > free_model_memory:
             return True
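
For context, a minimal, self-contained sketch of the decision the patched hunk implements. decide_bf16 is a hypothetical helper written only for this note; the values that the real should_use_bf16() computes itself (bf16_works via torch.cuda.is_bf16_supported(), free_model_memory via maximum_vram_for_weights()) are taken as plain parameters here so the example runs without torch or a GPU.

def decide_bf16(bf16_works, manual_cast, model_params,
                free_model_memory, prioritize_performance=True):
    # Patched condition: require both hardware bf16 support and an explicit
    # request for manual casting (the typo used `or` instead of `and`).
    if bf16_works and manual_cast:
        # Prefer bf16 when speed is not being prioritized, or when fp32
        # weights (4 bytes per parameter) would not fit in free VRAM.
        if (not prioritize_performance) or model_params * 4 > free_model_memory:
            return True
    return False

# The case the fix changes: bf16 unusable on the device, but manual casting
# requested. The old `or` condition could still return True here; with `and`
# it returns False. The commit message asks whether this affects 2000-series
# and older cards in practice.
print(decide_bf16(bf16_works=False, manual_cast=True,
                  model_params=12_000_000_000, free_model_memory=8 * 1024**3))
# -> False (the pre-fix condition would have returned True)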