diff --git a/comfy/model_management.py b/comfy/model_management.py
index 9252afab1..9ff63f35d 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1121,7 +1121,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
 
     bf16_works = torch.cuda.is_bf16_supported()
 
-    if bf16_works or manual_cast:
+    if bf16_works and manual_cast:
         free_model_memory = maximum_vram_for_weights(device)
         if (not prioritize_performance) or model_params * 4 > free_model_memory:
             return True