From eb4543474b6a3f48125900b262a098f0a4bd6609 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 28 Feb 2025 02:17:50 -0500
Subject: [PATCH] Use fp16 for intermediate for fp8 weights with --fast if
 supported.

---
 comfy/model_management.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 987b45e41..afbb133d4 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -741,6 +741,9 @@ def unet_manual_cast(weight_dtype, inference_device, supported_dtypes=[torch.flo
         return None
 
     fp16_supported = should_use_fp16(inference_device, prioritize_performance=True)
+    if PRIORITIZE_FP16 and fp16_supported and torch.float16 in supported_dtypes:
+        return torch.float16
+
     for dt in supported_dtypes:
         if dt == torch.float16 and fp16_supported:
             return torch.float16
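
Note: the following is a minimal, self-contained sketch of the dtype-selection
order this patch introduces, not the actual ComfyUI code. The function name
pick_intermediate_dtype and the parameter prioritize_fp16 are hypothetical
stand-ins; the real function is unet_manual_cast, and PRIORITIZE_FP16 is
assumed to be a module-level flag set elsewhere when the user passes --fast.
The fallback loop is simplified to plain list order here, whereas the real
one also consults device support checks.

import torch

def pick_intermediate_dtype(fp16_supported, prioritize_fp16,
                            supported_dtypes=(torch.float16, torch.bfloat16, torch.float32)):
    # New behavior from the patch: when the user opted in (--fast) and the
    # device supports fp16, prefer fp16 for intermediates even if it is not
    # first in supported_dtypes.
    if prioritize_fp16 and fp16_supported and torch.float16 in supported_dtypes:
        return torch.float16
    # Prior behavior (simplified): the first acceptable dtype in list order wins.
    for dt in supported_dtypes:
        if dt == torch.float16 and fp16_supported:
            return torch.float16
        if dt == torch.bfloat16:
            return torch.bfloat16
        if dt == torch.float32:
            return torch.float32
    return torch.float32

if __name__ == "__main__":
    # The flag only changes the outcome when fp16 is supported but is not
    # first in the list:
    assert pick_intermediate_dtype(True, True, (torch.bfloat16, torch.float16)) is torch.float16
    assert pick_intermediate_dtype(True, False, (torch.bfloat16, torch.float16)) is torch.bfloat16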