From a6b22bd7794c875cbaca090b516d0a842a4edeb9 Mon Sep 17 00:00:00 2001
From: silveroxides
Date: Tue, 8 Apr 2025 19:27:56 +0200
Subject: [PATCH] Add launch argument for disabling fp8 compute

---
 comfy/cli_args.py         | 1 +
 comfy/model_management.py | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 62079e6a7..e3d2ea1cb 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -129,6 +129,7 @@ parser.add_argument("--reserve-vram", type=float, default=None, help="Set the am
 
 parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.")
 
+parser.add_argument("--disable-fp8-compute", action="store_true", help="Prevent ComfyUI from activating fp8 compute on Nvidia cards that support it. Can prevent issues with models that are not suitable for fp8 compute.")
 parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
 parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")
 
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 19e6c8dff..084093264 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1211,6 +1211,8 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
 
 def supports_fp8_compute(device=None):
     if not is_nvidia():
         return False
+    if args.disable_fp8_compute:
+        return False
     props = torch.cuda.get_device_properties(device)
     if props.major >= 9:
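
The change follows a simple pattern: a store_true launch flag is checked at the top of the capability probe, so a user opt-out always wins over hardware detection. Below is a minimal standalone sketch of that pattern, assuming simplified stand-ins for ComfyUI's helpers: is_nvidia is reduced to a plain CUDA availability check, and the SM 8.9 branch for Ada-class cards is an illustrative guess at the tail of supports_fp8_compute, which is truncated in the hunk above.

import argparse

import torch

parser = argparse.ArgumentParser()
parser.add_argument("--disable-fp8-compute", action="store_true",
                    help="Never use fp8 compute, even on GPUs that support it.")
args = parser.parse_args()

def is_nvidia():
    # Simplified stand-in: ComfyUI inspects its active torch backend; here we
    # only check whether a CUDA device is visible at all.
    return torch.cuda.is_available()

def supports_fp8_compute(device=None):
    if not is_nvidia():
        return False
    if args.disable_fp8_compute:
        # The new escape hatch: the user's opt-out overrides hardware detection.
        return False
    props = torch.cuda.get_device_properties(device)
    if props.major >= 9:  # Hopper (SM 9.x) and newer expose fp8 tensor cores.
        return True
    if props.major == 8 and props.minor >= 9:  # Ada (SM 8.9); assumed branch,
        return True                            # not shown in the hunk above.
    return False

if __name__ == "__main__":
    print(supports_fp8_compute())

Run with the flag and the probe reports False regardless of the GPU, e.g. python sketch.py --disable-fp8-compute; without it, the hardware checks decide as before.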