From 708138c77d003a7c3f5f4d449d54df659cdae1a4 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 8 Feb 2023 14:51:18 -0500
Subject: [PATCH] Remove print.

---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index ece6db3f..c97bfcff 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -84,7 +84,7 @@ def load_model_gpu(model):
             device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
         elif vram_state == LOW_VRAM:
             device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})
-        print(device_map, "{}MiB".format(total_vram_available_mb))
+
         accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
         model_accelerated = True
     return current_loaded_model
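
For context, the print removed here was debug output that logged the computed device_map and VRAM budget just before accelerate.dispatch_model consumed them. Below is a minimal sketch of the accelerate low/no-VRAM pattern this function relies on, assuming CUDA device 0 and a toy torch.nn.Sequential standing in for the real diffusion model (the toy model and its layer sizes are illustrative, not from the patch):

```python
# A minimal sketch of the accelerate pattern used in load_model_gpu, not
# ComfyUI code. Assumes CUDA device 0 and the `accelerate` package installed;
# the toy nn.Sequential below stands in for the real diffusion model.
import accelerate
import torch.nn as nn

# Stand-in model; the real function operates on the patched diffusion model.
real_model = nn.Sequential(*(nn.Linear(1024, 1024) for _ in range(8)))

# Plan a per-module placement under an explicit memory budget. The patch uses
# a hard 256MiB GPU cap for NO_VRAM and total_vram_available_mb for LOW_VRAM;
# modules that do not fit on GPU 0 are assigned to the CPU.
device_map = accelerate.infer_auto_device_map(
    real_model, max_memory={0: "256MiB", "cpu": "16GiB"})

# Apply the plan: each submodule moves to its assigned device, and accelerate
# installs forward hooks that shuttle activations between devices so the model
# can still be called as if it lived entirely on the main device.
real_model = accelerate.dispatch_model(
    real_model, device_map=device_map, main_device="cuda")
```

Note that infer_auto_device_map only plans the placement; nothing moves until dispatch_model applies it, which is why dropping the print between the two calls changes no behavior.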