From 6a737925b1a6aabfa3b7052c30ba87bf17be1801 Mon Sep 17 00:00:00 2001 From: David McCloskey Date: Fri, 17 Jan 2025 12:18:30 -0600 Subject: [PATCH] Move the model_sampling to CPU since it is a very small amount of work --- comfy/sd.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index d7e89f72..d0911908 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -923,6 +923,8 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c if inital_load_device != torch.device("cpu"): logging.info("loaded diffusion model directly to GPU") model_management.load_models_gpu([model_patcher], force_full_load=True) + # damcclos: move the model_sampling back to the CPU. The work needed for this is not worth the gpu. + model_patcher.model.model_sampling.to(torch.device("cpu")) return (model_patcher, clip, vae, clipvision)