From e44d0ac7f77820e8339d20fe3c0698bf8a5e9347 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 23 Dec 2024 01:50:11 -0500 Subject: [PATCH] Make --novram completely offload weights. This flag is mainly used for testing the weight offloading; it shouldn't actually be used in practice. Remove useless import. --- comfy/ldm/pixart/blocks.py | 1 - comfy/model_management.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/comfy/ldm/pixart/blocks.py b/comfy/ldm/pixart/blocks.py index 40b0663e..2225076e 100644 --- a/comfy/ldm/pixart/blocks.py +++ b/comfy/ldm/pixart/blocks.py @@ -6,7 +6,6 @@ import torch.nn as nn import torch.nn.functional as F from einops import rearrange -from comfy import model_management from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, Mlp, timestep_embedding from comfy.ldm.modules.attention import optimized_attention diff --git a/comfy/model_management.py b/comfy/model_management.py index b480aaaa..d77ae8c0 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -521,7 +521,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu lowvram_model_memory = 0 if vram_set_state == VRAMState.NO_VRAM: - lowvram_model_memory = 64 * 1024 * 1024 + lowvram_model_memory = 0.1 loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights) current_loaded_models.insert(0, loaded_model)