Mirror of https://github.com/comfyanonymous/ComfyUI.git
Try to free memory for both cond+uncond before inference.
This commit is contained in:
parent be71bb5e13
commit 13fdee6abf
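A note for context (not part of the commit): during classifier-free guidance, the conditional and unconditional passes are run together in a single batch, so peak inference memory corresponds to twice the noise batch size. The second hunk below builds that doubled shape; a minimal sketch of the same computation, using a hypothetical latent shape:

    # Sketch only: why the batch dimension is doubled (shape is hypothetical).
    # Cond and uncond run in one batch during CFG sampling, so memory should
    # be estimated for 2x the noise batch size.
    noise_shape = [1, 4, 64, 64]  # [batch, channels, latent_h, latent_w]
    cond_uncond_shape = [noise_shape[0] * 2] + list(noise_shape[1:])
    print(cond_uncond_shape)  # [2, 4, 64, 64]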
@@ -166,8 +166,8 @@ class BaseModel(torch.nn.Module):
     def memory_required(self, input_shape):
         if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention():
             #TODO: this needs to be tweaked
-            area = max(input_shape[0], 3) * input_shape[2] * input_shape[3]
-            return (area * comfy.model_management.dtype_size(self.get_dtype()) / 60) * (1024 * 1024)
+            area = input_shape[0] * input_shape[2] * input_shape[3]
+            return (area * comfy.model_management.dtype_size(self.get_dtype()) / 50) * (1024 * 1024)
         else:
             #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory.
             area = input_shape[0] * input_shape[2] * input_shape[3]
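The hunk above drops the max(input_shape[0], 3) floor, so small batches are no longer padded up to 3, and compensates with a larger per-element estimate (divisor 60 -> 50). A standalone re-implementation for illustration; the 2-byte dtype and the 64x64 latent shape (a 512x512 image) are assumptions, and this mirrors the added lines rather than being importable ComfyUI code:

    # Hedged sketch of the new xformers/flash-attention branch of
    # BaseModel.memory_required; dtype_bytes and shapes are assumed values.
    def memory_required(input_shape, dtype_bytes=2):
        batch, _channels, h, w = input_shape
        area = batch * h * w
        return (area * dtype_bytes / 50) * (1024 * 1024)  # estimate in bytes

    single = memory_required((1, 4, 64, 64))   # cond only
    double = memory_required((2, 4, 64, 64))   # cond + uncond
    print(single / 1024**3, double / 1024**3)  # ~0.16 GiB vs ~0.32 GiB

Because the batch floor is gone, the estimate now scales linearly with the actual batch size, which is what makes the doubled-batch call in the next hunk meaningful.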
@@ -83,7 +83,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask):
 
     real_model = None
     models, inference_memory = get_additional_models(positive, negative, model.model_dtype())
-    comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise_shape) + inference_memory)
+    comfy.model_management.load_models_gpu([model] + models, model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory)
     real_model = model.model
 
     return real_model, positive, negative, noise_mask, models
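Taken together, the two hunks make prepare_sampling request VRAM for the shape that sampling will actually run: memory_required is evaluated on a batch of 2 * noise_shape[0], so other models are unloaded or offloaded up front to make room for the combined cond+uncond pass, rather than risking an out-of-memory error mid-inference.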