From cc8baf1080c26fb0993674ffaa9cb4ed97862f4e Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 5 Mar 2023 14:20:07 -0500
Subject: [PATCH] Make VAE use common function to get free memory.

---
 comfy/ldm/modules/diffusionmodules/model.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 6f0b41dc..01ab2ede 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -7,6 +7,7 @@ from einops import rearrange
 from typing import Optional, Any
 
 from ldm.modules.attention import MemoryEfficientCrossAttention
+import model_management
 
 try:
     import xformers
@@ -199,12 +200,7 @@ class AttnBlock(nn.Module):
 
         r1 = torch.zeros_like(k, device=q.device)
 
-        stats = torch.cuda.memory_stats(q.device)
-        mem_active = stats['active_bytes.all.current']
-        mem_reserved = stats['reserved_bytes.all.current']
-        mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
-        mem_free_torch = mem_reserved - mem_active
-        mem_free_total = mem_free_cuda + mem_free_torch
+        mem_free_total = model_management.get_free_memory(q.device)
 
         gb = 1024 ** 3
         tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
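
Note: the patch itself only shows the call site; a minimal sketch of what the shared helper in model_management could look like, assuming it simply consolidates the CUDA free-memory logic removed above (the function name get_free_memory comes from the patch; everything else here is an assumption, not the repository's actual implementation):

    # Sketch only: plausible shape of model_management.get_free_memory,
    # assuming it mirrors the per-call-site logic deleted in this patch.
    import torch

    def get_free_memory(device):
        # At this call site the device is q.device, a CUDA device; other
        # device types would need their own handling (not sketched here).
        stats = torch.cuda.memory_stats(device)
        mem_active = stats['active_bytes.all.current']
        mem_reserved = stats['reserved_bytes.all.current']
        # Free memory reported by the driver, plus memory PyTorch has
        # reserved in its caching allocator but is not actively using.
        mem_free_cuda, _ = torch.cuda.mem_get_info(device)
        mem_free_torch = mem_reserved - mem_active
        return mem_free_cuda + mem_free_torch

Centralizing this in one helper means the attention code no longer queries torch.cuda directly, so the same free-memory estimate can be reused by other callers and adjusted in one place.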