Empty the cache when the torch cache exceeds 25% of free memory.

comfyanonymous 2023-10-22 13:53:59 -04:00
parent 8b65f5de54
commit 8594c8be4d


@@ -339,7 +339,11 @@ def free_memory(memory_required, device, keep_loaded=[]):
 
     if unloaded_model:
         soft_empty_cache()
-
+    else:
+        if vram_state != VRAMState.HIGH_VRAM:
+            mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True)
+            if mem_free_torch > mem_free_total * 0.25:
+                soft_empty_cache()
 
 def load_models_gpu(models, memory_required=0):
     global vram_state
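
For reference, a minimal self-contained sketch of the heuristic the new else-branch implements, assuming a CUDA device. The helper names below (free_memory_estimates, maybe_empty_cache) are illustrative, not the repository's own; the repo's get_free_memory computes roughly these quantities.

import torch

def free_memory_estimates(device):
    # Sketch of what get_free_memory(device, torch_free_too=True) returns:
    # total reclaimable memory, and the share of it held in torch's cache.
    stats = torch.cuda.memory_stats(device)
    mem_active = stats["active_bytes.all.current"]
    mem_reserved = stats["reserved_bytes.all.current"]
    mem_free_cuda, _ = torch.cuda.mem_get_info(device)
    mem_free_torch = mem_reserved - mem_active  # cached but not backing live tensors
    return mem_free_cuda + mem_free_torch, mem_free_torch

def maybe_empty_cache(device):
    mem_free_total, mem_free_torch = free_memory_estimates(device)
    # The commit's heuristic: emptying the cache is costly, because later
    # allocations must go back through cudaMalloc instead of reusing cached
    # blocks, so only do it when torch's idle cache is a large share (>25%)
    # of everything that could be freed on this device.
    if mem_free_torch > mem_free_total * 0.25:
        torch.cuda.empty_cache()

Calling maybe_empty_cache(torch.device("cuda")) then mirrors the else-branch above: when no model was unloaded, the cache is only flushed if doing so would actually return a meaningful amount of memory to the driver.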