From 7e1e193f398297e9cc1bcf0276676add68c0161c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 10 Feb 2023 00:47:56 -0500
Subject: [PATCH] Automatically enable lowvram mode if vram is less than 4GB.

Use: --normalvram to disable it.
---
 comfy/model_management.py | 16 +++++++++++-----
 main.py                   |  1 +
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index c97bfcff3..ff7cbeb0c 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -13,16 +13,22 @@ total_vram_available_mb = -1
 import sys
 
 set_vram_to = NORMAL_VRAM
+
+try:
+    import torch
+    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
+    if total_vram <= 4096 and not "--normalvram" in sys.argv:
+        print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
+        set_vram_to = LOW_VRAM
+except:
+    pass
+
 if "--lowvram" in sys.argv:
     set_vram_to = LOW_VRAM
 if "--novram" in sys.argv:
     set_vram_to = NO_VRAM
 
-try:
-    import torch
-    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
-except:
-    pass
+
 
 if set_vram_to != NORMAL_VRAM:
     try:
diff --git a/main.py b/main.py
index 0f466a3f5..666193b6c 100644
--- a/main.py
+++ b/main.py
@@ -14,6 +14,7 @@ if __name__ == "__main__":
         print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n")
         print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.")
         print()
+        print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.")
         print("\t--lowvram\t\t\tSplit the unet in parts to use less vram.")
         print("\t--novram\t\t\tWhen lowvram isn't enough.")
         print()
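
For reference, the check this patch introduces can be read as the standalone sketch below. The NORMAL_VRAM and LOW_VRAM constants are placeholders standing in for the vram-state values defined elsewhere in comfy/model_management.py (their actual values are not part of this diff), and the snippet assumes a CUDA-capable PyTorch build; on machines without a usable CUDA device the probe falls through and the default state is kept, just as in the patched code.

# Standalone sketch of the auto-lowvram heuristic introduced by this patch.
# NORMAL_VRAM / LOW_VRAM are placeholder constants; the real definitions live
# elsewhere in comfy/model_management.py.
import sys
import torch

NORMAL_VRAM = 3   # placeholder value
LOW_VRAM = 2      # placeholder value

set_vram_to = NORMAL_VRAM
try:
    # torch.cuda.mem_get_info() returns (free_bytes, total_bytes) for the
    # given device; index [1] is the card's total memory, converted to MiB here.
    total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    if total_vram <= 4096 and "--normalvram" not in sys.argv:
        print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. "
              "If you don't want this use: --normalvram")
        set_vram_to = LOW_VRAM
except Exception:
    # No usable CUDA device (or a CPU-only build): keep the default state.
    pass

Note that in the patched code the explicit --lowvram and --novram flags are checked after this probe, so they still override the automatically chosen state.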