Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-03-15 14:09:36 +00:00)
To be really simple CheckpointLoaderSimple should pick the right type.
This commit is contained in:
parent 94bb0375b0
commit fed315a76a
comfy/model_management.py

@@ -173,6 +173,30 @@ def maximum_batch_area():
     memory_free = get_free_memory() / (1024 * 1024)
     area = ((memory_free - 1024) * 0.9) / (0.6)
     return int(max(area, 0))
+
+def cpu_mode():
+    global vram_state
+    return vram_state == CPU
+
+def should_use_fp16():
+    if cpu_mode():
+        return False #TODO ?
+
+    if torch.cuda.is_bf16_supported():
+        return True
+
+    props = torch.cuda.get_device_properties("cuda")
+    if props.major < 7:
+        return False
+
+    #FP32 is faster on those cards?
+    nvidia_16_series = ["1660", "1650", "1630"]
+    for x in nvidia_16_series:
+        if x in props.name:
+            return False
+
+    return True
+
 #TODO: might be cleaner to put this somewhere else
 import threading
comfy/sd.py

@@ -656,12 +656,14 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e
     return (ModelPatcher(model), clip, vae)
 
 
-def load_checkpoint_guess_config(ckpt_path, fp16=False, output_vae=True, output_clip=True, embedding_directory=None):
+def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
     sd = load_torch_file(ckpt_path)
     sd_keys = sd.keys()
     clip = None
     vae = None
 
+    fp16 = model_management.should_use_fp16()
+
     class WeightsLoader(torch.nn.Module):
         pass
 
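The precision decision thus moves from the caller into the loader: load_checkpoint_guess_config no longer takes an fp16 flag and instead asks model_management.should_use_fp16() at load time. A hedged usage sketch (the checkpoint path is hypothetical; the three-tuple mirrors the (ModelPatcher(model), clip, vae) return seen above):

import comfy.sd

# Precision is now chosen automatically from the hardware heuristic;
# callers only say what they want loaded.
model, clip, vae = comfy.sd.load_checkpoint_guess_config(
    "models/checkpoints/example.ckpt",  # hypothetical path
    output_vae=True,
    output_clip=True,
)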
nodes.py

@@ -209,19 +209,15 @@ class CheckpointLoaderSimple:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir), supported_ckpt_extensions), ),
-                              "type": (["fp16", "fp32"],),
-                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                               }}
     RETURN_TYPES = ("MODEL", "CLIP", "VAE")
     FUNCTION = "load_checkpoint"
 
     CATEGORY = "_for_testing"
 
-    def load_checkpoint(self, ckpt_name, type, stop_at_clip_layer, output_vae=True, output_clip=True):
+    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
         ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
-        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, type=="fp16", output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
-        if out[1] is not None:
-            out[1].clip_layer(stop_at_clip_layer)
+        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
         return out
 
 class LoraLoader:
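
After this change the node's only required input is the checkpoint name: the fp16/fp32 dropdown is gone because the loader now picks the type itself, and the stop_at_clip_layer trimming is no longer performed by this node. A hypothetical invocation sketch (the checkpoint file name is invented for illustration, and nodes is assumed importable from the repo root):

import nodes

loader = nodes.CheckpointLoaderSimple()
# Only the checkpoint name is passed; precision is decided by
# model_management.should_use_fp16() inside the loader.
model, clip, vae = loader.load_checkpoint("example.safetensors")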