Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-07-09 08:47:17 +08:00

[feat] move func up

commit 0215f8013f
parent 777ee90206
@@ -87,6 +87,13 @@ def get_torch_device():
         else:
             return torch.device(torch.cuda.current_device())
 
+def get_containerd_memory_limit():
+    cgroup_memory_limit = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
+    if os.path.isfile(cgroup_memory_limit):
+        with open(cgroup_memory_limit, 'r') as f:
+            return int(f.read())
+    return 0
+
 def get_total_memory(dev=None, torch_total_too=False):
     global directml_enabled
     if dev is None:
@@ -707,12 +714,6 @@ def is_device_mps(device):
             return True
     return False
 
-def get_containerd_memory_limit():
-    cgroup_memory_limit = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
-    if os.path.isfile(cgroup_memory_limit):
-        with open(cgroup_memory_limit, 'r') as f:
-            return int(f.read())
-    return 0
 
 def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
     global directml_enabled
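Note that the relocated helper only checks the cgroup v1 path, so it returns 0 on hosts using the cgroup v2 unified hierarchy, where the limit is exposed at /sys/fs/cgroup/memory.max instead. A minimal standalone sketch that covers both layouts is shown below; it is not part of this commit, and the name get_container_memory_limit and the v2 handling are assumptions for illustration only.

import os

def get_container_memory_limit():
    """Return the container memory limit in bytes, or 0 if none is found."""
    # cgroup v1: legacy hierarchy used by older containerd/Docker setups.
    v1_path = '/sys/fs/cgroup/memory/memory.limit_in_bytes'
    # cgroup v2: unified hierarchy; the file contains "max" when unlimited.
    v2_path = '/sys/fs/cgroup/memory.max'
    for path in (v1_path, v2_path):
        if os.path.isfile(path):
            with open(path, 'r') as f:
                value = f.read().strip()
            if value == 'max':
                return 0
            return int(value)
    return 0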