Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-04-18 01:53:31 +00:00)

Merge branch 'comfyanonymous:master' into multigpu_support

Commit 66838ebd39
@@ -1,6 +1,7 @@
 import os
 import json
 from aiohttp import web
+import logging


 class AppSettings():
@@ -11,8 +12,12 @@ class AppSettings():
         file = self.user_manager.get_request_user_filepath(
             request, "comfy.settings.json")
         if os.path.isfile(file):
-            with open(file) as f:
-                return json.load(f)
+            try:
+                with open(file) as f:
+                    return json.load(f)
+            except:
+                logging.error(f"The user settings file is corrupted: {file}")
+                return {}
         else:
             return {}
 
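The change above boils down to one pattern: if the user's comfy.settings.json cannot be parsed, log the problem and fall back to empty settings instead of failing the request. A minimal standalone sketch of that pattern (load_settings and its path argument are illustrative names, not ComfyUI's API):

import json
import logging
import os

def load_settings(path):
    # Illustrative sketch only: treat a missing file and a corrupted file the
    # same way and return empty settings, logging the corrupted case.
    if not os.path.isfile(path):
        return {}
    try:
        with open(path) as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        logging.error(f"The user settings file is corrupted: {path}")
        return {}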
@@ -475,7 +475,7 @@ class DPMSolver(nn.Module):
         return x_3, eps_cache
 
     def dpm_solver_fast(self, x, t_start, t_end, nfe, eta=0., s_noise=1., noise_sampler=None):
-        noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+        noise_sampler = default_noise_sampler(x, seed=self.extra_args.get("seed", None)) if noise_sampler is None else noise_sampler
         if not t_end > t_start and eta:
             raise ValueError('eta must be 0 for reverse sampling')
 
@@ -514,7 +514,7 @@ class DPMSolver(nn.Module):
         return x
 
     def dpm_solver_adaptive(self, x, t_start, t_end, order=3, rtol=0.05, atol=0.0078, h_init=0.05, pcoeff=0., icoeff=1., dcoeff=0., accept_safety=0.81, eta=0., s_noise=1., noise_sampler=None):
-        noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+        noise_sampler = default_noise_sampler(x, seed=self.extra_args.get("seed", None)) if noise_sampler is None else noise_sampler
         if order not in {2, 3}:
             raise ValueError('order should be 2 or 3')
         forward = t_end > t_start
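Both DPMSolver call sites now forward a seed pulled from self.extra_args into default_noise_sampler. Assuming default_noise_sampler returns a (sigma, sigma_next) -> noise callable, one plausible seed-aware shape for it is sketched below (an illustration consistent with the new call sites, not necessarily the exact upstream implementation):

import torch

def default_noise_sampler(x, seed=None):
    # Sketch: with a seed, draw noise from a dedicated torch.Generator so the
    # same seed reproduces the same noise at every step; without one, fall
    # back to the global RNG state.
    if seed is not None:
        generator = torch.Generator(device=x.device)
        generator.manual_seed(seed)
    else:
        generator = None
    return lambda sigma, sigma_next: torch.randn(
        x.size(), dtype=x.dtype, layout=x.layout, device=x.device, generator=generator
    )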
@@ -894,7 +894,8 @@ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
 
 def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
     extra_args = {} if extra_args is None else extra_args
-    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    seed = extra_args.get("seed", None)
+    noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])
 
     for i in trange(len(sigmas) - 1, disable=disable):
@@ -914,7 +915,8 @@ def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None,
 @torch.no_grad()
 def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
     extra_args = {} if extra_args is None else extra_args
-    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    seed = extra_args.get("seed", None)
+    noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
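The generic_step_sampler and sample_lcm hunks read that seed from extra_args, so a caller only needs to include a "seed" key there to get deterministic noise. The payoff is reproducibility, which a tiny self-contained check illustrates (seeded_noise is a hypothetical helper, not ComfyUI code):

import torch

def seeded_noise(shape, seed, device="cpu"):
    # Hypothetical helper: same seed, same noise, which is what passing
    # extra_args={"seed": ...} buys the samplers above.
    g = torch.Generator(device=device)
    g.manual_seed(seed)
    return torch.randn(shape, device=device, generator=g)

a = seeded_noise((1, 4, 8, 8), seed=1234)
b = seeded_noise((1, 4, 8, 8), seed=1234)
assert torch.equal(a, b)  # identical draws given the same seed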
@@ -227,8 +227,9 @@ class T5(torch.nn.Module):
         super().__init__()
         self.num_layers = config_dict["num_layers"]
         model_dim = config_dict["d_model"]
+        inner_dim = config_dict["d_kv"] * config_dict["num_heads"]
 
-        self.encoder = T5Stack(self.num_layers, model_dim, model_dim, config_dict["d_ff"], config_dict["dense_act_fn"], config_dict["is_gated_act"], config_dict["num_heads"], config_dict["model_type"] != "umt5", dtype, device, operations)
+        self.encoder = T5Stack(self.num_layers, model_dim, inner_dim, config_dict["d_ff"], config_dict["dense_act_fn"], config_dict["is_gated_act"], config_dict["num_heads"], config_dict["model_type"] != "umt5", dtype, device, operations)
         self.dtype = dtype
         self.shared = operations.Embedding(config_dict["vocab_size"], model_dim, device=device, dtype=dtype)
 
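The T5 hunk sizes the encoder's attention projections with inner_dim = d_kv * num_heads instead of reusing d_model. For many T5 configs the two values coincide, so the old code happened to work, but they can differ (some larger T5 variants use a wider attention inner dimension), and in that case the stack was built with the wrong width. A quick check with hypothetical config values shows where the change matters:

# Hypothetical config values, only to show when the two widths diverge.
configs = [
    {"d_model": 768, "num_heads": 12, "d_kv": 64},    # d_model == d_kv * num_heads
    {"d_model": 1024, "num_heads": 16, "d_kv": 128},  # d_model != d_kv * num_heads
]
for cfg in configs:
    inner_dim = cfg["d_kv"] * cfg["num_heads"]
    print(cfg["d_model"], inner_dim, cfg["d_model"] == inner_dim)
# Prints: 768 768 True, then 1024 2048 False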