diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py
index 5b8d8000..c9f4c04d 100644
--- a/comfy/k_diffusion/sampling.py
+++ b/comfy/k_diffusion/sampling.py
@@ -929,6 +929,33 @@ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, n
     return x
 
+
+@torch.no_grad()
+def sample_lcm_scalewise(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
+    """LCM sampling that re-noises the denoised prediction at a new spatial scale each step (extra_args["scales"])."""
+    extra_args = {} if extra_args is None else extra_args
+    seed = extra_args.get("seed", None)
+    scales = extra_args.get("scales", None)
+    if scales:
+        assert len(scales) == len(sigmas) - 1, "Number of scales must equal the number of sampling steps (len(sigmas) - 1)."
+    noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
+    s_in = x.new_ones([x.shape[0]])
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+        x = denoised
+        if sigmas[i + 1] > 0:
+            if scales:
+                # Interpolate the denoised prediction to the next scale before re-noising.
+                x = nn.functional.interpolate(x, size=scales[i + 1], mode='bicubic')
+            noise = noise_sampler(sigmas[i], sigmas[i + 1])
+            if noise.shape != x.shape:
+                # The default noise sampler is sized to the initial latent, so draw fresh noise at the current scale.
+                noise = torch.randn_like(x)
+            x = model.inner_model.inner_model.model_sampling.noise_scaling(sigmas[i + 1], noise, x)
+    return x
 
 
 @torch.no_grad()
 def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
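
Hypothetical usage sketch (not part of the patch): the snippet below shows how scales might be threaded through extra_args and what the new sampler expects of the wrapped model. The Dummy* classes, the EPS-style noise_scaling, and the sigma/scale values are illustrative assumptions that only mimic the model.inner_model.inner_model.model_sampling attribute chain the sampler reaches into; the import assumes ComfyUI's comfy package is on the path.

import torch
from comfy.k_diffusion import sampling

class DummyModelSampling:
    def noise_scaling(self, sigma, noise, latent_image):
        # EPS-style re-noising; stand-in for the real model_sampling object.
        return latent_image + noise * sigma

class DummyInner:
    model_sampling = DummyModelSampling()

class DummyGuider:
    inner_model = DummyInner()

class DummyDenoiser:
    # Mimics the attribute chain sample_lcm_scalewise reaches into.
    inner_model = DummyGuider()

    def __call__(self, x, sigma, **extra_args):
        # Pretend the model predicts an all-zero clean latent at every step.
        return torch.zeros_like(x)

sigmas = torch.tensor([14.6, 3.0, 1.0, 0.0])  # 3 sampling steps
scales = [(16, 16), (32, 32), (64, 64)]       # one (H, W) entry per step
x = torch.randn(1, 4, 16, 16) * sigmas[0]

out = sampling.sample_lcm_scalewise(
    DummyDenoiser(), x, sigmas,
    extra_args={"scales": scales, "seed": 42},
    disable=True,
)
print(out.shape)  # expected: torch.Size([1, 4, 64, 64])

As written, scales[i + 1] is the size applied after step i, so the first entry is never read and the last entry determines the output resolution.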