Merge branch 'comfyanonymous:master' into master

Kallen Ding, 2024-12-17 10:46:30 +08:00, committed by GitHub
commit d788cbb299
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
121 changed files with 477811 additions and 29447 deletions

View File

@@ -33,12 +33,12 @@ def pull(repo, remote_name='origin', branch='master'):
user = repo.default_signature
tree = repo.index.write_tree()
commit = repo.create_commit('HEAD',
user,
user,
'Merge!',
tree,
[repo.head.target, remote_master_id])
repo.create_commit('HEAD',
user,
user,
'Merge!',
tree,
[repo.head.target, remote_master_id])
# We need to do this or git CLI will think we are still merging.
repo.state_cleanup()
else:
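
The change above just drops the unused `commit =` binding. For orientation, a minimal pygit2 sketch of the same merge flow (the repository path and remote name are assumptions):

```
# Minimal sketch, assuming pygit2 is installed and "." is a clone with an 'origin' remote.
import pygit2

repo = pygit2.Repository(".")
repo.remotes["origin"].fetch()
remote_master_id = repo.lookup_reference("refs/remotes/origin/master").target

repo.merge(remote_master_id)              # stages the merge result into the index
user = repo.default_signature
tree = repo.index.write_tree()
repo.create_commit("HEAD", user, user, "Merge!", tree,
                   [repo.head.target, remote_master_id])
repo.state_cleanup()                      # otherwise the git CLI thinks a merge is still in progress
```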

View File

@@ -147,7 +147,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already installed
This is the command to install the nightly with ROCm 6.2 which might have some performance improvements:
```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.2```
```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.2.4```
### NVIDIA

View File

@@ -40,7 +40,7 @@ class InternalRoutes:
return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in app.logger.get_logs()]))
@self.routes.get('/logs/raw')
async def get_logs(request):
async def get_raw_logs(request):
self.terminal_service.update_size()
return web.json_response({
"entries": list(app.logger.get_logs()),

View File

@@ -413,7 +413,6 @@ class ControlNet(nn.Module):
out_output = []
out_middle = []
hs = []
if self.num_classes is not None:
assert y.shape[0] == x.shape[0]
emb = emb + self.label_emb(y)

View File

@@ -297,7 +297,6 @@ class ControlLoraOps:
class Linear(torch.nn.Module, comfy.ops.CastWeightBiasOp):
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.in_features = in_features
self.out_features = out_features
@@ -382,7 +381,6 @@ class ControlLora(ControlNet):
self.control_model.to(comfy.model_management.get_torch_device())
diffusion_model = model.diffusion_model
sd = diffusion_model.state_dict()
cm = self.control_model.state_dict()
for k in sd:
weight = sd[k]
@@ -823,7 +821,7 @@ def load_t2i_adapter(t2i_data, model_options={}): #TODO: model_options
for i in range(4):
for j in range(2):
prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = "body.{}.".format(i * 2 + j)
prefix_replace["adapter.body.{}.".format(i, j)] = "body.{}.".format(i * 2)
prefix_replace["adapter.body.{}.".format(i, )] = "body.{}.".format(i * 2)
prefix_replace["adapter."] = ""
t2i_data = comfy.utils.state_dict_prefix_replace(t2i_data, prefix_replace)
keys = t2i_data.keys()

View File

@@ -157,16 +157,23 @@ vae_conversion_map_attn = [
]
def reshape_weight_for_sd(w):
def reshape_weight_for_sd(w, conv3d=False):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape, 1, 1)
if conv3d:
return w.reshape(*w.shape, 1, 1, 1)
else:
return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
mapping = {k: k for k in vae_state_dict.keys()}
conv3d = False
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
v = v.replace(hf_part, sd_part)
if v.endswith(".conv.weight"):
if not conv3d and vae_state_dict[k].ndim == 5:
conv3d = True
mapping[k] = v
for k, v in mapping.items():
if "attentions" in k:
@@ -179,7 +186,7 @@ def convert_vae_state_dict(vae_state_dict):
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
logging.debug(f"Reshaping {k} for SD format")
new_state_dict[k] = reshape_weight_for_sd(v)
new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d)
return new_state_dict
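
As a quick sanity check on the new `conv3d` path, a standalone sketch with hypothetical shapes: an HF attention weight of shape (out, in) becomes a 1x1 conv2d kernel, or a 1x1x1 kernel when the VAE is 3D:

```
import torch

w = torch.randn(8, 4)                                          # hypothetical (out, in) weight
assert w.reshape(*w.shape, 1, 1).shape == (8, 4, 1, 1)         # 2D VAE: 1x1 conv kernel
assert w.reshape(*w.shape, 1, 1, 1).shape == (8, 4, 1, 1, 1)   # 3D (video) VAE: 1x1x1 kernel
```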

View File

@@ -703,7 +703,6 @@ class UniPC:
):
# t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
# t_T = self.noise_schedule.T if t_start is None else t_start
device = x.device
steps = len(timesteps) - 1
if method == 'multistep':
assert steps >= order

View File

@@ -1,3 +1,4 @@
import math
import torch
from torch import nn
from .ldm.modules.attention import CrossAttention

View File

@@ -130,7 +130,7 @@ class WeightHook(Hook):
weights = self.weights
else:
weights = self.weights_clip
k = model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
registered.append(self)
return True
# TODO: add logs about any keys that were not applied

View File

@@ -11,7 +11,6 @@ import numpy as np
# Transfer from the input time (sigma) used in EDM to that (t) used in DEIS.
def edm2t(edm_steps, epsilon_s=1e-3, sigma_min=0.002, sigma_max=80):
vp_sigma = lambda beta_d, beta_min: lambda t: (np.e ** (0.5 * beta_d * (t ** 2) + beta_min * t) - 1) ** 0.5
vp_sigma_inv = lambda beta_d, beta_min: lambda sigma: ((beta_min ** 2 + 2 * beta_d * (sigma ** 2 + 1).log()).sqrt() - beta_min) / beta_d
vp_beta_d = 2 * (np.log(torch.tensor(sigma_min).cpu() ** 2 + 1) / epsilon_s - np.log(torch.tensor(sigma_max).cpu() ** 2 + 1)) / (epsilon_s - 1)
vp_beta_min = np.log(torch.tensor(sigma_max).cpu() ** 2 + 1) - 0.5 * vp_beta_d

View File

@@ -352,3 +352,7 @@ class LTXV(LatentFormat):
]
self.latent_rgb_factors_bias = [-0.0571, -0.1657, -0.2512]
class HunyuanVideo(LatentFormat):
latent_channels = 16
scale_factor = 0.476986

View File

@@ -97,7 +97,7 @@ def get_activation(activation: Literal["elu", "snake", "none"], antialias=False,
raise ValueError(f"Unknown activation {activation}")
if antialias:
act = Activation1d(act)
act = Activation1d(act) # noqa: F821 Activation1d is not defined
return act

View File

@@ -158,7 +158,6 @@ class RotaryEmbedding(nn.Module):
def forward(self, t):
# device = self.inv_freq.device
device = t.device
dtype = t.dtype
# t = t.to(torch.float32)
@@ -170,7 +169,7 @@ class RotaryEmbedding(nn.Module):
if self.scale is None:
return freqs, 1.
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base # noqa: F821 seq_len is not defined
scale = comfy.ops.cast_to_input(self.scale, t) ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
@@ -229,9 +228,9 @@ class FeedForward(nn.Module):
linear_in = GLU(dim, inner_dim, activation, dtype=dtype, device=device, operations=operations)
else:
linear_in = nn.Sequential(
Rearrange('b n d -> b d n') if use_conv else nn.Identity(),
rearrange('b n d -> b d n') if use_conv else nn.Identity(),
operations.Linear(dim, inner_dim, bias = not no_bias, dtype=dtype, device=device) if not use_conv else operations.Conv1d(dim, inner_dim, conv_kernel_size, padding = (conv_kernel_size // 2), bias = not no_bias, dtype=dtype, device=device),
Rearrange('b n d -> b d n') if use_conv else nn.Identity(),
rearrange('b n d -> b d n') if use_conv else nn.Identity(),
activation
)
@@ -246,9 +245,9 @@ class FeedForward(nn.Module):
self.ff = nn.Sequential(
linear_in,
Rearrange('b d n -> b n d') if use_conv else nn.Identity(),
rearrange('b d n -> b n d') if use_conv else nn.Identity(),
linear_out,
Rearrange('b n d -> b d n') if use_conv else nn.Identity(),
rearrange('b n d -> b d n') if use_conv else nn.Identity(),
)
def forward(self, x):
@@ -346,18 +345,13 @@ class Attention(nn.Module):
# determine masking
masks = []
final_attn_mask = None # The mask that will be applied to the attention matrix, taking all masks into account
if input_mask is not None:
input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
masks.append(~input_mask)
# Other masks will be added here later
if len(masks) > 0:
final_attn_mask = ~or_reduce(masks)
n, device = q.shape[-2], q.device
n = q.shape[-2]
causal = self.causal if causal is None else causal

View File

@@ -147,7 +147,6 @@ class DoubleAttention(nn.Module):
bsz, seqlen1, _ = c.shape
bsz, seqlen2, _ = x.shape
seqlen = seqlen1 + seqlen2
cq, ck, cv = self.w1q(c), self.w1k(c), self.w1v(c)
cq = cq.view(bsz, seqlen1, self.n_heads, self.head_dim)

View File

@@ -4,9 +4,12 @@ import comfy.ops
def pad_to_patch_size(img, patch_size=(2, 2), padding_mode="circular"):
if padding_mode == "circular" and (torch.jit.is_tracing() or torch.jit.is_scripting()):
padding_mode = "reflect"
pad_h = (patch_size[0] - img.shape[-2] % patch_size[0]) % patch_size[0]
pad_w = (patch_size[1] - img.shape[-1] % patch_size[1]) % patch_size[1]
return torch.nn.functional.pad(img, (0, pad_w, 0, pad_h), mode=padding_mode)
pad = ()
for i in range(img.ndim - 2):
pad = (0, (patch_size[i] - img.shape[i + 2] % patch_size[i]) % patch_size[i]) + pad
return torch.nn.functional.pad(img, pad, mode=padding_mode)
try:
rms_norm_torch = torch.nn.functional.rms_norm
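
A self-contained sketch of the generalized padding above (shapes are hypothetical): the `pad` tuple is built back to front because `F.pad` expects (left, right) pairs starting from the last dimension, so the same code now covers 4-D images and 5-D video latents:

```
import torch

def pad_to_patch_size(img, patch_size=(2, 2), padding_mode="circular"):
    pad = ()
    for i in range(img.ndim - 2):
        pad = (0, (patch_size[i] - img.shape[i + 2] % patch_size[i]) % patch_size[i]) + pad
    return torch.nn.functional.pad(img, pad, mode=padding_mode)

x = torch.randn(1, 16, 5, 7, 9)                                # (B, C, T, H, W)
assert pad_to_patch_size(x, (1, 2, 2)).shape == (1, 16, 5, 8, 10)
```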

View File

@@ -114,7 +114,7 @@ class Modulation(nn.Module):
class DoubleStreamBlock(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, dtype=None, device=None, operations=None):
def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False, flipped_img_txt=False, dtype=None, device=None, operations=None):
super().__init__()
mlp_hidden_dim = int(hidden_size * mlp_ratio)
@@ -141,8 +141,9 @@
nn.GELU(approximate="tanh"),
operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device),
)
self.flipped_img_txt = flipped_img_txt
def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor):
def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, attn_mask=None):
img_mod1, img_mod2 = self.img_mod(vec)
txt_mod1, txt_mod2 = self.txt_mod(vec)
@@ -160,12 +161,22 @@ class DoubleStreamBlock(nn.Module):
txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)
# run actual attention
attn = attention(torch.cat((txt_q, img_q), dim=2),
torch.cat((txt_k, img_k), dim=2),
torch.cat((txt_v, img_v), dim=2), pe=pe)
if self.flipped_img_txt:
# run actual attention
attn = attention(torch.cat((img_q, txt_q), dim=2),
torch.cat((img_k, txt_k), dim=2),
torch.cat((img_v, txt_v), dim=2),
pe=pe, mask=attn_mask)
txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1]:]
else:
# run actual attention
attn = attention(torch.cat((txt_q, img_q), dim=2),
torch.cat((txt_k, img_k), dim=2),
torch.cat((txt_v, img_v), dim=2),
pe=pe, mask=attn_mask)
txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:]
# calculate the img blocks
img = img + img_mod1.gate * self.img_attn.proj(img_attn)
@@ -217,7 +228,7 @@ class SingleStreamBlock(nn.Module):
self.mlp_act = nn.GELU(approximate="tanh")
self.modulation = Modulation(hidden_size, double=False, dtype=dtype, device=device, operations=operations)
def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
def forward(self, x: Tensor, vec: Tensor, pe: Tensor, attn_mask=None) -> Tensor:
mod, _ = self.modulation(vec)
x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)
@@ -226,7 +237,7 @@ class SingleStreamBlock(nn.Module):
q, k = self.norm(q, k, v)
# compute attention
attn = attention(q, k, v, pe=pe)
attn = attention(q, k, v, pe=pe, mask=attn_mask)
# compute activation in mlp stream, cat again and run second linear layer
output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
x += mod.gate * output

View File

@@ -1,14 +1,15 @@
import torch
from einops import rearrange
from torch import Tensor
from comfy.ldm.modules.attention import optimized_attention
import comfy.model_management
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor:
q, k = apply_rope(q, k, pe)
heads = q.shape[1]
x = optimized_attention(q, k, v, heads, skip_reshape=True)
x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask)
return x
@@ -33,3 +34,4 @@ def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor):
xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)

View File

@@ -4,6 +4,8 @@ from dataclasses import dataclass
import torch
from torch import Tensor, nn
from einops import rearrange, repeat
import comfy.ldm.common_dit
from .layers import (
DoubleStreamBlock,
@@ -14,9 +16,6 @@ from .layers import (
timestep_embedding,
)
from einops import rearrange, repeat
import comfy.ldm.common_dit
@dataclass
class FluxParams:
in_channels: int
@@ -98,8 +97,9 @@ class Flux(nn.Module):
timesteps: Tensor,
y: Tensor,
guidance: Tensor = None,
control=None,
control = None,
transformer_options={},
attn_mask: Tensor = None,
) -> Tensor:
patches_replace = transformer_options.get("patches_replace", {})
if img.ndim != 3 or txt.ndim != 3:
@@ -124,14 +124,27 @@ class Flux(nn.Module):
if ("double_block", i) in blocks_replace:
def block_wrap(args):
out = {}
out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"])
out["img"], out["txt"] = block(img=args["img"],
txt=args["txt"],
vec=args["vec"],
pe=args["pe"],
attn_mask=args.get("attn_mask"))
return out
out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe}, {"original_block": block_wrap})
out = blocks_replace[("double_block", i)]({"img": img,
"txt": txt,
"vec": vec,
"pe": pe,
"attn_mask": attn_mask},
{"original_block": block_wrap})
txt = out["txt"]
img = out["img"]
else:
img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
img, txt = block(img=img,
txt=txt,
vec=vec,
pe=pe,
attn_mask=attn_mask)
if control is not None: # Controlnet
control_i = control.get("input")
@@ -146,13 +159,20 @@ class Flux(nn.Module):
if ("single_block", i) in blocks_replace:
def block_wrap(args):
out = {}
out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"])
out["img"] = block(args["img"],
vec=args["vec"],
pe=args["pe"],
attn_mask=args.get("attn_mask"))
return out
out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe}, {"original_block": block_wrap})
out = blocks_replace[("single_block", i)]({"img": img,
"vec": vec,
"pe": pe,
"attn_mask": attn_mask},
{"original_block": block_wrap})
img = out["img"]
else:
img = block(img, vec=vec, pe=pe)
img = block(img, vec=vec, pe=pe, attn_mask=attn_mask)
if control is not None: # Controlnet
control_o = control.get("output")
@@ -181,5 +201,5 @@ class Flux(nn.Module):
img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options)
out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None))
return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h,:w]
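
For context on the `blocks_replace` plumbing above, a hypothetical patch callback has the shape below; the only interface change in this commit is that `args` now also carries `attn_mask`:

```
# Hypothetical sketch: a patch receives the block inputs plus the original block
# and must return the same keys the wrapped block produces.
def my_double_block_patch(args, extra):
    # args: {"img", "txt", "vec", "pe", "attn_mask"}
    return extra["original_block"](args)   # returns {"img": ..., "txt": ...}

transformer_options = {
    "patches_replace": {"dit": {("double_block", 0): my_double_block_patch}},
}
```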

View File

@@ -461,8 +461,6 @@ class AsymmDiTJoint(nn.Module):
pH, pW = H // self.patch_size, W // self.patch_size
x = self.embed_x(x) # (B, N, D), where N = T * H * W / patch_size ** 2
assert x.ndim == 3
B = x.size(0)
pH, pW = H // self.patch_size, W // self.patch_size
N = T * pH * pW

View File

@@ -0,0 +1,330 @@
#Based on Flux code because of weird hunyuan video code license.
import torch
import comfy.ldm.flux.layers
import comfy.ldm.modules.diffusionmodules.mmdit
from comfy.ldm.modules.attention import optimized_attention
from dataclasses import dataclass
from einops import repeat
from torch import Tensor, nn
from comfy.ldm.flux.layers import (
DoubleStreamBlock,
EmbedND,
LastLayer,
MLPEmbedder,
SingleStreamBlock,
timestep_embedding
)
import comfy.ldm.common_dit
@dataclass
class HunyuanVideoParams:
in_channels: int
out_channels: int
vec_in_dim: int
context_in_dim: int
hidden_size: int
mlp_ratio: float
num_heads: int
depth: int
depth_single_blocks: int
axes_dim: list
theta: int
patch_size: list
qkv_bias: bool
guidance_embed: bool
class SelfAttentionRef(nn.Module):
def __init__(self, dim: int, qkv_bias: bool = False, dtype=None, device=None, operations=None):
super().__init__()
self.qkv = operations.Linear(dim, dim * 3, bias=qkv_bias, dtype=dtype, device=device)
self.proj = operations.Linear(dim, dim, dtype=dtype, device=device)
class TokenRefinerBlock(nn.Module):
def __init__(
self,
hidden_size,
heads,
dtype=None,
device=None,
operations=None
):
super().__init__()
self.heads = heads
mlp_hidden_dim = hidden_size * 4
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
operations.Linear(hidden_size, 2 * hidden_size, bias=True, dtype=dtype, device=device),
)
self.norm1 = operations.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6, dtype=dtype, device=device)
self.self_attn = SelfAttentionRef(hidden_size, True, dtype=dtype, device=device, operations=operations)
self.norm2 = operations.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6, dtype=dtype, device=device)
self.mlp = nn.Sequential(
operations.Linear(hidden_size, mlp_hidden_dim, bias=True, dtype=dtype, device=device),
nn.SiLU(),
operations.Linear(mlp_hidden_dim, hidden_size, bias=True, dtype=dtype, device=device),
)
def forward(self, x, c, mask):
mod1, mod2 = self.adaLN_modulation(c).chunk(2, dim=1)
norm_x = self.norm1(x)
qkv = self.self_attn.qkv(norm_x)
q, k, v = qkv.reshape(qkv.shape[0], qkv.shape[1], 3, self.heads, -1).permute(2, 0, 3, 1, 4)
attn = optimized_attention(q, k, v, self.heads, mask=mask, skip_reshape=True)
x = x + self.self_attn.proj(attn) * mod1.unsqueeze(1)
x = x + self.mlp(self.norm2(x)) * mod2.unsqueeze(1)
return x
class IndividualTokenRefiner(nn.Module):
def __init__(
self,
hidden_size,
heads,
num_blocks,
dtype=None,
device=None,
operations=None
):
super().__init__()
self.blocks = nn.ModuleList(
[
TokenRefinerBlock(
hidden_size=hidden_size,
heads=heads,
dtype=dtype,
device=device,
operations=operations
)
for _ in range(num_blocks)
]
)
def forward(self, x, c, mask):
m = None
if mask is not None:
m = mask.view(mask.shape[0], 1, 1, mask.shape[1]).repeat(1, 1, mask.shape[1], 1)
m = m + m.transpose(2, 3)
for block in self.blocks:
x = block(x, c, m)
return x
class TokenRefiner(nn.Module):
def __init__(
self,
text_dim,
hidden_size,
heads,
num_blocks,
dtype=None,
device=None,
operations=None
):
super().__init__()
self.input_embedder = operations.Linear(text_dim, hidden_size, bias=True, dtype=dtype, device=device)
self.t_embedder = MLPEmbedder(256, hidden_size, dtype=dtype, device=device, operations=operations)
self.c_embedder = MLPEmbedder(text_dim, hidden_size, dtype=dtype, device=device, operations=operations)
self.individual_token_refiner = IndividualTokenRefiner(hidden_size, heads, num_blocks, dtype=dtype, device=device, operations=operations)
def forward(
self,
x,
timesteps,
mask,
):
t = self.t_embedder(timestep_embedding(timesteps, 256, time_factor=1.0).to(x.dtype))
# m = mask.float().unsqueeze(-1)
# c = (x.float() * m).sum(dim=1) / m.sum(dim=1) #TODO: the following works when the x.shape is the same length as the tokens but might break otherwise
c = x.sum(dim=1) / x.shape[1]
c = t + self.c_embedder(c.to(x.dtype))
x = self.input_embedder(x)
x = self.individual_token_refiner(x, c, mask)
return x
class HunyuanVideo(nn.Module):
"""
Transformer model for flow matching on sequences.
"""
def __init__(self, image_model=None, final_layer=True, dtype=None, device=None, operations=None, **kwargs):
super().__init__()
self.dtype = dtype
params = HunyuanVideoParams(**kwargs)
self.params = params
self.patch_size = params.patch_size
self.in_channels = params.in_channels
self.out_channels = params.out_channels
if params.hidden_size % params.num_heads != 0:
raise ValueError(
f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}"
)
pe_dim = params.hidden_size // params.num_heads
if sum(params.axes_dim) != pe_dim:
raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
self.hidden_size = params.hidden_size
self.num_heads = params.num_heads
self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
self.img_in = comfy.ldm.modules.diffusionmodules.mmdit.PatchEmbed(None, self.patch_size, self.in_channels, self.hidden_size, conv3d=True, dtype=dtype, device=device, operations=operations)
self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations)
self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size, dtype=dtype, device=device, operations=operations)
self.guidance_in = (
MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size, dtype=dtype, device=device, operations=operations) if params.guidance_embed else nn.Identity()
)
self.txt_in = TokenRefiner(params.context_in_dim, self.hidden_size, self.num_heads, 2, dtype=dtype, device=device, operations=operations)
self.double_blocks = nn.ModuleList(
[
DoubleStreamBlock(
self.hidden_size,
self.num_heads,
mlp_ratio=params.mlp_ratio,
qkv_bias=params.qkv_bias,
flipped_img_txt=True,
dtype=dtype, device=device, operations=operations
)
for _ in range(params.depth)
]
)
self.single_blocks = nn.ModuleList(
[
SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, dtype=dtype, device=device, operations=operations)
for _ in range(params.depth_single_blocks)
]
)
if final_layer:
self.final_layer = LastLayer(self.hidden_size, self.patch_size[-1], self.out_channels, dtype=dtype, device=device, operations=operations)
def forward_orig(
self,
img: Tensor,
img_ids: Tensor,
txt: Tensor,
txt_ids: Tensor,
txt_mask: Tensor,
timesteps: Tensor,
y: Tensor,
guidance: Tensor = None,
control=None,
transformer_options={},
) -> Tensor:
patches_replace = transformer_options.get("patches_replace", {})
initial_shape = list(img.shape)
# running on sequences img
img = self.img_in(img)
vec = self.time_in(timestep_embedding(timesteps, 256, time_factor=1.0).to(img.dtype))
vec = vec + self.vector_in(y[:, :self.params.vec_in_dim])
if self.params.guidance_embed:
if guidance is None:
raise ValueError("Didn't get guidance strength for guidance distilled model.")
vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
if txt_mask is not None and not torch.is_floating_point(txt_mask):
txt_mask = (txt_mask - 1).to(img.dtype) * torch.finfo(img.dtype).max
txt = self.txt_in(txt, timesteps, txt_mask)
ids = torch.cat((img_ids, txt_ids), dim=1)
pe = self.pe_embedder(ids)
img_len = img.shape[1]
if txt_mask is not None:
attn_mask_len = img_len + txt.shape[1]
attn_mask = torch.zeros((1, 1, attn_mask_len), dtype=img.dtype, device=img.device)
attn_mask[:, 0, img_len:] = txt_mask
else:
attn_mask = None
blocks_replace = patches_replace.get("dit", {})
for i, block in enumerate(self.double_blocks):
if ("double_block", i) in blocks_replace:
def block_wrap(args):
out = {}
out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"], attn_mask=args["attention_mask"])
return out
out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe, "attention_mask": attn_mask}, {"original_block": block_wrap})
txt = out["txt"]
img = out["img"]
else:
img, txt = block(img=img, txt=txt, vec=vec, pe=pe, attn_mask=attn_mask)
if control is not None: # Controlnet
control_i = control.get("input")
if i < len(control_i):
add = control_i[i]
if add is not None:
img += add
img = torch.cat((img, txt), 1)
for i, block in enumerate(self.single_blocks):
if ("single_block", i) in blocks_replace:
def block_wrap(args):
out = {}
out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], attn_mask=args["attention_mask"])
return out
out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe, "attention_mask": attn_mask}, {"original_block": block_wrap})
img = out["img"]
else:
img = block(img, vec=vec, pe=pe, attn_mask=attn_mask)
if control is not None: # Controlnet
control_o = control.get("output")
if i < len(control_o):
add = control_o[i]
if add is not None:
img[:, : img_len] += add
img = img[:, : img_len]
img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
shape = initial_shape[-3:]
for i in range(len(shape)):
shape[i] = shape[i] // self.patch_size[i]
img = img.reshape([img.shape[0]] + shape + [self.out_channels] + self.patch_size)
img = img.permute(0, 4, 1, 5, 2, 6, 3, 7)
img = img.reshape(initial_shape)
return img
def forward(self, x, timestep, context, y, guidance, attention_mask=None, control=None, transformer_options={}, **kwargs):
bs, c, t, h, w = x.shape
patch_size = self.patch_size
t_len = ((t + (patch_size[0] // 2)) // patch_size[0])
h_len = ((h + (patch_size[1] // 2)) // patch_size[1])
w_len = ((w + (patch_size[2] // 2)) // patch_size[2])
img_ids = torch.zeros((t_len, h_len, w_len, 3), device=x.device, dtype=x.dtype)
img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(0, t_len - 1, steps=t_len, device=x.device, dtype=x.dtype).reshape(-1, 1, 1)
img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).reshape(1, -1, 1)
img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).reshape(1, 1, -1)
img_ids = repeat(img_ids, "t h w c -> b (t h w) c", b=bs)
txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
out = self.forward_orig(x, img_ids, context, txt_ids, attention_mask, timestep, y, guidance, control, transformer_options)
return out
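
To make the positional-id layout in `forward()` concrete, a standalone sketch with a hypothetical token grid; each token gets its (t, h, w) coordinate for the rotary embedding:

```
import torch
from einops import repeat

bs, t_len, h_len, w_len = 2, 3, 4, 5
img_ids = torch.zeros((t_len, h_len, w_len, 3))
img_ids[:, :, :, 0] = torch.linspace(0, t_len - 1, steps=t_len).reshape(-1, 1, 1)
img_ids[:, :, :, 1] = torch.linspace(0, h_len - 1, steps=h_len).reshape(1, -1, 1)
img_ids[:, :, :, 2] = torch.linspace(0, w_len - 1, steps=w_len).reshape(1, 1, -1)
img_ids = repeat(img_ids, "t h w c -> b (t h w) c", b=bs)
assert img_ids.shape == (bs, t_len * h_len * w_len, 3)
```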

View File

@@ -164,9 +164,6 @@ class HunYuanControlNet(nn.Module):
),
)
# Image embedding
num_patches = self.x_embedder.num_patches
# HunYuanDiT Blocks
self.blocks = nn.ModuleList(
[

View File

@@ -248,9 +248,6 @@ class HunYuanDiT(nn.Module):
operations.Linear(hidden_size * 4, hidden_size, bias=True, dtype=dtype, device=device),
)
# Image embedding
num_patches = self.x_embedder.num_patches
# HunYuanDiT Blocks
self.blocks = nn.ModuleList([
HunYuanDiTBlock(hidden_size=hidden_size,

View File

@@ -1,10 +1,12 @@
import logging
import math
import torch
from contextlib import contextmanager
from typing import Any, Dict, Tuple, Union
from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from comfy.ldm.util import instantiate_from_config
from comfy.ldm.util import get_obj_from_str, instantiate_from_config
from comfy.ldm.modules.ema import LitEma
import comfy.ops
@@ -52,7 +54,7 @@ class AbstractAutoencoder(torch.nn.Module):
if self.use_ema:
self.model_ema = LitEma(self, decay=ema_decay)
logpy.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
logging.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
def get_input(self, batch) -> Any:
raise NotImplementedError()
@@ -68,14 +70,14 @@
self.model_ema.store(self.parameters())
self.model_ema.copy_to(self)
if context is not None:
logpy.info(f"{context}: Switched to EMA weights")
logging.info(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
logpy.info(f"{context}: Restored training weights")
logging.info(f"{context}: Restored training weights")
def encode(self, *args, **kwargs) -> torch.Tensor:
raise NotImplementedError("encode()-method of abstract base class called")
@@ -84,7 +86,7 @@
raise NotImplementedError("decode()-method of abstract base class called")
def instantiate_optimizer_from_config(self, params, lr, cfg):
logpy.info(f"loading >>> {cfg['target']} <<< optimizer from config")
logging.info(f"loading >>> {cfg['target']} <<< optimizer from config")
return get_obj_from_str(cfg["target"])(
params, lr=lr, **cfg.get("params", dict())
)
@@ -112,7 +114,7 @@ class AutoencodingEngine(AbstractAutoencoder):
self.encoder: torch.nn.Module = instantiate_from_config(encoder_config)
self.decoder: torch.nn.Module = instantiate_from_config(decoder_config)
self.regularization: AbstractRegularizer = instantiate_from_config(
self.regularization = instantiate_from_config(
regularizer_config
)
@@ -160,12 +162,19 @@ class AutoencodingEngineLegacy(AutoencodingEngine):
},
**kwargs,
)
self.quant_conv = comfy.ops.disable_weight_init.Conv2d(
if ddconfig.get("conv3d", False):
conv_op = comfy.ops.disable_weight_init.Conv3d
else:
conv_op = comfy.ops.disable_weight_init.Conv2d
self.quant_conv = conv_op(
(1 + ddconfig["double_z"]) * ddconfig["z_channels"],
(1 + ddconfig["double_z"]) * embed_dim,
1,
)
self.post_quant_conv = comfy.ops.disable_weight_init.Conv2d(embed_dim, ddconfig["z_channels"], 1)
self.post_quant_conv = conv_op(embed_dim, ddconfig["z_channels"], 1)
self.embed_dim = embed_dim
def get_autoencoder_params(self) -> list:

View File

@@ -157,8 +157,6 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
b, _, dim_head = query.shape
dim_head //= heads
scale = dim_head ** -0.5
if skip_reshape:
query = query.reshape(b * heads, -1, dim_head)
value = value.reshape(b * heads, -1, dim_head)
@@ -177,9 +175,8 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
bytes_per_token = torch.finfo(query.dtype).bits//8
batch_x_heads, q_tokens, _ = query.shape
_, _, k_tokens = key.shape
qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)
mem_free_total, _ = model_management.get_free_memory(query.device, True)
kv_chunk_size_min = None
kv_chunk_size = None
@@ -230,7 +227,6 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
scale = dim_head ** -0.5
h = heads
if skip_reshape:
q, k, v = map(
lambda t: t.reshape(b * heads, -1, dim_head),
@@ -344,12 +340,9 @@ except:
pass
def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
if skip_reshape:
b, _, _, dim_head = q.shape
else:
b, _, dim_head = q.shape
dim_head //= heads
b = q.shape[0]
dim_head = q.shape[-1]
# check to make sure xformers isn't broken
disabled_xformers = False
if BROKEN_XFORMERS:
@@ -364,35 +357,44 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None, skip_resh
return attention_pytorch(q, k, v, heads, mask, skip_reshape=skip_reshape)
if skip_reshape:
q, k, v = map(
lambda t: t.reshape(b * heads, -1, dim_head),
# b h k d -> b k h d
q, k, v = map(
lambda t: t.permute(0, 2, 1, 3),
(q, k, v),
)
# actually do the reshaping
else:
dim_head //= heads
q, k, v = map(
lambda t: t.reshape(b, -1, heads, dim_head),
(q, k, v),
)
if mask is not None:
# add a singleton batch dimension
if mask.ndim == 2:
mask = mask.unsqueeze(0)
# add a singleton heads dimension
if mask.ndim == 3:
mask = mask.unsqueeze(1)
# pad to a multiple of 8
pad = 8 - mask.shape[-1] % 8
mask_out = torch.empty([q.shape[0], q.shape[2], q.shape[1], mask.shape[-1] + pad], dtype=q.dtype, device=q.device)
# the xformers docs says that it's allowed to have a mask of shape (1, Nq, Nk)
# but when using separated heads, the shape has to be (B, H, Nq, Nk)
# in flux, this matrix ends up being over 1GB
# here, we create a mask with the same batch/head size as the input mask (potentially singleton or full)
mask_out = torch.empty([mask.shape[0], mask.shape[1], q.shape[1], mask.shape[-1] + pad], dtype=q.dtype, device=q.device)
mask_out[..., :mask.shape[-1]] = mask
# doesn't this remove the padding again??
mask = mask_out[..., :mask.shape[-1]]
mask = mask.expand(b, heads, -1, -1)
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
if skip_reshape:
out = (
out.unsqueeze(0)
.reshape(b, heads, -1, dim_head)
.permute(0, 2, 1, 3)
.reshape(b, -1, heads * dim_head)
)
else:
out = (
out.reshape(b, -1, heads * dim_head)
)
out = (
out.reshape(b, -1, heads * dim_head)
)
return out
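
The pad-then-slice in the mask handling above is an alignment trick, not a shape change; a minimal sketch with hypothetical sizes:

```
import torch

q_len, k_len = 10, 10
mask = torch.zeros(1, 1, q_len, k_len)
pad = 8 - k_len % 8
mask_out = torch.empty(1, 1, q_len, k_len + pad)
mask_out[..., :k_len] = mask
mask = mask_out[..., :k_len]     # original width, but backed by 8-aligned rows
assert mask.shape[-1] == k_len and mask_out.stride(2) % 8 == 0
```
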
@@ -414,15 +416,34 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha
(q, k, v),
)
if SDP_BATCH_LIMIT >= q.shape[0]:
if mask is not None:
# add a batch dimension if there isn't already one
if mask.ndim == 2:
mask = mask.unsqueeze(0)
# add a heads dimension if there isn't already one
if mask.ndim == 3:
mask = mask.unsqueeze(1)
if SDP_BATCH_LIMIT >= b:
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
out = (
out.transpose(1, 2).reshape(b, -1, heads * dim_head)
)
else:
out = torch.empty((q.shape[0], q.shape[2], heads * dim_head), dtype=q.dtype, layout=q.layout, device=q.device)
for i in range(0, q.shape[0], SDP_BATCH_LIMIT):
out[i : i + SDP_BATCH_LIMIT] = torch.nn.functional.scaled_dot_product_attention(q[i : i + SDP_BATCH_LIMIT], k[i : i + SDP_BATCH_LIMIT], v[i : i + SDP_BATCH_LIMIT], attn_mask=mask, dropout_p=0.0, is_causal=False).transpose(1, 2).reshape(-1, q.shape[2], heads * dim_head)
out = torch.empty((b, q.shape[2], heads * dim_head), dtype=q.dtype, layout=q.layout, device=q.device)
for i in range(0, b, SDP_BATCH_LIMIT):
m = mask
if mask is not None:
if mask.shape[0] > 1:
m = mask[i : i + SDP_BATCH_LIMIT]
out[i : i + SDP_BATCH_LIMIT] = torch.nn.functional.scaled_dot_product_attention(
q[i : i + SDP_BATCH_LIMIT],
k[i : i + SDP_BATCH_LIMIT],
v[i : i + SDP_BATCH_LIMIT],
attn_mask=m,
dropout_p=0.0, is_causal=False
).transpose(1, 2).reshape(-1, q.shape[2], heads * dim_head)
return out
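
A compact sketch of the new mask normalization (hypothetical sizes): a 2-D additive mask picks up singleton batch and head dims so scaled_dot_product_attention can broadcast it:

```
import torch
import torch.nn.functional as F

b, heads, nq, nk, dim_head = 2, 4, 6, 6, 8
q = torch.randn(b, heads, nq, dim_head)
k = torch.randn(b, heads, nk, dim_head)
v = torch.randn(b, heads, nk, dim_head)

mask = torch.zeros(nq, nk)       # additive mask; -inf entries would block attention
if mask.ndim == 2:
    mask = mask.unsqueeze(0)     # -> (1, Nq, Nk)
if mask.ndim == 3:
    mask = mask.unsqueeze(1)     # -> (1, 1, Nq, Nk), broadcasts over batch and heads
out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
assert out.shape == (b, heads, nq, dim_head)
```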

View File

@@ -1,3 +1,4 @@
from functools import partial
from typing import Dict, Optional, List
import numpy as np
@@ -70,45 +71,33 @@ class PatchEmbed(nn.Module):
strict_img_size: bool = True,
dynamic_img_pad: bool = True,
padding_mode='circular',
conv3d=False,
dtype=None,
device=None,
operations=None,
):
super().__init__()
self.patch_size = (patch_size, patch_size)
try:
len(patch_size)
self.patch_size = patch_size
except:
if conv3d:
self.patch_size = (patch_size, patch_size, patch_size)
else:
self.patch_size = (patch_size, patch_size)
self.padding_mode = padding_mode
if img_size is not None:
self.img_size = (img_size, img_size)
self.grid_size = tuple([s // p for s, p in zip(self.img_size, self.patch_size)])
self.num_patches = self.grid_size[0] * self.grid_size[1]
else:
self.img_size = None
self.grid_size = None
self.num_patches = None
# flatten spatial dim and transpose to channels last, kept for bwd compat
self.flatten = flatten
self.strict_img_size = strict_img_size
self.dynamic_img_pad = dynamic_img_pad
self.proj = operations.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias, dtype=dtype, device=device)
if conv3d:
self.proj = operations.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias, dtype=dtype, device=device)
else:
self.proj = operations.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias, dtype=dtype, device=device)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
# B, C, H, W = x.shape
# if self.img_size is not None:
# if self.strict_img_size:
# _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).")
# _assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).")
# elif not self.dynamic_img_pad:
# _assert(
# H % self.patch_size[0] == 0,
# f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})."
# )
# _assert(
# W % self.patch_size[1] == 0,
# f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})."
# )
if self.dynamic_img_pad:
x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size, padding_mode=self.padding_mode)
x = self.proj(x)

View File

@@ -43,51 +43,100 @@ def Normalize(in_channels, num_groups=32):
return ops.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
class VideoConv3d(nn.Module):
def __init__(self, n_channels, out_channels, kernel_size, stride=1, dilation=1, padding_mode='replicate', padding=1, **kwargs):
super().__init__()
self.padding_mode = padding_mode
if padding != 0:
padding = (padding, padding, padding, padding, kernel_size - 1, 0)
else:
kwargs["padding"] = padding
self.padding = padding
self.conv = ops.Conv3d(n_channels, out_channels, kernel_size, stride=stride, dilation=dilation, **kwargs)
def forward(self, x):
if self.padding != 0:
x = torch.nn.functional.pad(x, self.padding, mode=self.padding_mode)
return self.conv(x)
def interpolate_up(x, scale_factor):
try:
return torch.nn.functional.interpolate(x, scale_factor=scale_factor, mode="nearest")
except: #operation not implemented for bf16
orig_shape = list(x.shape)
out_shape = orig_shape[:2]
for i in range(len(orig_shape) - 2):
out_shape.append(round(orig_shape[i + 2] * scale_factor[i]))
out = torch.empty(out_shape, dtype=x.dtype, layout=x.layout, device=x.device)
split = 8
l = out.shape[1] // split
for i in range(0, out.shape[1], l):
out[:,i:i+l] = torch.nn.functional.interpolate(x[:,i:i+l].to(torch.float32), scale_factor=scale_factor, mode="nearest").to(x.dtype)
return out
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv):
def __init__(self, in_channels, with_conv, conv_op=ops.Conv2d, scale_factor=2.0):
super().__init__()
self.with_conv = with_conv
self.scale_factor = scale_factor
if self.with_conv:
self.conv = ops.Conv2d(in_channels,
self.conv = conv_op(in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
try:
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
except: #operation not implemented for bf16
b, c, h, w = x.shape
out = torch.empty((b, c, h*2, w*2), dtype=x.dtype, layout=x.layout, device=x.device)
split = 8
l = out.shape[1] // split
for i in range(0, out.shape[1], l):
out[:,i:i+l] = torch.nn.functional.interpolate(x[:,i:i+l].to(torch.float32), scale_factor=2.0, mode="nearest").to(x.dtype)
del x
x = out
scale_factor = self.scale_factor
if not isinstance(scale_factor, tuple):
scale_factor = (scale_factor,) * (x.ndim - 2)
if x.ndim == 5 and scale_factor[0] > 1.0:
t = x.shape[2]
if t > 1:
a, b = x.split((1, t - 1), dim=2)
del x
b = interpolate_up(b, scale_factor)
else:
a = x
a = interpolate_up(a.squeeze(2), scale_factor=scale_factor[1:]).unsqueeze(2)
if t > 1:
x = torch.cat((a, b), dim=2)
else:
x = a
else:
x = interpolate_up(x, self.scale_factor)
if self.with_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
def __init__(self, in_channels, with_conv, stride=2, conv_op=ops.Conv2d):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = ops.Conv2d(in_channels,
self.conv = conv_op(in_channels,
in_channels,
kernel_size=3,
stride=2,
stride=stride,
padding=0)
def forward(self, x):
if self.with_conv:
pad = (0,1,0,1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
if x.ndim == 4:
pad = (0, 1, 0, 1)
mode = "constant"
x = torch.nn.functional.pad(x, pad, mode=mode, value=0)
elif x.ndim == 5:
pad = (1, 1, 1, 1, 2, 0)
mode = "replicate"
x = torch.nn.functional.pad(x, pad, mode=mode)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
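
The first-frame special case in the new `Upsample.forward` keeps frame 0 out of the temporal interpolation, so a latent that starts from a still image decodes with a clean first frame; a sketch with hypothetical sizes:

```
import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 5, 8, 8)                 # (B, C, T, H, W)
scale_factor = (2.0, 2.0, 2.0)
t = x.shape[2]
a, b = x.split((1, t - 1), dim=2)              # first frame vs. the rest
b = F.interpolate(b, scale_factor=scale_factor, mode="nearest")       # (1, 4, 8, 16, 16)
a = F.interpolate(a.squeeze(2), scale_factor=scale_factor[1:], mode="nearest").unsqueeze(2)
x = torch.cat((a, b), dim=2)                   # T: 1 + (T - 1) * 2 = 9
assert x.shape == (1, 4, 9, 16, 16)
```
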
@@ -96,7 +145,7 @@ class Downsample(nn.Module):
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
dropout, temb_channels=512):
dropout, temb_channels=512, conv_op=ops.Conv2d):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
@@ -105,7 +154,7 @@ class ResnetBlock(nn.Module):
self.swish = torch.nn.SiLU(inplace=True)
self.norm1 = Normalize(in_channels)
self.conv1 = ops.Conv2d(in_channels,
self.conv1 = conv_op(in_channels,
out_channels,
kernel_size=3,
stride=1,
@@ -115,20 +164,20 @@
out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout, inplace=True)
self.conv2 = ops.Conv2d(out_channels,
self.conv2 = conv_op(out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = ops.Conv2d(in_channels,
self.conv_shortcut = conv_op(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
else:
self.nin_shortcut = ops.Conv2d(in_channels,
self.nin_shortcut = conv_op(in_channels,
out_channels,
kernel_size=1,
stride=1,
@@ -162,7 +211,6 @@ def slice_attention(q, k, v):
mem_free_total = model_management.get_free_memory(q.device)
gb = 1024 ** 3
tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
modifier = 3 if q.element_size() == 2 else 2.5
mem_required = tensor_size * modifier
@@ -195,21 +243,25 @@ def slice_attention(q, k, v):
def normal_attention(q, k, v):
# compute attention
b,c,h,w = q.shape
orig_shape = q.shape
b = orig_shape[0]
c = orig_shape[1]
q = q.reshape(b,c,h*w)
q = q.permute(0,2,1) # b,hw,c
k = k.reshape(b,c,h*w) # b,c,hw
v = v.reshape(b,c,h*w)
q = q.reshape(b, c, -1)
q = q.permute(0, 2, 1) # b,hw,c
k = k.reshape(b, c, -1) # b,c,hw
v = v.reshape(b, c, -1)
r1 = slice_attention(q, k, v)
h_ = r1.reshape(b,c,h,w)
h_ = r1.reshape(orig_shape)
del r1
return h_
def xformers_attention(q, k, v):
# compute attention
B, C, H, W = q.shape
orig_shape = q.shape
B = orig_shape[0]
C = orig_shape[1]
q, k, v = map(
lambda t: t.view(B, C, -1).transpose(1, 2).contiguous(),
(q, k, v),
@@ -217,14 +269,16 @@ def xformers_attention(q, k, v):
try:
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
out = out.transpose(1, 2).reshape(B, C, H, W)
except NotImplementedError as e:
out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
out = out.transpose(1, 2).reshape(orig_shape)
except NotImplementedError:
out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(orig_shape)
return out
def pytorch_attention(q, k, v):
# compute attention
B, C, H, W = q.shape
orig_shape = q.shape
B = orig_shape[0]
C = orig_shape[1]
q, k, v = map(
lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
(q, k, v),
@@ -232,35 +286,35 @@ def pytorch_attention(q, k, v):
try:
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
out = out.transpose(2, 3).reshape(B, C, H, W)
except model_management.OOM_EXCEPTION as e:
out = out.transpose(2, 3).reshape(orig_shape)
except model_management.OOM_EXCEPTION:
logging.warning("scaled_dot_product_attention OOMed: switched to slice attention")
out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(orig_shape)
return out
class AttnBlock(nn.Module):
def __init__(self, in_channels):
def __init__(self, in_channels, conv_op=ops.Conv2d):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = ops.Conv2d(in_channels,
self.q = conv_op(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = ops.Conv2d(in_channels,
self.k = conv_op(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = ops.Conv2d(in_channels,
self.v = conv_op(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = ops.Conv2d(in_channels,
self.proj_out = conv_op(in_channels,
in_channels,
kernel_size=1,
stride=1,
@@ -290,8 +344,8 @@ class AttnBlock(nn.Module):
return x+h_
def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
return AttnBlock(in_channels)
def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None, conv_op=ops.Conv2d):
return AttnBlock(in_channels, conv_op=conv_op)
class Model(nn.Module):
@@ -450,6 +504,7 @@ class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
conv3d=False, time_compress=None,
**ignore_kwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
@@ -460,8 +515,15 @@ class Encoder(nn.Module):
self.resolution = resolution
self.in_channels = in_channels
if conv3d:
conv_op = VideoConv3d
mid_attn_conv_op = ops.Conv3d
else:
conv_op = ops.Conv2d
mid_attn_conv_op = ops.Conv2d
# downsampling
self.conv_in = ops.Conv2d(in_channels,
self.conv_in = conv_op(in_channels,
self.ch,
kernel_size=3,
stride=1,
@@ -480,15 +542,20 @@ class Encoder(nn.Module):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
dropout=dropout,
conv_op=conv_op))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
attn.append(make_attn(block_in, attn_type=attn_type, conv_op=conv_op))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
stride = 2
if time_compress is not None:
if (self.num_resolutions - 1 - i_level) > math.log2(time_compress):
stride = (1, 2, 2)
down.downsample = Downsample(block_in, resamp_with_conv, stride=stride, conv_op=conv_op)
curr_res = curr_res // 2
self.down.append(down)
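
The `time_compress` logic above throttles how many levels downsample time; a quick check of the schedule (hypothetical settings): only the deepest log2(time_compress) levels use stride 2 on all axes, the earlier ones stay spatial-only.

```
import math

time_compress, num_resolutions = 4, 4
for i_level in range(num_resolutions - 1):         # levels that have a Downsample
    stride = 2
    if (num_resolutions - 1 - i_level) > math.log2(time_compress):
        stride = (1, 2, 2)                         # spatial-only
    print(i_level, stride)
# 0 (1, 2, 2); 1 2; 2 2  -> two temporal halvings = 4x time compression, 8x spatial
```
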
@@ -497,16 +564,18 @@ class Encoder(nn.Module):
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
dropout=dropout,
conv_op=conv_op)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type, conv_op=mid_attn_conv_op)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
dropout=dropout,
conv_op=conv_op)
# end
self.norm_out = Normalize(block_in)
self.conv_out = ops.Conv2d(block_in,
self.conv_out = conv_op(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
@@ -544,9 +613,10 @@ class Decoder(nn.Module):
conv_out_op=ops.Conv2d,
resnet_op=ResnetBlock,
attn_op=AttnBlock,
conv3d=False,
time_compress=None,
**ignorekwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
@@ -556,8 +626,15 @@ class Decoder(nn.Module):
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
if conv3d:
conv_op = VideoConv3d
conv_out_op = VideoConv3d
mid_attn_conv_op = ops.Conv3d
else:
conv_op = ops.Conv2d
mid_attn_conv_op = ops.Conv2d
# compute block_in and curr_res at lowest res
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
@@ -565,7 +642,7 @@ class Decoder(nn.Module):
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = ops.Conv2d(z_channels,
self.conv_in = conv_op(z_channels,
block_in,
kernel_size=3,
stride=1,
@@ -576,12 +653,14 @@ class Decoder(nn.Module):
self.mid.block_1 = resnet_op(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = attn_op(block_in)
dropout=dropout,
conv_op=conv_op)
self.mid.attn_1 = attn_op(block_in, conv_op=mid_attn_conv_op)
self.mid.block_2 = resnet_op(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
dropout=dropout,
conv_op=conv_op)
# upsampling
self.up = nn.ModuleList()
@@ -593,15 +672,21 @@ class Decoder(nn.Module):
block.append(resnet_op(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
dropout=dropout,
conv_op=conv_op))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(attn_op(block_in))
attn.append(attn_op(block_in, conv_op=conv_op))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
scale_factor = 2.0
if time_compress is not None:
if i_level > math.log2(time_compress):
scale_factor = (1.0, 2.0, 2.0)
up.upsample = Upsample(block_in, resamp_with_conv, conv_op=conv_op, scale_factor=scale_factor)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order

View File

@@ -22,7 +22,6 @@ except ImportError:
from typing import Optional, NamedTuple, List
from typing_extensions import Protocol
from torch import Tensor
from typing import List
from comfy import model_management
@@ -172,7 +171,7 @@ def _get_attention_scores_no_kv_chunking(
del attn_scores
except model_management.OOM_EXCEPTION:
logging.warning("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead")
attn_scores -= attn_scores.max(dim=-1, keepdim=True).values
attn_scores -= attn_scores.max(dim=-1, keepdim=True).values # noqa: F821 attn_scores is not defined
torch.exp(attn_scores, out=attn_scores)
summed = torch.sum(attn_scores, dim=-1, keepdim=True)
attn_scores /= summed

View File

@@ -194,6 +194,7 @@ def make_time_attn(
attn_kwargs=None,
alpha: float = 0,
merge_strategy: str = "learned",
conv_op=ops.Conv2d,
):
return partialclass(
AttnVideoBlock, in_channels, alpha=alpha, merge_strategy=merge_strategy

View File

@@ -133,7 +133,6 @@ class AdamWwithEMAandWings(optim.Optimizer):
exp_avgs = []
exp_avg_sqs = []
ema_params_with_grad = []
state_sums = []
max_exp_avg_sqs = []
state_steps = []
amsgrad = group['amsgrad']

View File

@@ -31,6 +31,7 @@ import comfy.ldm.audio.dit
import comfy.ldm.audio.embedders
import comfy.ldm.flux.model
import comfy.ldm.lightricks.model
import comfy.ldm.hunyuan_video.model
import comfy.model_management
import comfy.patcher_extension
@@ -427,7 +428,6 @@ class SVD_img2vid(BaseModel):
latent_image = kwargs.get("concat_latent_image", None)
noise = kwargs.get("noise", None)
device = kwargs["device"]
if latent_image is None:
latent_image = torch.zeros_like(noise)
@@ -687,6 +687,7 @@ class StableAudio1(BaseModel):
sd["{}{}".format(k, l)] = s[l]
return sd
class HunyuanDiT(BaseModel):
def __init__(self, model_config, model_type=ModelType.V_PREDICTION, device=None):
super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hydit.models.HunYuanDiT)
@@ -711,8 +712,6 @@ class HunyuanDiT(BaseModel):
width = kwargs.get("width", 768)
height = kwargs.get("height", 768)
crop_w = kwargs.get("crop_w", 0)
crop_h = kwargs.get("crop_h", 0)
target_width = kwargs.get("target_width", width)
target_height = kwargs.get("target_height", height)
@@ -769,6 +768,16 @@ class Flux(BaseModel):
cross_attn = kwargs.get("cross_attn", None)
if cross_attn is not None:
out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
# upscale the attention mask, since now we know the token resolution it has to match
attention_mask = kwargs.get("attention_mask", None)
if attention_mask is not None:
shape = kwargs["noise"].shape
mask_ref_size = kwargs["attention_mask_img_shape"]
# the model will pad to the patch size, and then divide
# essentially dividing and rounding up
(h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size))
attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok))
out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 3.5)]))
return out
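
The token-grid arithmetic above is a divide-rounding-up per axis, matching the padding the model applies before patchifying; with hypothetical latent sizes:

```
import math

patch_size = 2
latent_h, latent_w = 107, 96               # noise.shape[2], noise.shape[3]
h_tok = math.ceil(latent_h / patch_size)   # 54: the 107-row latent pads up to 108
w_tok = math.ceil(latent_w / patch_size)   # 48
assert (h_tok, w_tok) == (54, 48)
```
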
@@ -810,3 +819,21 @@ class LTXV(BaseModel):
out['frame_rate'] = comfy.conds.CONDConstant(kwargs.get("frame_rate", 25))
return out
class HunyuanVideo(BaseModel):
def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hunyuan_video.model.HunyuanVideo)
def encode_adm(self, **kwargs):
return kwargs["pooled_output"]
def extra_conds(self, **kwargs):
out = super().extra_conds(**kwargs)
attention_mask = kwargs.get("attention_mask", None)
if attention_mask is not None:
out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
cross_attn = kwargs.get("cross_attn", None)
if cross_attn is not None:
out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 6.0)]))
return out

View File

@@ -133,6 +133,26 @@ def detect_unet_config(state_dict, key_prefix):
unet_config["image_model"] = "hydit1"
return unet_config
if '{}txt_in.individual_token_refiner.blocks.0.norm1.weight'.format(key_prefix) in state_dict_keys: #Hunyuan Video
dit_config = {}
dit_config["image_model"] = "hunyuan_video"
dit_config["in_channels"] = 16
dit_config["patch_size"] = [1, 2, 2]
dit_config["out_channels"] = 16
dit_config["vec_in_dim"] = 768
dit_config["context_in_dim"] = 4096
dit_config["hidden_size"] = 3072
dit_config["mlp_ratio"] = 4.0
dit_config["num_heads"] = 24
dit_config["depth"] = count_blocks(state_dict_keys, '{}double_blocks.'.format(key_prefix) + '{}.')
dit_config["depth_single_blocks"] = count_blocks(state_dict_keys, '{}single_blocks.'.format(key_prefix) + '{}.')
dit_config["axes_dim"] = [16, 56, 56]
dit_config["theta"] = 256
dit_config["qkv_bias"] = True
guidance_keys = list(filter(lambda a: a.startswith("{}guidance_in.".format(key_prefix)), state_dict_keys))
dit_config["guidance_embed"] = len(guidance_keys) > 0
return dit_config
if '{}double_blocks.0.img_attn.norm.key_norm.scale'.format(key_prefix) in state_dict_keys: #Flux
dit_config = {}
dit_config["image_model"] = "flux"
@@ -216,7 +236,6 @@ def detect_unet_config(state_dict, key_prefix):
num_res_blocks = []
channel_mult = []
attention_resolutions = []
transformer_depth = []
transformer_depth_output = []
context_dim = None
@@ -388,7 +407,6 @@ def convert_config(unet_config):
t_out += [d] * (res + 1)
s *= 2
transformer_depth = t_in
transformer_depth_output = t_out
new_config["transformer_depth"] = t_in
new_config["transformer_depth_output"] = t_out
new_config["transformer_depth_middle"] = transformer_depth_middle

View File

@@ -512,7 +512,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
if vram_set_state == VRAMState.NO_VRAM:
lowvram_model_memory = 64 * 1024 * 1024
cur_loaded_model = loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
current_loaded_models.insert(0, loaded_model)
return
@@ -581,7 +581,7 @@ def unet_offload_device():
def unet_inital_load_device(parameters, dtype):
torch_dev = get_torch_device()
if vram_state == VRAMState.HIGH_VRAM:
if vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.SHARED:
return torch_dev
cpu_dev = torch.device("cpu")
@@ -695,7 +695,7 @@ def text_encoder_initial_device(load_device, offload_device, model_size=0):
return offload_device
if is_device_mps(load_device):
return offload_device
return load_device
mem_l = get_free_memory(load_device)
mem_o = get_free_memory(offload_device)

View File

@@ -113,7 +113,7 @@ class WrapperExecutor:
def _create_next_executor(self) -> 'WrapperExecutor':
new_idx = self.idx + 1
if new_idx > len(self.wrappers):
raise Exception(f"Wrapper idx exceeded available wrappers; something went very wrong.")
raise Exception("Wrapper idx exceeded available wrappers; something went very wrong.")
if self.class_obj is None:
return WrapperExecutor.new_executor(self.original, self.wrappers, new_idx)
return WrapperExecutor.new_class_executor(self.original, self.class_obj, self.wrappers, new_idx)

View File

@@ -103,7 +103,6 @@ def cleanup_additional_models(models):
def prepare_sampling(model: 'ModelPatcher', noise_shape, conds):
device = model.load_device
real_model: 'BaseModel' = None
models, inference_memory = get_additional_models(conds, model.model_dtype())
models += model.get_nested_additional_models() # TODO: does this require inference_memory update?

View File

@@ -130,11 +130,6 @@ def can_concat_cond(c1, c2):
return cond_equal_size(c1.conditioning, c2.conditioning)
def cond_cat(c_list):
c_crossattn = []
c_concat = []
c_adm = []
crossattn_max_len = 0
temp = {}
for x in c_list:
for k in x:
@@ -608,8 +603,6 @@ def pre_run_control(model, conds):
for t in range(len(conds)):
x = conds[t]
timestep_start = None
timestep_end = None
percent_to_timestep_function = lambda a: s.percent_to_sigma(a)
if 'control' in x:
x['control'].pre_run(model, percent_to_timestep_function)

View File

@@ -31,6 +31,7 @@ import comfy.text_encoders.flux
import comfy.text_encoders.long_clipl
import comfy.text_encoders.genmo
import comfy.text_encoders.lt
import comfy.text_encoders.hunyuan_video
import comfy.model_patcher
import comfy.lora
@@ -306,12 +307,23 @@ class VAE:
self.upscale_ratio = 4
self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.weight"].shape[1]
if 'quant_conv.weight' in sd:
self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
if 'post_quant_conv.weight' in sd:
self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=sd['post_quant_conv.weight'].shape[1])
else:
self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"},
encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': ddconfig},
decoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Decoder", 'params': ddconfig})
elif "decoder.conv_in.conv.weight" in sd:
ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
ddconfig["conv3d"] = True
ddconfig["time_compress"] = 4
self.upscale_ratio = (lambda a: max(0, a * 4 - 3), 8, 8)
self.latent_dim = 3
self.latent_channels = ddconfig['z_channels'] = sd["decoder.conv_in.conv.weight"].shape[1]
self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=sd['post_quant_conv.weight'].shape[1])
self.memory_used_decode = lambda shape, dtype: (1500 * shape[2] * shape[3] * shape[4] * (4 * 8 * 8)) * model_management.dtype_size(dtype)
self.memory_used_encode = lambda shape, dtype: (900 * max(shape[2], 2) * shape[3] * shape[4]) * model_management.dtype_size(dtype)
elif "decoder.layers.1.layers.0.beta" in sd:
self.first_stage_model = AudioOobleckVAE()
self.memory_used_encode = lambda shape, dtype: (1000 * shape[2]) * model_management.dtype_size(dtype)
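A quick numeric check on the Hunyuan video VAE branch above (a sketch, not ComfyUI code): with `time_compress = 4`, the temporal entry of `upscale_ratio` maps latent frames `t` to pixel frames `4t - 3`, while height and width scale by 8. The example shape is an assumption chosen to match the video-node defaults later in this diff.

```python
# Minimal sketch of how the upscale_ratio tuple above maps a video latent
# shape to a pixel shape. Per-axis rules read off the branch above:
# time: 4x compression with a 3-frame offset; height/width: plain 8x.
def decoded_shape(latent_shape):
    b, c, t, h, w = latent_shape
    temporal = lambda a: max(0, a * 4 - 3)    # time_compress = 4
    return (b, 3, temporal(t), h * 8, w * 8)  # 3 output channels (RGB)

print(decoded_shape((1, 16, 7, 60, 106)))  # -> (1, 3, 25, 480, 848)
```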
@ -435,7 +447,7 @@ class VAE:
if pixel_samples is None:
pixel_samples = torch.empty((samples_in.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
pixel_samples[x:x+batch_number] = out
except model_management.OOM_EXCEPTION as e:
except model_management.OOM_EXCEPTION:
logging.warning("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
dims = samples_in.ndim - 2
if dims == 1:
@ -490,7 +502,7 @@ class VAE:
samples = torch.empty((pixel_samples.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
samples[x:x + batch_number] = out
except model_management.OOM_EXCEPTION as e:
except model_management.OOM_EXCEPTION:
logging.warning("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
if len(pixel_samples.shape) == 3:
samples = self.encode_tiled_1d(pixel_samples)
@ -544,6 +556,7 @@ class CLIPType(Enum):
FLUX = 6
MOCHI = 7
LTXV = 8
HUNYUAN_VIDEO = 9
def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
clip_data = []
@ -559,6 +572,7 @@ class TEModel(Enum):
T5_XXL = 4
T5_XL = 5
T5_BASE = 6
LLAMA3_8 = 7
def detect_te_model(sd):
if "text_model.encoder.layers.30.mlp.fc1.weight" in sd:
@ -575,6 +589,8 @@ def detect_te_model(sd):
return TEModel.T5_XL
if "encoder.block.0.layer.0.SelfAttention.k.weight" in sd:
return TEModel.T5_BASE
if "model.layers.0.post_attention_layernorm.weight" in sd:
return TEModel.LLAMA3_8
return None
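detect_te_model() above classifies a checkpoint by probing for one characteristic state-dict key per text-encoder family. A minimal sketch of that idea, using only the two fingerprints visible in this hunk:

```python
# Sketch of key-fingerprint detection: each family leaves a telltale key in
# its state dict, so a single membership test per family is enough. Only the
# two keys shown in the hunk above are included here; the real function
# checks many more, in order.
FINGERPRINTS = {
    "model.layers.0.post_attention_layernorm.weight": "LLAMA3_8",
    "encoder.block.0.layer.0.SelfAttention.k.weight": "T5_BASE",
}

def detect(sd):
    for key, name in FINGERPRINTS.items():
        if key in sd:
            return name
    return None

print(detect({"model.layers.0.post_attention_layernorm.weight": 0}))  # LLAMA3_8
```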
@ -652,6 +668,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
elif clip_type == CLIPType.FLUX:
clip_target.clip = comfy.text_encoders.flux.flux_clip(**t5xxl_detect(clip_data))
clip_target.tokenizer = comfy.text_encoders.flux.FluxTokenizer
elif clip_type == CLIPType.HUNYUAN_VIDEO:
clip_target.clip = comfy.text_encoders.hunyuan_video.hunyuan_video_clip() #TODO
clip_target.tokenizer = comfy.text_encoders.hunyuan_video.HunyuanVideoTokenizer
else:
clip_target.clip = sdxl_clip.SDXLClipModel
clip_target.tokenizer = sdxl_clip.SDXLTokenizer
@ -691,7 +710,6 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
config = yaml.safe_load(stream)
model_config_params = config['model']['params']
clip_config = model_config_params['cond_stage_config']
scale_factor = model_config_params['scale_factor']
if "parameterization" in model_config_params:
if model_config_params["parameterization"] == "v":

View File

@ -336,7 +336,6 @@ def expand_directory_list(directories):
return list(dirs)
def bundled_embed(embed, prefix, suffix): #bundled embedding in lora format
i = 0
out_list = []
for k in embed:
if k.startswith(prefix) and k.endswith(suffix):
@ -392,7 +391,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
embed_out = safe_load_embed_zip(embed_path)
else:
embed = torch.load(embed_path, map_location="cpu")
except Exception as e:
except Exception:
logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name))
return None
@ -421,7 +420,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
return embed_out
class SDTokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, tokenizer_data={}):
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data={}):
if tokenizer_path is None:
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
@ -434,11 +433,16 @@ class SDTokenizer:
self.tokens_start = 1
self.start_token = empty[0]
if has_end_token:
self.end_token = empty[1]
if end_token is not None:
self.end_token = end_token
else:
self.end_token = empty[1]
else:
self.tokens_start = 0
self.start_token = None
if has_end_token:
if end_token is not None:
self.end_token = end_token
else:
self.end_token = empty[0]
if pad_token is not None:
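The change above makes the end token overridable. A sketch of the resolution order as a pure function (the `empty_ids` values are placeholders; `end_token=128009` matches what the `LLAMA3Tokenizer` later in this diff passes for `<|eot_id|>`):

```python
# Sketch of the end-token resolution added above: an explicit end_token wins,
# otherwise fall back to what the tokenizer emits for an empty string
# (index 1 when there is a start token, index 0 when there is not).
def resolve_end_token(empty_ids, has_start_token, end_token=None):
    if end_token is not None:
        return end_token
    return empty_ids[1] if has_start_token else empty_ids[0]

print(resolve_end_token([128000, 128001], True, end_token=128009))  # 128009
```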

View File

@ -12,6 +12,7 @@ import comfy.text_encoders.hydit
import comfy.text_encoders.flux
import comfy.text_encoders.genmo
import comfy.text_encoders.lt
import comfy.text_encoders.hunyuan_video
from . import supported_models_base
from . import latent_formats
@ -224,7 +225,6 @@ class SDXL(supported_models_base.BASE):
def process_clip_state_dict_for_saving(self, state_dict):
replace_prefix = {}
keys_to_replace = {}
state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
for k in state_dict:
if k.startswith("clip_l"):
@ -527,7 +527,6 @@ class SD3(supported_models_base.BASE):
clip_l = False
clip_g = False
t5 = False
dtype_t5 = None
pref = self.text_encoder_key_prefix[0]
if "{}clip_l.transformer.text_model.final_layer_norm.weight".format(pref) in state_dict:
clip_l = True
@ -740,6 +739,54 @@ class LTXV(supported_models_base.BASE):
t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
return supported_models_base.ClipTarget(comfy.text_encoders.lt.LTXVT5Tokenizer, comfy.text_encoders.lt.ltxv_te(**t5_detect))
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV]
class HunyuanVideo(supported_models_base.BASE):
unet_config = {
"image_model": "hunyuan_video",
}
sampling_settings = {
"shift": 7.0,
}
unet_extra_config = {}
latent_format = latent_formats.HunyuanVideo
memory_usage_factor = 2.0 #TODO
supported_inference_dtypes = [torch.bfloat16, torch.float16, torch.float32]
vae_key_prefix = ["vae."]
text_encoder_key_prefix = ["text_encoders."]
def get_model(self, state_dict, prefix="", device=None):
out = model_base.HunyuanVideo(self, device=device)
return out
def process_unet_state_dict(self, state_dict):
out_sd = {}
for k in list(state_dict.keys()):
key_out = k
key_out = key_out.replace("txt_in.t_embedder.mlp.0.", "txt_in.t_embedder.in_layer.").replace("txt_in.t_embedder.mlp.2.", "txt_in.t_embedder.out_layer.")
key_out = key_out.replace("txt_in.c_embedder.linear_1.", "txt_in.c_embedder.in_layer.").replace("txt_in.c_embedder.linear_2.", "txt_in.c_embedder.out_layer.")
key_out = key_out.replace("_mod.linear.", "_mod.lin.").replace("_attn_qkv.", "_attn.qkv.")
key_out = key_out.replace("mlp.fc1.", "mlp.0.").replace("mlp.fc2.", "mlp.2.")
key_out = key_out.replace("_attn_q_norm.weight", "_attn.norm.query_norm.scale").replace("_attn_k_norm.weight", "_attn.norm.key_norm.scale")
key_out = key_out.replace(".q_norm.weight", ".norm.query_norm.scale").replace(".k_norm.weight", ".norm.key_norm.scale")
key_out = key_out.replace("_attn_proj.", "_attn.proj.")
key_out = key_out.replace(".modulation.linear.", ".modulation.lin.")
key_out = key_out.replace("_in.mlp.2.", "_in.out_layer.").replace("_in.mlp.0.", "_in.in_layer.")
out_sd[key_out] = state_dict[k]
return out_sd
def process_unet_state_dict_for_saving(self, state_dict):
replace_prefix = {"": "model.model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
def clip_target(self, state_dict={}):
# pref = self.text_encoder_key_prefix[0]
# t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
return supported_models_base.ClipTarget(comfy.text_encoders.hunyuan_video.HunyuanVideoTokenizer, comfy.text_encoders.hunyuan_video.hunyuan_video_clip()) #TODO
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo]
models += [SVD_img2vid]
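process_unet_state_dict() above renames diffusers-style keys by chaining str.replace calls. A minimal sketch with just two of the rules (the example key is hypothetical but follows the double_blocks naming used elsewhere in this diff):

```python
# Sketch of the chained-replace key remapping: diffusers-style names are
# rewritten to internal names one substring at a time.
RULES = [
    ("txt_in.t_embedder.mlp.0.", "txt_in.t_embedder.in_layer."),
    ("_mod.linear.", "_mod.lin."),
]

def remap_key(k: str) -> str:
    for old, new in RULES:
        k = k.replace(old, new)
    return k

print(remap_key("double_blocks.0.img_mod.linear.weight"))
# -> double_blocks.0.img_mod.lin.weight
```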

View File

@ -0,0 +1,98 @@
from comfy import sd1_clip
import comfy.model_management
import comfy.text_encoders.llama
from transformers import LlamaTokenizerFast
import torch
import os
class LLAMA3Tokenizer(sd1_clip.SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}, min_length=256):
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "llama_tokenizer")
super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='llama', tokenizer_class=LlamaTokenizerFast, has_start_token=True, has_end_token=True, pad_to_max_length=False, max_length=99999999, pad_token=128258, end_token=128009, min_length=min_length)
class LLAMAModel(sd1_clip.SDClipModel):
def __init__(self, device="cpu", layer="hidden", layer_idx=-3, dtype=None, attention_mask=True, model_options={}):
llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None)
if llama_scaled_fp8 is not None:
model_options = model_options.copy()
model_options["scaled_fp8"] = llama_scaled_fp8
super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 128000, "pad": 128258}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Llama2, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
class HunyuanVideoTokenizer:
def __init__(self, embedding_directory=None, tokenizer_data={}):
clip_l_tokenizer_class = tokenizer_data.get("clip_l_tokenizer_class", sd1_clip.SDTokenizer)
self.clip_l = clip_l_tokenizer_class(embedding_directory=embedding_directory)
self.llama_template = """<|start_header_id|>system<|end_header_id|>
Describe the video by detailing the following aspects: 1. The main content and theme of the video.2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects.3. Actions, events, behaviors temporal relationships, physical movement changes of the objects.4. background environment, light, style and atmosphere.5. camera angles, movements, and transitions used in the video:<|eot_id|><|start_header_id|>user<|end_header_id|>""" # 93 tokens
self.llama = LLAMA3Tokenizer(embedding_directory=embedding_directory, min_length=1)
def tokenize_with_weights(self, text:str, return_word_ids=False):
out = {}
out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids)
llama_text = "{}{}".format(self.llama_template, text)
out["llama"] = self.llama.tokenize_with_weights(llama_text, return_word_ids)
return out
def untokenize(self, token_weight_pair):
return self.clip_l.untokenize(token_weight_pair)
def state_dict(self):
return {}
class HunyuanVideoClipModel(torch.nn.Module):
def __init__(self, dtype_llama=None, device="cpu", dtype=None, model_options={}):
super().__init__()
dtype_llama = comfy.model_management.pick_weight_dtype(dtype_llama, dtype, device)
clip_l_class = model_options.get("clip_l_class", sd1_clip.SDClipModel)
self.clip_l = clip_l_class(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options)
self.llama = LLAMAModel(device=device, dtype=dtype_llama, model_options=model_options)
self.dtypes = set([dtype, dtype_llama])
def set_clip_options(self, options):
self.clip_l.set_clip_options(options)
self.llama.set_clip_options(options)
def reset_clip_options(self):
self.clip_l.reset_clip_options()
self.llama.reset_clip_options()
def encode_token_weights(self, token_weight_pairs):
token_weight_pairs_l = token_weight_pairs["l"]
token_weight_pairs_llama = token_weight_pairs["llama"]
llama_out, llama_pooled, llama_extra_out = self.llama.encode_token_weights(token_weight_pairs_llama)
template_end = 0
for i, v in enumerate(token_weight_pairs_llama[0]):
if v[0] == 128007: # <|end_header_id|>
template_end = i
llama_out = llama_out[:, template_end:]
llama_extra_out["attention_mask"] = llama_extra_out["attention_mask"][:, template_end:]
if llama_extra_out["attention_mask"].sum() == torch.numel(llama_extra_out["attention_mask"]):
llama_extra_out.pop("attention_mask") # attention mask is useless if no masked elements
l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs_l)
return llama_out, l_pooled, llama_extra_out
def load_sd(self, sd):
if "text_model.encoder.layers.1.mlp.fc1.weight" in sd:
return self.clip_l.load_sd(sd)
else:
return self.llama.load_sd(sd)
def hunyuan_video_clip(dtype_llama=None, llama_scaled_fp8=None):
class HunyuanVideoClipModel_(HunyuanVideoClipModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
model_options = model_options.copy()
model_options["llama_scaled_fp8"] = llama_scaled_fp8
super().__init__(dtype_llama=dtype_llama, device=device, dtype=dtype, model_options=model_options)
return HunyuanVideoClipModel_
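A sketch of the template-stripping step in encode_token_weights() above: because the llama prompt is wrapped in a fixed chat template, the hidden states are sliced at the last `<|end_header_id|>` token (id 128007) so only the user text remains. All other token ids below are made up.

```python
# Standalone sketch: scan the (token, weight) pairs and remember the index of
# the last <|end_header_id|>; the model keeps hidden states from there on.
def template_end_index(token_weight_pairs):
    end = 0
    for i, (tok, _w) in enumerate(token_weight_pairs):
        if tok == 128007:  # <|end_header_id|>
            end = i
    return end

pairs = [(128000, 1.0), (128007, 1.0), (42, 1.0), (128007, 1.0), (7, 1.0)]
print(template_end_index(pairs))  # 3 -> states kept from index 3 onward
```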

View File

@ -0,0 +1,221 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Any
from comfy.ldm.modules.attention import optimized_attention
import comfy.model_management
import comfy.ldm.common_dit
import comfy.model_management
@dataclass
class Llama2Config:
vocab_size: int = 128320
hidden_size: int = 4096
intermediate_size: int = 14336
num_hidden_layers: int = 32
num_attention_heads: int = 32
num_key_value_heads: int = 8
max_position_embeddings: int = 8192
rms_norm_eps: float = 1e-5
rope_theta: float = 500000.0
class RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-5, device=None, dtype=None):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.empty(dim, device=device, dtype=dtype))
def forward(self, x: torch.Tensor):
return comfy.ldm.common_dit.rms_norm(x, self.weight, self.eps)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def precompute_freqs_cis(head_dim, seq_len, theta, device=None):
theta_numerator = torch.arange(0, head_dim, 2, device=device).float()
inv_freq = 1.0 / (theta ** (theta_numerator / head_dim))
position_ids = torch.arange(0, seq_len, device=device).unsqueeze(0)
inv_freq_expanded = inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos()
sin = emb.sin()
return (cos, sin)
def apply_rope(xq, xk, freqs_cis):
cos = freqs_cis[0].unsqueeze(1)
sin = freqs_cis[1].unsqueeze(1)
q_embed = (xq * cos) + (rotate_half(xq) * sin)
k_embed = (xk * cos) + (rotate_half(xk) * sin)
return q_embed, k_embed
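The two helpers above implement rotary position embeddings. A minimal check (a sketch assuming `precompute_freqs_cis` and `apply_rope` from this file are in scope, with an arbitrary `head_dim=4`): position 0 should be the identity, and every position should be a pure rotation, i.e. norm-preserving.

```python
import torch

cos, sin = precompute_freqs_cis(head_dim=4, seq_len=8, theta=500000.0)
q = torch.randn(1, 2, 8, 4)  # (batch, heads, seq, head_dim)
k = torch.randn(1, 2, 8, 4)
q2, k2 = apply_rope(q, k, (cos, sin))
assert torch.allclose(q2[:, :, 0], q[:, :, 0], atol=1e-6)          # pos 0: identity
assert torch.allclose(q2.norm(dim=-1), q.norm(dim=-1), atol=1e-5)  # pure rotation
```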
class Attention(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
super().__init__()
self.num_heads = config.num_attention_heads
self.num_kv_heads = config.num_key_value_heads
self.hidden_size = config.hidden_size
self.head_dim = self.hidden_size // self.num_heads
ops = ops or nn
self.q_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype)
self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
self.o_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
):
batch_size, seq_length, _ = hidden_states.shape
xq = self.q_proj(hidden_states)
xk = self.k_proj(hidden_states)
xv = self.v_proj(hidden_states)
xq = xq.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
xk = xk.view(batch_size, seq_length, self.num_kv_heads, self.head_dim).transpose(1, 2)
xv = xv.view(batch_size, seq_length, self.num_kv_heads, self.head_dim).transpose(1, 2)
xq, xk = apply_rope(xq, xk, freqs_cis=freqs_cis)
xk = xk.repeat_interleave(self.num_heads // self.num_kv_heads, dim=1)
xv = xv.repeat_interleave(self.num_heads // self.num_kv_heads, dim=1)
output = optimized_attention(xq, xk, xv, self.num_heads, mask=attention_mask, skip_reshape=True)
return self.o_proj(output)
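The repeat_interleave calls above are the grouped-query attention trick: 8 key/value heads are expanded to match the 32 query heads so a standard attention kernel can be reused unchanged. A standalone sketch of the shape change (sizes taken from Llama2Config above, sequence length made up):

```python
import torch

num_heads, num_kv_heads, head_dim = 32, 8, 128
xk = torch.randn(1, num_kv_heads, 77, head_dim)
xk = xk.repeat_interleave(num_heads // num_kv_heads, dim=1)
print(xk.shape)  # torch.Size([1, 32, 77, 128]) -- each kv head repeated 4x
```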
class MLP(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
super().__init__()
ops = ops or nn
self.gate_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
self.up_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
self.down_proj = ops.Linear(config.intermediate_size, config.hidden_size, bias=False, device=device, dtype=dtype)
def forward(self, x):
return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
class TransformerBlock(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
super().__init__()
self.self_attn = Attention(config, device=device, dtype=dtype, ops=ops)
self.mlp = MLP(config, device=device, dtype=dtype, ops=ops)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype)
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype)
def forward(
self,
x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
):
# Self Attention
residual = x
x = self.input_layernorm(x)
x = self.self_attn(
hidden_states=x,
attention_mask=attention_mask,
freqs_cis=freqs_cis,
)
x = residual + x
# MLP
residual = x
x = self.post_attention_layernorm(x)
x = self.mlp(x)
x = residual + x
return x
class Llama2_(nn.Module):
def __init__(self, config, device=None, dtype=None, ops=None):
super().__init__()
self.config = config
self.vocab_size = config.vocab_size
self.embed_tokens = ops.Embedding(
config.vocab_size,
config.hidden_size,
device=device,
dtype=dtype
)
self.layers = nn.ModuleList([
TransformerBlock(config, device=device, dtype=dtype, ops=ops)
for _ in range(config.num_hidden_layers)
])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype)
# self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype)
def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None):
x = self.embed_tokens(x, out_dtype=dtype)
freqs_cis = precompute_freqs_cis(self.config.hidden_size // self.config.num_attention_heads,
x.shape[1],
self.config.rope_theta,
device=x.device)
mask = None
if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
if mask is not None:
mask += causal_mask
else:
mask = causal_mask
intermediate = None
if intermediate_output is not None:
if intermediate_output < 0:
intermediate_output = len(self.layers) + intermediate_output
for i, layer in enumerate(self.layers):
x = layer(
x=x,
attention_mask=mask,
freqs_cis=freqs_cis,
)
if i == intermediate_output:
intermediate = x.clone()
x = self.norm(x)
if intermediate is not None and final_layer_norm_intermediate:
intermediate = self.norm(intermediate)
return x, intermediate
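The mask construction in forward() above folds padding and causality into one additive bias. A minimal standalone sketch of the same arithmetic (the 4-token mask is made up):

```python
import torch

seq = 4
attention_mask = torch.tensor([[1, 1, 1, 0]])  # last token is padding
# padding positions become -inf
pad = 1.0 - attention_mask.float().reshape(1, 1, 1, seq).expand(1, 1, seq, seq)
pad = pad.masked_fill(pad.bool(), float("-inf"))
# future positions become -inf (strict upper triangle)
causal = torch.full((seq, seq), float("-inf")).triu_(1)
print(pad + causal)  # row i: 0 for visible tokens j <= i, -inf elsewhere
```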
class Llama2(torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Llama2Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, embeddings):
self.model.embed_tokens = embeddings
def forward(self, input_ids, *args, **kwargs):
return self.model(input_ids, *args, **kwargs)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -172,7 +172,6 @@ class T5LayerSelfAttention(torch.nn.Module):
# self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, x, mask=None, past_bias=None, optimized_attention=None):
normed_hidden_states = self.layer_norm(x)
output, past_bias = self.SelfAttention(self.layer_norm(x), mask=mask, past_bias=past_bias, optimized_attention=optimized_attention)
# x = x + self.dropout(attention_output)
x += output

View File

@ -26,6 +26,8 @@ import numpy as np
from PIL import Image
import logging
import itertools
from torch.nn.functional import interpolate
from einops import rearrange
def load_torch_file(ckpt, safe_load=False, device=None):
if device is None:
@ -873,5 +875,46 @@ def reshape_mask(input_mask, output_shape):
mask = torch.nn.functional.interpolate(input_mask, size=output_shape[2:], mode=scale_mode)
if mask.shape[1] < output_shape[1]:
mask = mask.repeat((1, output_shape[1]) + (1,) * dims)[:,:output_shape[1]]
mask = comfy.utils.repeat_to_batch_size(mask, output_shape[0])
mask = repeat_to_batch_size(mask, output_shape[0])
return mask
def upscale_dit_mask(mask: torch.Tensor, img_size_in, img_size_out):
hi, wi = img_size_in
ho, wo = img_size_out
# if it's already the correct size, no need to do anything
if (hi, wi) == (ho, wo):
return mask
if mask.ndim == 2:
mask = mask.unsqueeze(0)
if mask.ndim != 3:
raise ValueError(f"Got a mask of shape {list(mask.shape)}, expected [b, q, k] or [q, k]")
txt_tokens = mask.shape[1] - (hi * wi)
# quadrants of the mask
txt_to_txt = mask[:, :txt_tokens, :txt_tokens]
txt_to_img = mask[:, :txt_tokens, txt_tokens:]
img_to_img = mask[:, txt_tokens:, txt_tokens:]
img_to_txt = mask[:, txt_tokens:, :txt_tokens]
# convert to 1d x 2d, interpolate, then back to 1d x 1d
txt_to_img = rearrange(txt_to_img, "b t (h w) -> b t h w", h=hi, w=wi)
txt_to_img = interpolate(txt_to_img, size=img_size_out, mode="bilinear")
txt_to_img = rearrange(txt_to_img, "b t h w -> b t (h w)")
# this one is hard because we have to do it twice
# convert to 1d x 2d, interpolate, then to 2d x 1d, interpolate, then 1d x 1d
img_to_img = rearrange(img_to_img, "b hw (h w) -> b hw h w", h=hi, w=wi)
img_to_img = interpolate(img_to_img, size=img_size_out, mode="bilinear")
img_to_img = rearrange(img_to_img, "b (hk wk) hq wq -> b (hq wq) hk wk", hk=hi, wk=wi)
img_to_img = interpolate(img_to_img, size=img_size_out, mode="bilinear")
img_to_img = rearrange(img_to_img, "b (hq wq) hk wk -> b (hk wk) (hq wq)", hq=ho, wq=wo)
# convert to 2d x 1d, interpolate, then back to 1d x 1d
img_to_txt = rearrange(img_to_txt, "b (h w) t -> b t h w", h=hi, w=wi)
img_to_txt = interpolate(img_to_txt, size=img_size_out, mode="bilinear")
img_to_txt = rearrange(img_to_txt, "b t h w -> b (h w) t")
# reassemble the mask from blocks
out = torch.cat([
torch.cat([txt_to_txt, txt_to_img], dim=2),
torch.cat([img_to_txt, img_to_img], dim=2)],
dim=1
)
return out
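A shape walk-through for upscale_dit_mask() above (a sketch assuming the function is in scope; all sizes are made up): with 2 text tokens and an image grid growing from 2x2 to 4x4, only the image-facing quadrants are resampled, so the mask grows from (b, 2+4, 2+4) to (b, 2+16, 2+16) while the txt/img quadrants stay aligned.

```python
import torch

b, txt, (hi, wi), (ho, wo) = 1, 2, (2, 2), (4, 4)
mask = torch.zeros(b, txt + hi * wi, txt + hi * wi)  # (1, 6, 6)
out = upscale_dit_mask(mask, (hi, wi), (ho, wo))
print(out.shape)  # torch.Size([1, 18, 18]) -- 2 txt tokens + 16 img tokens
```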

View File

@ -22,14 +22,15 @@ class CLIPTextEncodeSDXL:
@classmethod
def INPUT_TYPES(s):
return {"required": {
"clip": ("CLIP", ),
"width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
"crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
"target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"text_g": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
"text_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
"text_g": ("STRING", {"multiline": True, "dynamicPrompts": True}),
"text_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"

View File

@ -1,3 +1,8 @@
import nodes
import torch
import comfy.model_management
class CLIPTextEncodeHunyuanDiT:
@classmethod
def INPUT_TYPES(s):
@ -17,7 +22,23 @@ class CLIPTextEncodeHunyuanDiT:
return (clip.encode_from_tokens_scheduled(tokens), )
class EmptyHunyuanLatentVideo:
@classmethod
def INPUT_TYPES(s):
return {"required": { "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
"height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
"length": ("INT", {"default": 25, "min": 1, "max": nodes.MAX_RESOLUTION, "step": 4}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "generate"
CATEGORY = "latent/video"
def generate(self, width, height, length, batch_size=1):
latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
return ({"samples":latent}, )
NODE_CLASS_MAPPINGS = {
"CLIPTextEncodeHunyuanDiT": CLIPTextEncodeHunyuanDiT,
"EmptyHunyuanLatentVideo": EmptyHunyuanLatentVideo,
}
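Worked example of the latent shape EmptyHunyuanLatentVideo produces at its defaults (848x480, 25 frames): time compresses 4x with a +1 offset, spatial dims compress 8x, with 16 latent channels.

```python
# Sketch of the shape arithmetic from generate() above, at the node defaults.
width, height, length, batch_size = 848, 480, 25, 1
shape = (batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8)
print(shape)  # (1, 16, 7, 60, 106)
```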

View File

@ -35,8 +35,6 @@ class HyperTile:
CATEGORY = "model_patches/unet"
def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
model_channels = model.model.model_config.unet_config["model_channels"]
latent_tile_size = max(32, tile_size) // 8
self.temp = None

View File

@ -0,0 +1,95 @@
import nodes
import folder_paths
import os
def normalize_path(path):
return path.replace('\\', '/')
class Load3D():
@classmethod
def INPUT_TYPES(s):
input_dir = os.path.join(folder_paths.get_input_directory(), "3d")
os.makedirs(input_dir, exist_ok=True)
files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.mtl', '.fbx', '.stl'))]
return {"required": {
"model_file": (sorted(files), {"file_upload": True}),
"image": ("LOAD_3D", {}),
"width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"show_grid": ([True, False],),
"camera_type": (["perspective", "orthographic"],),
"view": (["front", "right", "top", "isometric"],),
"material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
}}
RETURN_TYPES = ("IMAGE", "MASK", "STRING")
RETURN_NAMES = ("image", "mask", "mesh_path")
FUNCTION = "process"
CATEGORY = "3d"
def process(self, model_file, image, **kwargs):
imagepath = folder_paths.get_annotated_filepath(image)
load_image_node = nodes.LoadImage()
output_image, output_mask = load_image_node.load_image(image=imagepath)
return output_image, output_mask, model_file,
class Load3DAnimation():
@classmethod
def INPUT_TYPES(s):
input_dir = os.path.join(folder_paths.get_input_directory(), "3d")
os.makedirs(input_dir, exist_ok=True)
files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.fbx'))]
return {"required": {
"model_file": (sorted(files), {"file_upload": True}),
"image": ("LOAD_3D_ANIMATION", {}),
"width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"show_grid": ([True, False],),
"camera_type": (["perspective", "orthographic"],),
"view": (["front", "right", "top", "isometric"],),
"material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"animation_speed": (["0.1", "0.5", "1", "1.5", "2"], {"default": "1"}),
}}
RETURN_TYPES = ("IMAGE", "MASK", "STRING")
RETURN_NAMES = ("image", "mask", "mesh_path")
FUNCTION = "process"
CATEGORY = "3d"
def process(self, model_file, image, **kwargs):
imagepath = folder_paths.get_annotated_filepath(image)
load_image_node = nodes.LoadImage()
output_image, output_mask = load_image_node.load_image(image=imagepath)
return output_image, output_mask, model_file,
NODE_CLASS_MAPPINGS = {
"Load3D": Load3D,
"Load3DAnimation": Load3DAnimation
}
NODE_DISPLAY_NAME_MAPPINGS = {
"Load3D": "Load 3D",
"Load3DAnimation": "Load 3D - Animation"
}

View File

@ -240,7 +240,6 @@ class ModelSamplingContinuousV:
def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone()
latent_format = None
sigma_data = 1.0
if sampling == "v_prediction":
sampling_type = comfy.model_sampling.V_PREDICTION

View File

@ -16,6 +16,7 @@ VISION_CONFIG_DICT = {
"patch_size": 14,
"projection_dim": 768,
"hidden_act": "quick_gelu",
"model_type": "clip_vision_model",
}
class MLP(nn.Module):

View File

@ -144,11 +144,16 @@ def _map_node_over_list(obj, input_data_all, func, allow_interrupt=False, execut
return {k: v[i if len(v) > i else -1] for k, v in d.items()}
results = []
def process_inputs(inputs, index=None):
def process_inputs(inputs, index=None, input_is_list=False):
if allow_interrupt:
nodes.before_node_execution()
execution_block = None
for k, v in inputs.items():
if input_is_list:
for e in v:
if isinstance(e, ExecutionBlocker):
v = e
break
if isinstance(v, ExecutionBlocker):
execution_block = execution_block_cb(v) if execution_block_cb else v
break
@ -160,7 +165,7 @@ def _map_node_over_list(obj, input_data_all, func, allow_interrupt=False, execut
results.append(execution_block)
if input_is_list:
process_inputs(input_data_all, 0)
process_inputs(input_data_all, 0, input_is_list=input_is_list)
elif max_len_input == 0:
process_inputs({})
else:
@ -760,7 +765,7 @@ def validate_prompt(prompt):
if 'class_type' not in prompt[x]:
error = {
"type": "invalid_prompt",
"message": f"Cannot execute because a node is missing the class_type property.",
"message": "Cannot execute because a node is missing the class_type property.",
"details": f"Node ID '#{x}'",
"extra_info": {}
}

View File

@ -22,7 +22,7 @@ def fix_pytorch_libomp():
if b"libomp140.x86_64.dll" not in contents:
break
try:
mydll = ctypes.cdll.LoadLibrary(test_file)
except FileNotFoundError as e:
ctypes.cdll.LoadLibrary(test_file)
except FileNotFoundError:
logging.warning("Detected pytorch version with libomp issue, patching.")
shutil.copyfile(os.path.join(lib_folder, "libiomp5md.dll"), dest)

View File

@ -200,10 +200,17 @@ def add_model_folder_path(folder_name: str, full_folder_path: str, is_default: b
global folder_names_and_paths
folder_name = map_legacy(folder_name)
if folder_name in folder_names_and_paths:
if is_default:
folder_names_and_paths[folder_name][0].insert(0, full_folder_path)
paths, _exts = folder_names_and_paths[folder_name]
if full_folder_path in paths:
if is_default and paths[0] != full_folder_path:
# If the path to the folder is not the first in the list, move it to the beginning.
paths.remove(full_folder_path)
paths.insert(0, full_folder_path)
else:
folder_names_and_paths[folder_name][0].append(full_folder_path)
if is_default:
paths.insert(0, full_folder_path)
else:
paths.append(full_folder_path)
else:
folder_names_and_paths[folder_name] = ([full_folder_path], set())
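A condensed sketch of the ordering rules implemented above (a stand-in, not the folder_paths module): defaults go to the front, re-adding an existing path as default moves it to the front, and everything else appends. The paths mirror the unit tests added later in this commit.

```python
def add_path(paths, p, is_default=False):
    if p in paths:
        if is_default and paths[0] != p:
            paths.remove(p)   # promote an existing path to the front
            paths.insert(0, p)
    elif is_default:
        paths.insert(0, p)
    else:
        paths.append(p)

paths = []
add_path(paths, "/test/path")
add_path(paths, "/default/path", is_default=True)
add_path(paths, "/test/path", is_default=True)
print(paths)  # ['/test/path', '/default/path']
```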

main.py
View File

@ -7,6 +7,9 @@ import folder_paths
import time
from comfy.cli_args import args
from app.logger import setup_logger
import itertools
import utils.extra_config
import logging
if __name__ == "__main__":
#NOTE: These do not do anything on core ComfyUI which should already have no communication with the internet, they are for custom nodes.
@ -16,6 +19,40 @@ if __name__ == "__main__":
setup_logger(log_level=args.verbose)
def apply_custom_paths():
# extra model paths
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
utils.extra_config.load_extra_path_config(extra_model_paths_config_path)
if args.extra_model_paths_config:
for config_path in itertools.chain(*args.extra_model_paths_config):
utils.extra_config.load_extra_path_config(config_path)
# --output-directory, --input-directory, --user-directory
if args.output_directory:
output_dir = os.path.abspath(args.output_directory)
logging.info(f"Setting output directory to: {output_dir}")
folder_paths.set_output_directory(output_dir)
# These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc. nodes
folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
folder_paths.add_model_folder_path("diffusion_models",
os.path.join(folder_paths.get_output_directory(), "diffusion_models"))
folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras"))
if args.input_directory:
input_dir = os.path.abspath(args.input_directory)
logging.info(f"Setting input directory to: {input_dir}")
folder_paths.set_input_directory(input_dir)
if args.user_directory:
user_dir = os.path.abspath(args.user_directory)
logging.info(f"Setting user directory to: {user_dir}")
folder_paths.set_user_directory(user_dir)
def execute_prestartup_script():
def execute_script(script_path):
@ -57,18 +94,16 @@ def execute_prestartup_script():
print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
print()
apply_custom_paths()
execute_prestartup_script()
# Main code
import asyncio
import itertools
import shutil
import threading
import gc
import logging
import utils.extra_config
if os.name == "nt":
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
@ -112,6 +147,7 @@ def cuda_malloc_warning():
logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
def prompt_worker(q, server):
current_time: float = 0.0
e = execution.PromptExecutor(server, lru_size=args.cache_lru)
last_gc_collect = 0
need_gc = False
@ -208,14 +244,6 @@ if __name__ == "__main__":
server = server.PromptServer(loop)
q = execution.PromptQueue(server)
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
utils.extra_config.load_extra_path_config(extra_model_paths_config_path)
if args.extra_model_paths_config:
for config_path in itertools.chain(*args.extra_model_paths_config):
utils.extra_config.load_extra_path_config(config_path)
nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes)
cuda_malloc_warning()
@ -225,28 +253,6 @@ if __name__ == "__main__":
threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()
if args.output_directory:
output_dir = os.path.abspath(args.output_directory)
logging.info(f"Setting output directory to: {output_dir}")
folder_paths.set_output_directory(output_dir)
#These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc. nodes
folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
folder_paths.add_model_folder_path("diffusion_models", os.path.join(folder_paths.get_output_directory(), "diffusion_models"))
folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras"))
if args.input_directory:
input_dir = os.path.abspath(args.input_directory)
logging.info(f"Setting input directory to: {input_dir}")
folder_paths.set_input_directory(input_dir)
if args.user_directory:
user_dir = os.path.abspath(args.user_directory)
logging.info(f"Setting user directory to: {user_dir}")
folder_paths.set_user_directory(user_dir)
if args.quick_test_for_ci:
exit(0)

View File

@ -929,7 +929,7 @@ class DualCLIPLoader:
def INPUT_TYPES(s):
return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
"clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["sdxl", "sd3", "flux"], ),
"type": (["sdxl", "sd3", "flux", "hunyuan_video"], ),
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "load_clip"
@ -947,6 +947,8 @@ class DualCLIPLoader:
clip_type = comfy.sd.CLIPType.SD3
elif type == "flux":
clip_type = comfy.sd.CLIPType.FLUX
elif type == "hunyuan_video":
clip_type = comfy.sd.CLIPType.HUNYUAN_VIDEO
clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
return (clip,)
@ -1008,23 +1010,58 @@ class StyleModelApply:
"style_model": ("STYLE_MODEL", ),
"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}),
"strength_type": (["multiply"], ),
"strength_type": (["multiply", "attn_bias"], ),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "apply_stylemodel"
CATEGORY = "conditioning/style_model"
def apply_stylemodel(self, clip_vision_output, style_model, conditioning, strength, strength_type):
def apply_stylemodel(self, conditioning, style_model, clip_vision_output, strength, strength_type):
cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
if strength_type == "multiply":
cond *= strength
c = []
n = cond.shape[1]
c_out = []
for t in conditioning:
n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
c.append(n)
return (c, )
(txt, keys) = t
keys = keys.copy()
if strength_type == "attn_bias" and strength != 1.0:
# math.log raises an error if the argument is zero
# torch.log returns -inf, which is what we want
attn_bias = torch.log(torch.Tensor([strength]))
# get the size of the mask image
mask_ref_size = keys.get("attention_mask_img_shape", (1, 1))
n_ref = mask_ref_size[0] * mask_ref_size[1]
n_txt = txt.shape[1]
# grab the existing mask
mask = keys.get("attention_mask", None)
# create a default mask if it doesn't exist
if mask is None:
mask = torch.zeros((txt.shape[0], n_txt + n_ref, n_txt + n_ref), dtype=torch.float16)
# convert the mask dtype, because it might be boolean
# we want it to be interpreted as a bias
if mask.dtype == torch.bool:
# log(True) = log(1) = 0
# log(False) = log(0) = -inf
mask = torch.log(mask.to(dtype=torch.float16))
# now we make the mask bigger to add space for our new tokens
new_mask = torch.zeros((txt.shape[0], n_txt + n + n_ref, n_txt + n + n_ref), dtype=torch.float16)
# copy over the old mask, in quadrants
new_mask[:, :n_txt, :n_txt] = mask[:, :n_txt, :n_txt]
new_mask[:, :n_txt, n_txt+n:] = mask[:, :n_txt, n_txt:]
new_mask[:, n_txt+n:, :n_txt] = mask[:, n_txt:, :n_txt]
new_mask[:, n_txt+n:, n_txt+n:] = mask[:, n_txt:, n_txt:]
# now fill in the attention bias to our redux tokens
new_mask[:, :n_txt, n_txt:n_txt+n] = attn_bias
new_mask[:, n_txt+n:, n_txt:n_txt+n] = attn_bias
keys["attention_mask"] = new_mask.to(txt.device)
keys["attention_mask_img_shape"] = mask_ref_size
c_out.append([torch.cat((txt, cond), dim=1), keys])
return (c_out,)
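A numeric check on the attn_bias path above: strength is converted to a log-space additive bias, so 1.0 contributes nothing and 0.0 masks the redux tokens out entirely (torch.log(0) is -inf, which is why torch.log is used instead of math.log).

```python
import torch

for strength in (2.0, 1.0, 0.5, 0.0):
    print(strength, torch.log(torch.Tensor([strength])).item())
# 2.0 ~0.693, 1.0 0.0, 0.5 ~-0.693, 0.0 -inf
```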
class unCLIPConditioning:
@classmethod
@ -2150,6 +2187,7 @@ def init_builtin_extra_nodes():
"nodes_mahiro.py",
"nodes_lt.py",
"nodes_hooks.py",
"nodes_load_3d.py",
]
import_failed = []

View File

@ -237,11 +237,7 @@
"source": [
"!npm install -g localtunnel\n",
"\n",
"import subprocess\n",
"import threading\n",
"import time\n",
"import socket\n",
"import urllib.request\n",
"\n",
"def iframe_thread(port):\n",
" while True:\n",
@ -288,8 +284,6 @@
"outputs": [],
"source": [
"import threading\n",
"import time\n",
"import socket\n",
"def iframe_thread(port):\n",
" while True:\n",
" time.sleep(0.5)\n",

View File

@ -4,5 +4,7 @@ lint.ignore = ["ALL"]
# Enable specific rules
lint.select = [
"S307", # suspicious-eval-usage
"F401", # unused-import
# The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
# See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
"F",
]

View File

@ -460,7 +460,21 @@ class PromptServer():
return web.Response(body=alpha_buffer.read(), content_type='image/png',
headers={"Content-Disposition": f"filename=\"{filename}\""})
else:
return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""})
# Get content type from mimetype, defaulting to 'application/octet-stream'
content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# For security, force certain extensions to download instead of display
file_extension = os.path.splitext(filename)[1].lower()
if file_extension in {'.html', '.htm', '.js', '.css'}:
content_type = 'application/octet-stream' # Forces download
return web.FileResponse(
file,
headers={
"Content-Disposition": f"filename=\"{filename}\"",
"Content-Type": content_type
}
)
return web.Response(status=404)
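A standalone sketch of the content-type decision added above: guess from the filename, then force a download for extensions a browser would otherwise render or execute.

```python
import mimetypes
import os

def pick_content_type(filename: str) -> str:
    # Guess from the extension, defaulting to a generic binary type.
    content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
    # For security, force certain extensions to download instead of display.
    if os.path.splitext(filename)[1].lower() in {'.html', '.htm', '.js', '.css'}:
        content_type = 'application/octet-stream'
    return content_type

print(pick_content_type("preview.png"))   # image/png
print(pick_content_type("payload.html"))  # application/octet-stream
```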
@ -563,7 +577,7 @@ class PromptServer():
for x in nodes.NODE_CLASS_MAPPINGS:
try:
out[x] = node_info(x)
except Exception as e:
except Exception:
logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.")
logging.error(traceback.format_exc())
return web.json_response(out)
@ -584,7 +598,7 @@ class PromptServer():
return web.json_response(self.prompt_queue.get_history(max_items=max_items))
@routes.get("/history/{prompt_id}")
async def get_history(request):
async def get_history_prompt_id(request):
prompt_id = request.match_info.get("prompt_id", None)
return web.json_response(self.prompt_queue.get_history(prompt_id=prompt_id))
@ -599,8 +613,6 @@ class PromptServer():
@routes.post("/prompt")
async def post_prompt(request):
logging.info("got prompt")
resp_code = 200
out_string = ""
json_data = await request.json()
json_data = self.trigger_on_prompt(json_data)
@ -832,8 +844,8 @@ class PromptServer():
for handler in self.on_prompt_handlers:
try:
json_data = handler(json_data)
except Exception as e:
logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
except Exception:
logging.warning("[ERROR] An error occurred during the on_prompt_handler processing")
logging.warning(traceback.format_exc())
return json_data

View File

@ -7,6 +7,14 @@ from unittest.mock import patch
import folder_paths
@pytest.fixture()
def clear_folder_paths():
# Clear the global dictionary before each test to ensure isolation
original = folder_paths.folder_names_and_paths.copy()
folder_paths.folder_names_and_paths.clear()
yield
folder_paths.folder_names_and_paths = original
@pytest.fixture
def temp_dir():
with tempfile.TemporaryDirectory() as tmpdirname:
@ -30,9 +38,33 @@ def test_get_annotated_filepath():
assert folder_paths.get_annotated_filepath("test.txt", default_dir) == os.path.join(default_dir, "test.txt")
assert folder_paths.get_annotated_filepath("test.txt [output]") == os.path.join(folder_paths.get_output_directory(), "test.txt")
def test_add_model_folder_path():
folder_paths.add_model_folder_path("test_folder", "/test/path")
assert "/test/path" in folder_paths.get_folder_paths("test_folder")
def test_add_model_folder_path_append(clear_folder_paths):
folder_paths.add_model_folder_path("test_folder", "/default/path", is_default=True)
folder_paths.add_model_folder_path("test_folder", "/test/path", is_default=False)
assert folder_paths.get_folder_paths("test_folder") == ["/default/path", "/test/path"]
def test_add_model_folder_path_insert(clear_folder_paths):
folder_paths.add_model_folder_path("test_folder", "/test/path", is_default=False)
folder_paths.add_model_folder_path("test_folder", "/default/path", is_default=True)
assert folder_paths.get_folder_paths("test_folder") == ["/default/path", "/test/path"]
def test_add_model_folder_path_re_add_existing_default(clear_folder_paths):
folder_paths.add_model_folder_path("test_folder", "/test/path", is_default=False)
folder_paths.add_model_folder_path("test_folder", "/old_default/path", is_default=True)
assert folder_paths.get_folder_paths("test_folder") == ["/old_default/path", "/test/path"]
folder_paths.add_model_folder_path("test_folder", "/test/path", is_default=True)
assert folder_paths.get_folder_paths("test_folder") == ["/test/path", "/old_default/path"]
def test_add_model_folder_path_re_add_existing_non_default(clear_folder_paths):
folder_paths.add_model_folder_path("test_folder", "/test/path", is_default=False)
folder_paths.add_model_folder_path("test_folder", "/default/path", is_default=True)
assert folder_paths.get_folder_paths("test_folder") == ["/default/path", "/test/path"]
folder_paths.add_model_folder_path("test_folder", "/test/path", is_default=False)
assert folder_paths.get_folder_paths("test_folder") == ["/default/path", "/test/path"]
def test_recursive_search(temp_dir):
os.makedirs(os.path.join(temp_dir, "subdir"))

View File

@ -259,7 +259,7 @@ class TestForLoopOpen:
graph = GraphBuilder()
if "initial_value0" in kwargs:
remaining = kwargs["initial_value0"]
while_open = graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)})
graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)})
outputs = [kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)]
return {
"result": tuple(["stub", remaining] + outputs),

web/assets/DownloadGitView-DyhrHmlh.js generated vendored Normal file
View File

@ -0,0 +1,58 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { a as defineComponent, f as openBlock, g as createElementBlock, A as createBaseVNode, a8 as toDisplayString, h as createVNode, z as unref, D as script, bU as useRouter } from "./index-CSl7lfOs.js";
const _hoisted_1 = { class: "font-sans w-screen h-screen mx-0 grid place-items-center justify-center items-center text-neutral-900 bg-neutral-300 pointer-events-auto" };
const _hoisted_2 = { class: "col-start-1 h-screen row-start-1 place-content-center mx-auto overflow-y-auto" };
const _hoisted_3 = { class: "max-w-screen-sm flex flex-col gap-8 p-8 bg-[url('/assets/images/Git-Logo-White.svg')] bg-no-repeat bg-right-top bg-origin-padding" };
const _hoisted_4 = { class: "mt-24 text-4xl font-bold text-red-500" };
const _hoisted_5 = { class: "space-y-4" };
const _hoisted_6 = { class: "text-xl" };
const _hoisted_7 = { class: "text-xl" };
const _hoisted_8 = { class: "text-m" };
const _hoisted_9 = { class: "flex gap-4 flex-row-reverse" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "DownloadGitView",
setup(__props) {
const openGitDownloads = /* @__PURE__ */ __name(() => {
window.open("https://git-scm.com/downloads/", "_blank");
}, "openGitDownloads");
const skipGit = /* @__PURE__ */ __name(() => {
console.warn("pushing");
const router = useRouter();
router.push("install");
}, "skipGit");
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1, [
createBaseVNode("div", _hoisted_2, [
createBaseVNode("div", _hoisted_3, [
createBaseVNode("h1", _hoisted_4, toDisplayString(_ctx.$t("downloadGit.title")), 1),
createBaseVNode("div", _hoisted_5, [
createBaseVNode("p", _hoisted_6, toDisplayString(_ctx.$t("downloadGit.message")), 1),
createBaseVNode("p", _hoisted_7, toDisplayString(_ctx.$t("downloadGit.instructions")), 1),
createBaseVNode("p", _hoisted_8, toDisplayString(_ctx.$t("downloadGit.warning")), 1)
]),
createBaseVNode("div", _hoisted_9, [
createVNode(unref(script), {
label: _ctx.$t("downloadGit.gitWebsite"),
icon: "pi pi-external-link",
"icon-pos": "right",
onClick: openGitDownloads,
severity: "primary"
}, null, 8, ["label"]),
createVNode(unref(script), {
label: _ctx.$t("downloadGit.skip"),
icon: "pi pi-exclamation-triangle",
onClick: skipGit,
severity: "secondary"
}, null, 8, ["label"])
])
])
])
]);
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=DownloadGitView-DyhrHmlh.js.map

web/assets/DownloadGitView-DyhrHmlh.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"DownloadGitView-DyhrHmlh.js","sources":["../../src/views/DownloadGitView.vue"],"sourcesContent":["<template>\n <div\n class=\"font-sans w-screen h-screen mx-0 grid place-items-center justify-center items-center text-neutral-900 bg-neutral-300 pointer-events-auto\"\n >\n <div\n class=\"col-start-1 h-screen row-start-1 place-content-center mx-auto overflow-y-auto\"\n >\n <div\n class=\"max-w-screen-sm flex flex-col gap-8 p-8 bg-[url('/assets/images/Git-Logo-White.svg')] bg-no-repeat bg-right-top bg-origin-padding\"\n >\n <!-- Header -->\n <h1 class=\"mt-24 text-4xl font-bold text-red-500\">\n {{ $t('downloadGit.title') }}\n </h1>\n\n <!-- Message -->\n <div class=\"space-y-4\">\n <p class=\"text-xl\">\n {{ $t('downloadGit.message') }}\n </p>\n <p class=\"text-xl\">\n {{ $t('downloadGit.instructions') }}\n </p>\n <p class=\"text-m\">\n {{ $t('downloadGit.warning') }}\n </p>\n </div>\n\n <!-- Actions -->\n <div class=\"flex gap-4 flex-row-reverse\">\n <Button\n :label=\"$t('downloadGit.gitWebsite')\"\n icon=\"pi pi-external-link\"\n icon-pos=\"right\"\n @click=\"openGitDownloads\"\n severity=\"primary\"\n />\n <Button\n :label=\"$t('downloadGit.skip')\"\n icon=\"pi pi-exclamation-triangle\"\n @click=\"skipGit\"\n severity=\"secondary\"\n />\n </div>\n </div>\n </div>\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport { useRouter } from 'vue-router'\n\nconst openGitDownloads = () => {\n window.open('https://git-scm.com/downloads/', '_blank')\n}\n\nconst skipGit = () => {\n console.warn('pushing')\n const router = useRouter()\n router.push('install')\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;;;;;;;AAqDA,UAAM,mBAAmB,6BAAM;AACtB,aAAA,KAAK,kCAAkC,QAAQ;AAAA,IAAA,GAD/B;AAIzB,UAAM,UAAU,6BAAM;AACpB,cAAQ,KAAK,SAAS;AACtB,YAAM,SAAS;AACf,aAAO,KAAK,SAAS;AAAA,IAAA,GAHP;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1,8 +1,8 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, r as ref, c6 as FilterMatchMode, ca as useExtensionStore, u as useSettingStore, o as onMounted, q as computed, g as openBlock, x as createBlock, y as withCtx, i as createVNode, c7 as SearchBox, z as unref, bT as script, A as createBaseVNode, h as createElementBlock, O as renderList, a6 as toDisplayString, aw as createTextVNode, N as Fragment, D as script$1, j as createCommentVNode, bV as script$3, c8 as _sfc_main$1 } from "./index-CoOvI8ZH.js";
import { s as script$2, a as script$4 } from "./index-DK6Kev7f.js";
import "./index-D4DWQPPQ.js";
import { a as defineComponent, r as ref, ck as FilterMatchMode, co as useExtensionStore, u as useSettingStore, o as onMounted, q as computed, f as openBlock, x as createBlock, y as withCtx, h as createVNode, cl as SearchBox, z as unref, bW as script, A as createBaseVNode, g as createElementBlock, Q as renderList, a8 as toDisplayString, ay as createTextVNode, P as Fragment, D as script$1, i as createCommentVNode, c5 as script$3, cm as _sfc_main$1 } from "./index-CSl7lfOs.js";
import { s as script$2, a as script$4 } from "./index-CgmI-OoW.js";
import "./index-aSkd2KAK.js";
const _hoisted_1 = { class: "flex justify-end" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ExtensionPanel",
@ -47,7 +47,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
createVNode(SearchBox, {
modelValue: filters.value["global"].value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => filters.value["global"].value = $event),
placeholder: _ctx.$t("searchExtensions") + "..."
placeholder: _ctx.$t("g.searchExtensions") + "..."
}, null, 8, ["modelValue", "placeholder"]),
hasChanges.value ? (openBlock(), createBlock(unref(script), {
key: 0,
@ -67,7 +67,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
createBaseVNode("div", _hoisted_1, [
createVNode(unref(script$1), {
label: _ctx.$t("reloadToApplyChanges"),
label: _ctx.$t("g.reloadToApplyChanges"),
onClick: applyChanges,
outlined: "",
severity: "danger"
@ -87,7 +87,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
default: withCtx(() => [
createVNode(unref(script$2), {
field: "name",
header: _ctx.$t("extensionName"),
header: _ctx.$t("g.extensionName"),
sortable: ""
}, null, 8, ["header"]),
createVNode(unref(script$2), { pt: {
@ -114,4 +114,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
//# sourceMappingURL=ExtensionPanel-DsD42OtO.js.map
//# sourceMappingURL=ExtensionPanel-DgaZovwi.js.map

web/assets/ExtensionPanel-DgaZovwi.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"ExtensionPanel-DgaZovwi.js","sources":["../../src/components/dialog/content/setting/ExtensionPanel.vue"],"sourcesContent":["<template>\n <PanelTemplate value=\"Extension\" class=\"extension-panel\">\n <template #header>\n <SearchBox\n v-model=\"filters['global'].value\"\n :placeholder=\"$t('g.searchExtensions') + '...'\"\n />\n <Message v-if=\"hasChanges\" severity=\"info\" pt:text=\"w-full\">\n <ul>\n <li v-for=\"ext in changedExtensions\" :key=\"ext.name\">\n <span>\n {{ extensionStore.isExtensionEnabled(ext.name) ? '[-]' : '[+]' }}\n </span>\n {{ ext.name }}\n </li>\n </ul>\n <div class=\"flex justify-end\">\n <Button\n :label=\"$t('g.reloadToApplyChanges')\"\n @click=\"applyChanges\"\n outlined\n severity=\"danger\"\n />\n </div>\n </Message>\n </template>\n <DataTable\n :value=\"extensionStore.extensions\"\n stripedRows\n size=\"small\"\n :filters=\"filters\"\n >\n <Column field=\"name\" :header=\"$t('g.extensionName')\" sortable></Column>\n <Column\n :pt=\"{\n bodyCell: 'flex items-center justify-end'\n }\"\n >\n <template #body=\"slotProps\">\n <ToggleSwitch\n v-model=\"editingEnabledExtensions[slotProps.data.name]\"\n @change=\"updateExtensionStatus\"\n />\n </template>\n </Column>\n </DataTable>\n </PanelTemplate>\n</template>\n\n<script setup lang=\"ts\">\nimport { ref, computed, onMounted } from 'vue'\nimport { useExtensionStore } from '@/stores/extensionStore'\nimport { useSettingStore } from '@/stores/settingStore'\nimport DataTable from 'primevue/datatable'\nimport Column from 'primevue/column'\nimport ToggleSwitch from 'primevue/toggleswitch'\nimport Button from 'primevue/button'\nimport Message from 'primevue/message'\nimport { FilterMatchMode } from '@primevue/core/api'\nimport PanelTemplate from './PanelTemplate.vue'\nimport SearchBox from '@/components/common/SearchBox.vue'\n\nconst filters = ref({\n global: { value: '', matchMode: FilterMatchMode.CONTAINS }\n})\n\nconst extensionStore = useExtensionStore()\nconst settingStore = useSettingStore()\n\nconst editingEnabledExtensions = ref<Record<string, boolean>>({})\n\nonMounted(() => {\n extensionStore.extensions.forEach((ext) => {\n editingEnabledExtensions.value[ext.name] =\n extensionStore.isExtensionEnabled(ext.name)\n })\n})\n\nconst changedExtensions = computed(() => {\n return extensionStore.extensions.filter(\n (ext) =>\n editingEnabledExtensions.value[ext.name] !==\n extensionStore.isExtensionEnabled(ext.name)\n )\n})\n\nconst hasChanges = computed(() => {\n return changedExtensions.value.length > 0\n})\n\nconst updateExtensionStatus = () => {\n const editingDisabledExtensionNames = Object.entries(\n editingEnabledExtensions.value\n )\n .filter(([_, enabled]) => !enabled)\n .map(([name]) => name)\n\n settingStore.set('Comfy.Extension.Disabled', [\n ...extensionStore.inactiveDisabledExtensionNames,\n ...editingDisabledExtensionNames\n ])\n}\n\nconst applyChanges = () => {\n // Refresh the page to apply changes\n 
window.location.reload()\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;AA8DA,UAAM,UAAU,IAAI;AAAA,MAClB,QAAQ,EAAE,OAAO,IAAI,WAAW,gBAAgB,SAAS;AAAA,IAAA,CAC1D;AAED,UAAM,iBAAiB;AACvB,UAAM,eAAe;AAEf,UAAA,2BAA2B,IAA6B,CAAA,CAAE;AAEhE,cAAU,MAAM;AACC,qBAAA,WAAW,QAAQ,CAAC,QAAQ;AACzC,iCAAyB,MAAM,IAAI,IAAI,IACrC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA,CAC7C;AAAA,IAAA,CACF;AAEK,UAAA,oBAAoB,SAAS,MAAM;AACvC,aAAO,eAAe,WAAW;AAAA,QAC/B,CAAC,QACC,yBAAyB,MAAM,IAAI,IAAI,MACvC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA;AAAA,IAC9C,CACD;AAEK,UAAA,aAAa,SAAS,MAAM;AACzB,aAAA,kBAAkB,MAAM,SAAS;AAAA,IAAA,CACzC;AAED,UAAM,wBAAwB,6BAAM;AAClC,YAAM,gCAAgC,OAAO;AAAA,QAC3C,yBAAyB;AAAA,MAExB,EAAA,OAAO,CAAC,CAAC,GAAG,OAAO,MAAM,CAAC,OAAO,EACjC,IAAI,CAAC,CAAC,IAAI,MAAM,IAAI;AAEvB,mBAAa,IAAI,4BAA4B;AAAA,QAC3C,GAAG,eAAe;AAAA,QAClB,GAAG;AAAA,MAAA,CACJ;AAAA,IAAA,GAV2B;AAa9B,UAAM,eAAe,6BAAM;AAEzB,aAAO,SAAS;IAAO,GAFJ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1 +0,0 @@
{"version":3,"file":"ExtensionPanel-DsD42OtO.js","sources":["../../src/components/dialog/content/setting/ExtensionPanel.vue"],"sourcesContent":["<template>\n <PanelTemplate value=\"Extension\" class=\"extension-panel\">\n <template #header>\n <SearchBox\n v-model=\"filters['global'].value\"\n :placeholder=\"$t('searchExtensions') + '...'\"\n />\n <Message v-if=\"hasChanges\" severity=\"info\" pt:text=\"w-full\">\n <ul>\n <li v-for=\"ext in changedExtensions\" :key=\"ext.name\">\n <span>\n {{ extensionStore.isExtensionEnabled(ext.name) ? '[-]' : '[+]' }}\n </span>\n {{ ext.name }}\n </li>\n </ul>\n <div class=\"flex justify-end\">\n <Button\n :label=\"$t('reloadToApplyChanges')\"\n @click=\"applyChanges\"\n outlined\n severity=\"danger\"\n />\n </div>\n </Message>\n </template>\n <DataTable\n :value=\"extensionStore.extensions\"\n stripedRows\n size=\"small\"\n :filters=\"filters\"\n >\n <Column field=\"name\" :header=\"$t('extensionName')\" sortable></Column>\n <Column\n :pt=\"{\n bodyCell: 'flex items-center justify-end'\n }\"\n >\n <template #body=\"slotProps\">\n <ToggleSwitch\n v-model=\"editingEnabledExtensions[slotProps.data.name]\"\n @change=\"updateExtensionStatus\"\n />\n </template>\n </Column>\n </DataTable>\n </PanelTemplate>\n</template>\n\n<script setup lang=\"ts\">\nimport { ref, computed, onMounted } from 'vue'\nimport { useExtensionStore } from '@/stores/extensionStore'\nimport { useSettingStore } from '@/stores/settingStore'\nimport DataTable from 'primevue/datatable'\nimport Column from 'primevue/column'\nimport ToggleSwitch from 'primevue/toggleswitch'\nimport Button from 'primevue/button'\nimport Message from 'primevue/message'\nimport { FilterMatchMode } from '@primevue/core/api'\nimport PanelTemplate from './PanelTemplate.vue'\nimport SearchBox from '@/components/common/SearchBox.vue'\n\nconst filters = ref({\n global: { value: '', matchMode: FilterMatchMode.CONTAINS }\n})\n\nconst extensionStore = useExtensionStore()\nconst settingStore = useSettingStore()\n\nconst editingEnabledExtensions = ref<Record<string, boolean>>({})\n\nonMounted(() => {\n extensionStore.extensions.forEach((ext) => {\n editingEnabledExtensions.value[ext.name] =\n extensionStore.isExtensionEnabled(ext.name)\n })\n})\n\nconst changedExtensions = computed(() => {\n return extensionStore.extensions.filter(\n (ext) =>\n editingEnabledExtensions.value[ext.name] !==\n extensionStore.isExtensionEnabled(ext.name)\n )\n})\n\nconst hasChanges = computed(() => {\n return changedExtensions.value.length > 0\n})\n\nconst updateExtensionStatus = () => {\n const editingDisabledExtensionNames = Object.entries(\n editingEnabledExtensions.value\n )\n .filter(([_, enabled]) => !enabled)\n .map(([name]) => name)\n\n settingStore.set('Comfy.Extension.Disabled', [\n ...extensionStore.inactiveDisabledExtensionNames,\n ...editingDisabledExtensionNames\n ])\n}\n\nconst applyChanges = () => {\n // Refresh the page to apply changes\n 
window.location.reload()\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;AA8DA,UAAM,UAAU,IAAI;AAAA,MAClB,QAAQ,EAAE,OAAO,IAAI,WAAW,gBAAgB,SAAS;AAAA,IAAA,CAC1D;AAED,UAAM,iBAAiB;AACvB,UAAM,eAAe;AAEf,UAAA,2BAA2B,IAA6B,CAAA,CAAE;AAEhE,cAAU,MAAM;AACC,qBAAA,WAAW,QAAQ,CAAC,QAAQ;AACzC,iCAAyB,MAAM,IAAI,IAAI,IACrC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA,CAC7C;AAAA,IAAA,CACF;AAEK,UAAA,oBAAoB,SAAS,MAAM;AACvC,aAAO,eAAe,WAAW;AAAA,QAC/B,CAAC,QACC,yBAAyB,MAAM,IAAI,IAAI,MACvC,eAAe,mBAAmB,IAAI,IAAI;AAAA,MAAA;AAAA,IAC9C,CACD;AAEK,UAAA,aAAa,SAAS,MAAM;AACzB,aAAA,kBAAkB,MAAM,SAAS;AAAA,IAAA,CACzC;AAED,UAAM,wBAAwB,6BAAM;AAClC,YAAM,gCAAgC,OAAO;AAAA,QAC3C,yBAAyB;AAAA,MAExB,EAAA,OAAO,CAAC,CAAC,GAAG,OAAO,MAAM,CAAC,OAAO,EACjC,IAAI,CAAC,CAAC,IAAI,MAAM,IAAI;AAEvB,mBAAa,IAAI,4BAA4B;AAAA,QAC3C,GAAG,eAAe;AAAA,QAClB,GAAG;AAAA,MAAA,CACJ;AAAA,IAAA,GAV2B;AAa9B,UAAM,eAAe,6BAAM;AAEzB,aAAO,SAAS;IAAO,GAFJ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -45,7 +45,7 @@
--sidebar-icon-size: 1rem;
}
.side-tool-bar-container[data-v-e0812a25] {
.side-tool-bar-container[data-v-7851c166] {
display: flex;
flex-direction: column;
align-items: center;
@ -55,10 +55,11 @@
width: var(--sidebar-width);
height: 100%;
background-color: var(--comfy-menu-bg);
background-color: var(--comfy-menu-secondary-bg);
color: var(--fg-color);
box-shadow: var(--bar-shadow);
}
.side-tool-bar-end[data-v-e0812a25] {
.side-tool-bar-end[data-v-7851c166] {
align-self: flex-end;
margin-top: auto;
}
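The wholesale selector renames in this stylesheet (`data-v-e0812a25` → `data-v-7851c166`, and so on below) are Vue SFC scope ids: the compiler appends a per-component hash to every selector of a `<style scoped>` block, so rebuilding a changed component regenerates the id across all of its rules. A small sketch using `@vue/compiler-sfc`; the filename is hypothetical and the id is taken from this diff:

```ts
import { compileStyle } from '@vue/compiler-sfc'

// Scoped compilation appends the component's scope id to every selector,
// which is why a rebuilt component renames all of its CSS rules at once.
const { code } = compileStyle({
  source: '.side-tool-bar-container { display: flex; }',
  filename: 'SideToolbar.vue', // hypothetical component name
  id: 'data-v-7851c166',
  scoped: true
})
console.log(code) // .side-tool-bar-container[data-v-7851c166] { display: flex; }
```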
@ -97,7 +98,7 @@
z-index: 999;
}
[data-v-37f672ab] .highlight {
[data-v-d7cc0bce] .highlight {
background-color: var(--p-primary-color);
color: var(--p-primary-contrast-color);
font-weight: bold;
@ -124,7 +125,7 @@
align-items: flex-start !important;
}
.node-tooltip[data-v-c2e0098f] {
.node-tooltip[data-v-9ecc8adc] {
background: var(--comfy-input-bg);
border-radius: 5px;
box-shadow: 0 0 5px rgba(0, 0, 0, 0.4);
@ -152,31 +153,30 @@
border-radius: 0;
}
.comfy-menu-hamburger[data-v-2ddd26e8] {
.comfy-menu-hamburger[data-v-962c4073] {
pointer-events: auto;
position: fixed;
z-index: 9999;
}
[data-v-783f8efe] .p-togglebutton::before {
[data-v-4cb762cb] .p-togglebutton::before {
display: none
}
[data-v-783f8efe] .p-togglebutton {
[data-v-4cb762cb] .p-togglebutton {
position: relative;
flex-shrink: 0;
border-radius: 0px;
background-color: transparent;
padding-left: 0.5rem;
padding-right: 0.5rem
padding: 0px
}
[data-v-783f8efe] .p-togglebutton.p-togglebutton-checked {
[data-v-4cb762cb] .p-togglebutton.p-togglebutton-checked {
border-bottom-width: 2px;
border-bottom-color: var(--p-button-text-primary-color)
}
[data-v-783f8efe] .p-togglebutton-checked .close-button,[data-v-783f8efe] .p-togglebutton:hover .close-button {
[data-v-4cb762cb] .p-togglebutton-checked .close-button,[data-v-4cb762cb] .p-togglebutton:hover .close-button {
visibility: visible
}
.status-indicator[data-v-783f8efe] {
.status-indicator[data-v-4cb762cb] {
position: absolute;
font-weight: 700;
font-size: 1.5rem;
@ -184,22 +184,22 @@
left: 50%;
transform: translate(-50%, -50%)
}
[data-v-783f8efe] .p-togglebutton:hover .status-indicator {
[data-v-4cb762cb] .p-togglebutton:hover .status-indicator {
display: none
}
[data-v-783f8efe] .p-togglebutton .close-button {
[data-v-4cb762cb] .p-togglebutton .close-button {
visibility: hidden
}
.top-menubar[data-v-9646ca0a] .p-menubar-item-link svg {
.top-menubar[data-v-a2b12676] .p-menubar-item-link svg {
display: none;
}
[data-v-9646ca0a] .p-menubar-submenu.dropdown-direction-up {
[data-v-a2b12676] .p-menubar-submenu.dropdown-direction-up {
top: auto;
bottom: 100%;
flex-direction: column-reverse;
}
.keybinding-tag[data-v-9646ca0a] {
.keybinding-tag[data-v-a2b12676] {
background: var(--p-content-hover-background);
border-color: var(--p-content-border-color);
border-style: solid;
@ -210,7 +210,7 @@
border-bottom-left-radius: 0;
}
.comfyui-queue-button[data-v-95bc9be0] .p-splitbutton-dropdown {
.comfyui-queue-button[data-v-d3897845] .p-splitbutton-dropdown {
border-top-right-radius: 0;
border-bottom-right-radius: 0;
}
@ -238,10 +238,11 @@
display: none;
}
.comfyui-menu[data-v-d84a704d] {
.comfyui-menu[data-v-d792da31] {
width: 100vw;
background: var(--comfy-menu-bg);
color: var(--fg-color);
box-shadow: var(--bar-shadow);
font-family: Arial, Helvetica, sans-serif;
font-size: 0.8em;
box-sizing: border-box;
@ -250,13 +251,16 @@
grid-column: 1/-1;
max-height: 90vh;
}
.comfyui-menu.dropzone[data-v-d84a704d] {
.comfyui-menu.dropzone[data-v-d792da31] {
background: var(--p-highlight-background);
}
.comfyui-menu.dropzone-active[data-v-d84a704d] {
.comfyui-menu.dropzone-active[data-v-d792da31] {
background: var(--p-highlight-background-focus);
}
.comfyui-logo[data-v-d84a704d] {
[data-v-d792da31] .p-menubar-item-label {
line-height: revert;
}
.comfyui-logo[data-v-d792da31] {
font-size: 1.2em;
-webkit-user-select: none;
-moz-user-select: none;

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/assets/GraphView-DMP_lefG.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -1,4 +1,4 @@
[data-v-53e62b05] .p-steppanel {
[data-v-7ef01cf2] .p-steppanel {
background-color: transparent
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

web/assets/InstallView-D4T0qJ1I.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -1,8 +1,8 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, q as computed, g as openBlock, h as createElementBlock, N as Fragment, O as renderList, i as createVNode, y as withCtx, aw as createTextVNode, a6 as toDisplayString, z as unref, aA as script, j as createCommentVNode, r as ref, c6 as FilterMatchMode, M as useKeybindingStore, F as useCommandStore, aJ as watchEffect, bg as useToast, t as resolveDirective, x as createBlock, c7 as SearchBox, A as createBaseVNode, D as script$2, ao as script$4, bk as withModifiers, bT as script$5, aH as script$6, v as withDirectives, c8 as _sfc_main$2, P as pushScopeId, Q as popScopeId, c1 as KeyComboImpl, c9 as KeybindingImpl, _ as _export_sfc } from "./index-CoOvI8ZH.js";
import { s as script$1, a as script$3 } from "./index-DK6Kev7f.js";
import "./index-D4DWQPPQ.js";
import { a as defineComponent, q as computed, f as openBlock, g as createElementBlock, P as Fragment, Q as renderList, h as createVNode, y as withCtx, ay as createTextVNode, a8 as toDisplayString, z as unref, aC as script, i as createCommentVNode, r as ref, ck as FilterMatchMode, O as useKeybindingStore, F as useCommandStore, I as useI18n, aS as normalizeI18nKey, aL as watchEffect, bn as useToast, t as resolveDirective, x as createBlock, cl as SearchBox, A as createBaseVNode, D as script$2, aq as script$4, br as withModifiers, bW as script$5, aI as script$6, v as withDirectives, cm as _sfc_main$2, R as pushScopeId, U as popScopeId, ce as KeyComboImpl, cn as KeybindingImpl, _ as _export_sfc } from "./index-CSl7lfOs.js";
import { s as script$1, a as script$3 } from "./index-CgmI-OoW.js";
import "./index-aSkd2KAK.js";
const _hoisted_1$1 = {
key: 0,
class: "px-2"
@ -35,7 +35,7 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
};
}
});
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-9d7e362e"), n = n(), popScopeId(), n), "_withScopeId");
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-c20ad403"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "actions invisible flex flex-row" };
const _hoisted_2 = ["title"];
const _hoisted_3 = { key: 1 };
@ -47,9 +47,11 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
});
const keybindingStore = useKeybindingStore();
const commandStore = useCommandStore();
const { t } = useI18n();
const commandsData = computed(() => {
return Object.values(commandStore.commands).map((command) => ({
id: command.id,
label: t(`commands.${normalizeI18nKey(command.id)}.label`, command.label),
keybinding: keybindingStore.getKeybindingByCommandId(command.id)
}));
});
@ -140,7 +142,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
createVNode(SearchBox, {
modelValue: filters.value["global"].value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => filters.value["global"].value = $event),
placeholder: _ctx.$t("searchKeybindings") + "..."
placeholder: _ctx.$t("g.searchKeybindings") + "..."
}, null, 8, ["modelValue", "placeholder"])
]),
default: withCtx(() => [
@ -188,7 +190,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
createBaseVNode("div", {
class: "overflow-hidden text-ellipsis whitespace-nowrap",
title: slotProps.data.id
}, toDisplayString(slotProps.data.id), 9, _hoisted_2)
}, toDisplayString(slotProps.data.label), 9, _hoisted_2)
]),
_: 1
}),
@ -257,14 +259,14 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
}, 8, ["visible", "header"]),
withDirectives(createVNode(unref(script$2), {
class: "mt-4",
label: _ctx.$t("reset"),
label: _ctx.$t("g.reset"),
icon: "pi pi-trash",
severity: "danger",
fluid: "",
text: "",
onClick: resetKeybindings
}, null, 8, ["label"]), [
[_directive_tooltip, _ctx.$t("resetKeybindingsTooltip")]
[_directive_tooltip, _ctx.$t("g.resetKeybindingsTooltip")]
])
]),
_: 1
@ -272,8 +274,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-9d7e362e"]]);
const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-c20ad403"]]);
export {
KeybindingPanel as default
};
//# sourceMappingURL=KeybindingPanel-lcJrxHwZ.js.map
//# sourceMappingURL=KeybindingPanel-BlOA8Yhu.js.map
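The new code above resolves a command's display label through `t(`commands.${normalizeI18nKey(command.id)}.label`, command.label)`, falling back to the raw label when no translation entry exists. A sketch of the pattern for use inside a component's `setup()`; note that `normalizeI18nKey`'s real implementation lives elsewhere in the bundle and is not shown in this diff, so the dot-escaping below is an assumption:

```ts
import { useI18n } from 'vue-i18n'

// Hypothetical stand-in: command ids such as "Comfy.NewBlankWorkflow" contain
// dots, which vue-i18n reads as key-path separators, so some escaping of the
// id is presumably required before it can be used as a message key.
const normalizeI18nKey = (id: string): string => id.replace(/\./g, '_')

const { t } = useI18n()

// The second argument is the fallback: untranslated commands keep their label.
const commandLabel = (id: string, label: string): string =>
  t(`commands.${normalizeI18nKey(id)}.label`, label)
```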

web/assets/KeybindingPanel-BlOA8Yhu.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -1,8 +0,0 @@
[data-v-9d7e362e] .p-datatable-tbody > tr > td {
padding: 0.25rem;
min-height: 2rem
}
[data-v-9d7e362e] .p-datatable-row-selected .actions,[data-v-9d7e362e] .p-datatable-selectable-row:hover .actions {
visibility: visible
}

web/assets/KeybindingPanel-C3wT8hYZ.css generated vendored Normal file
View File

@ -0,0 +1,8 @@
[data-v-c20ad403] .p-datatable-tbody > tr > td {
padding: 0.25rem;
min-height: 2rem
}
[data-v-c20ad403] .p-datatable-row-selected .actions,[data-v-c20ad403] .p-datatable-selectable-row:hover .actions {
visibility: visible
}

File diff suppressed because one or more lines are too long

web/assets/NotSupportedView-Dhitj9aO.js generated vendored Normal file
View File

@ -0,0 +1,82 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { a as defineComponent, bU as useRouter, t as resolveDirective, f as openBlock, g as createElementBlock, A as createBaseVNode, a8 as toDisplayString, h as createVNode, z as unref, D as script, v as withDirectives } from "./index-CSl7lfOs.js";
const _imports_0 = "" + new URL("images/sad_girl.png", import.meta.url).href;
const _hoisted_1 = { class: "font-sans w-screen h-screen flex items-center m-0 text-neutral-900 bg-neutral-300 pointer-events-auto" };
const _hoisted_2 = { class: "flex-grow flex items-center justify-center" };
const _hoisted_3 = { class: "flex flex-col gap-8 p-8" };
const _hoisted_4 = { class: "text-4xl font-bold text-red-500" };
const _hoisted_5 = { class: "space-y-4" };
const _hoisted_6 = { class: "text-xl" };
const _hoisted_7 = { class: "list-disc list-inside space-y-1 text-neutral-800" };
const _hoisted_8 = { class: "flex gap-4" };
const _hoisted_9 = /* @__PURE__ */ createBaseVNode("div", { class: "h-screen flex-grow-0" }, [
/* @__PURE__ */ createBaseVNode("img", {
src: _imports_0,
alt: "Sad girl illustration",
class: "h-full object-cover"
})
], -1);
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "NotSupportedView",
setup(__props) {
const openDocs = /* @__PURE__ */ __name(() => {
window.open(
"https://github.com/Comfy-Org/desktop#currently-supported-platforms",
"_blank"
);
}, "openDocs");
const reportIssue = /* @__PURE__ */ __name(() => {
window.open("https://forum.comfy.org/c/v1-feedback/", "_blank");
}, "reportIssue");
const router = useRouter();
const continueToInstall = /* @__PURE__ */ __name(() => {
router.push("/install");
}, "continueToInstall");
return (_ctx, _cache) => {
const _directive_tooltip = resolveDirective("tooltip");
return openBlock(), createElementBlock("div", _hoisted_1, [
createBaseVNode("div", _hoisted_2, [
createBaseVNode("div", _hoisted_3, [
createBaseVNode("h1", _hoisted_4, toDisplayString(_ctx.$t("notSupported.title")), 1),
createBaseVNode("div", _hoisted_5, [
createBaseVNode("p", _hoisted_6, toDisplayString(_ctx.$t("notSupported.message")), 1),
createBaseVNode("ul", _hoisted_7, [
createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.macos")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.windows")), 1)
])
]),
createBaseVNode("div", _hoisted_8, [
createVNode(unref(script), {
label: _ctx.$t("notSupported.learnMore"),
icon: "pi pi-github",
onClick: openDocs,
severity: "secondary"
}, null, 8, ["label"]),
createVNode(unref(script), {
label: _ctx.$t("notSupported.reportIssue"),
icon: "pi pi-flag",
onClick: reportIssue,
severity: "secondary"
}, null, 8, ["label"]),
withDirectives(createVNode(unref(script), {
label: _ctx.$t("notSupported.continue"),
icon: "pi pi-arrow-right",
iconPos: "right",
onClick: continueToInstall,
severity: "danger"
}, null, 8, ["label"]), [
[_directive_tooltip, _ctx.$t("notSupported.continueTooltip")]
])
])
])
]),
_hoisted_9
]);
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=NotSupportedView-Dhitj9aO.js.map

web/assets/NotSupportedView-Dhitj9aO.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"NotSupportedView-Dhitj9aO.js","sources":["../../../../../../../assets/images/sad_girl.png","../../src/views/NotSupportedView.vue"],"sourcesContent":["export default \"__VITE_PUBLIC_ASSET__b82952e7__\"","<template>\n <div\n class=\"font-sans w-screen h-screen flex items-center m-0 text-neutral-900 bg-neutral-300 pointer-events-auto\"\n >\n <div class=\"flex-grow flex items-center justify-center\">\n <div class=\"flex flex-col gap-8 p-8\">\n <!-- Header -->\n <h1 class=\"text-4xl font-bold text-red-500\">\n {{ $t('notSupported.title') }}\n </h1>\n\n <!-- Message -->\n <div class=\"space-y-4\">\n <p class=\"text-xl\">\n {{ $t('notSupported.message') }}\n </p>\n <ul class=\"list-disc list-inside space-y-1 text-neutral-800\">\n <li>{{ $t('notSupported.supportedDevices.macos') }}</li>\n <li>{{ $t('notSupported.supportedDevices.windows') }}</li>\n </ul>\n </div>\n\n <!-- Actions -->\n <div class=\"flex gap-4\">\n <Button\n :label=\"$t('notSupported.learnMore')\"\n icon=\"pi pi-github\"\n @click=\"openDocs\"\n severity=\"secondary\"\n />\n <Button\n :label=\"$t('notSupported.reportIssue')\"\n icon=\"pi pi-flag\"\n @click=\"reportIssue\"\n severity=\"secondary\"\n />\n <Button\n :label=\"$t('notSupported.continue')\"\n icon=\"pi pi-arrow-right\"\n iconPos=\"right\"\n @click=\"continueToInstall\"\n severity=\"danger\"\n v-tooltip=\"$t('notSupported.continueTooltip')\"\n />\n </div>\n </div>\n </div>\n\n <!-- Right side image -->\n <div class=\"h-screen flex-grow-0\">\n <img\n src=\"/assets/images/sad_girl.png\"\n alt=\"Sad girl illustration\"\n class=\"h-full object-cover\"\n />\n </div>\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport { useRouter } from 'vue-router'\n\nconst openDocs = () => {\n window.open(\n 'https://github.com/Comfy-Org/desktop#currently-supported-platforms',\n '_blank'\n )\n}\n\nconst reportIssue = () => {\n window.open('https://forum.comfy.org/c/v1-feedback/', '_blank')\n}\n\nconst router = useRouter()\nconst continueToInstall = () => {\n router.push('/install')\n}\n</script>\n"],"names":[],"mappings":";;;AAAA,MAAe,aAAA,KAAA,IAAA,IAAA,uBAAA,YAAA,GAAA,EAAA;;;;;;;;;;;;;;;;;;;AC+Df,UAAM,WAAW,6BAAM;AACd,aAAA;AAAA,QACL;AAAA,QACA;AAAA,MAAA;AAAA,IACF,GAJe;AAOjB,UAAM,cAAc,6BAAM;AACjB,aAAA,KAAK,0CAA0C,QAAQ;AAAA,IAAA,GAD5C;AAIpB,UAAM,SAAS;AACf,UAAM,oBAAoB,6BAAM;AAC9B,aAAO,KAAK,UAAU;AAAA,IAAA,GADE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1,7 +1,7 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { A as createBaseVNode, g as openBlock, h as createElementBlock, aU as markRaw, d as defineComponent, u as useSettingStore, bw as storeToRefs, w as watch, cy as useCopyToClipboard, x as createBlock, y as withCtx, z as unref, bT as script, a6 as toDisplayString, O as renderList, N as Fragment, i as createVNode, D as script$1, j as createCommentVNode, bI as script$2, cz as formatCamelCase, cA as FormItem, c8 as _sfc_main$1, bN as electronAPI } from "./index-CoOvI8ZH.js";
import { u as useServerConfigStore } from "./serverConfigStore-cctR8PGG.js";
import { A as createBaseVNode, f as openBlock, g as createElementBlock, aZ as markRaw, a as defineComponent, u as useSettingStore, aK as storeToRefs, w as watch, cL as useCopyToClipboard, I as useI18n, x as createBlock, y as withCtx, z as unref, bW as script, a8 as toDisplayString, Q as renderList, P as Fragment, h as createVNode, D as script$1, i as createCommentVNode, bN as script$2, cM as FormItem, cm as _sfc_main$1, bZ as electronAPI } from "./index-CSl7lfOs.js";
import { u as useServerConfigStore } from "./serverConfigStore-D4vD2qBB.js";
const _hoisted_1$1 = {
viewBox: "0 0 24 24",
width: "1.2em",
@ -54,6 +54,14 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
const copyCommandLineArgs = /* @__PURE__ */ __name(async () => {
await copyToClipboard(commandLineArgs.value);
}, "copyCommandLineArgs");
const { t } = useI18n();
const translateItem = /* @__PURE__ */ __name((item) => {
return {
...item,
name: t(`serverConfigItems.${item.id}.name`, item.name),
tooltip: item.tooltip ? t(`serverConfigItems.${item.id}.tooltip`, item.tooltip) : void 0
};
}, "translateItem");
return (_ctx, _cache) => {
const _component_i_lucide58terminal = __unplugin_components_0;
return openBlock(), createBlock(_sfc_main$1, {
@ -119,14 +127,14 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
(openBlock(true), createElementBlock(Fragment, null, renderList(Object.entries(unref(serverConfigsByCategory)), ([label, items], i) => {
return openBlock(), createElementBlock("div", { key: label }, [
i > 0 ? (openBlock(), createBlock(unref(script$2), { key: 0 })) : createCommentVNode("", true),
createBaseVNode("h3", null, toDisplayString(unref(formatCamelCase)(label)), 1),
createBaseVNode("h3", null, toDisplayString(_ctx.$t(`serverConfigCategories.${label}`, label)), 1),
(openBlock(true), createElementBlock(Fragment, null, renderList(items, (item) => {
return openBlock(), createElementBlock("div", {
key: item.name,
class: "flex items-center mb-4"
}, [
createVNode(FormItem, {
item,
item: translateItem(item),
formValue: item.value,
"onUpdate:formValue": /* @__PURE__ */ __name(($event) => item.value = $event, "onUpdate:formValue"),
id: item.id,
@ -147,4 +155,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
//# sourceMappingURL=ServerConfigPanel-x68ubY-c.js.map
//# sourceMappingURL=ServerConfigPanel-6N6BTSXC.js.map
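`translateItem`, added above, localizes each server-config entry by id while keeping the hardcoded strings as fallbacks, so unknown items still render. A self-contained sketch of the same pattern; the `ServerConfigItem` interface here is an assumption, reduced to the fields the translation actually touches:

```ts
import { useI18n } from 'vue-i18n'

// Simplified shape; the real ServerConfig type carries more fields.
interface ServerConfigItem {
  id: string
  name: string
  tooltip?: string
}

const { t } = useI18n()

// Translate by id, falling back to the item's built-in English strings.
const translateItem = (item: ServerConfigItem): ServerConfigItem => ({
  ...item,
  name: t(`serverConfigItems.${item.id}.name`, item.name),
  tooltip: item.tooltip
    ? t(`serverConfigItems.${item.id}.tooltip`, item.tooltip)
    : undefined
})
```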

web/assets/ServerConfigPanel-6N6BTSXC.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"ServerConfigPanel-6N6BTSXC.js","sources":["../../src/components/dialog/content/setting/ServerConfigPanel.vue"],"sourcesContent":["<template>\n <PanelTemplate value=\"Server-Config\" class=\"server-config-panel\">\n <template #header>\n <div class=\"flex flex-col gap-2\">\n <Message\n v-if=\"modifiedConfigs.length > 0\"\n severity=\"info\"\n pt:text=\"w-full\"\n >\n <p>\n {{ $t('serverConfig.modifiedConfigs') }}\n </p>\n <ul>\n <li v-for=\"config in modifiedConfigs\" :key=\"config.id\">\n {{ config.name }}: {{ config.initialValue }} → {{ config.value }}\n </li>\n </ul>\n <div class=\"flex justify-end gap-2\">\n <Button\n :label=\"$t('serverConfig.revertChanges')\"\n @click=\"revertChanges\"\n outlined\n />\n <Button\n :label=\"$t('serverConfig.restart')\"\n @click=\"restartApp\"\n outlined\n severity=\"danger\"\n />\n </div>\n </Message>\n <Message v-if=\"commandLineArgs\" severity=\"secondary\" pt:text=\"w-full\">\n <template #icon>\n <i-lucide:terminal class=\"text-xl font-bold\" />\n </template>\n <div class=\"flex items-center justify-between\">\n <p>{{ commandLineArgs }}</p>\n <Button\n icon=\"pi pi-clipboard\"\n @click=\"copyCommandLineArgs\"\n severity=\"secondary\"\n text\n />\n </div>\n </Message>\n </div>\n </template>\n <div\n v-for=\"([label, items], i) in Object.entries(serverConfigsByCategory)\"\n :key=\"label\"\n >\n <Divider v-if=\"i > 0\" />\n <h3>{{ $t(`serverConfigCategories.${label}`, label) }}</h3>\n <div\n v-for=\"item in items\"\n :key=\"item.name\"\n class=\"flex items-center mb-4\"\n >\n <FormItem\n :item=\"translateItem(item)\"\n v-model:formValue=\"item.value\"\n :id=\"item.id\"\n :labelClass=\"{\n 'text-highlight': item.initialValue !== item.value\n }\"\n />\n </div>\n </div>\n </PanelTemplate>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport Message from 'primevue/message'\nimport Divider from 'primevue/divider'\nimport FormItem from '@/components/common/FormItem.vue'\nimport PanelTemplate from './PanelTemplate.vue'\nimport { useServerConfigStore } from '@/stores/serverConfigStore'\nimport { storeToRefs } from 'pinia'\nimport { electronAPI } from '@/utils/envUtil'\nimport { useSettingStore } from '@/stores/settingStore'\nimport { watch } from 'vue'\nimport { useCopyToClipboard } from '@/hooks/clipboardHooks'\nimport type { FormItem as FormItemType } from '@/types/settingTypes'\nimport type { ServerConfig } from '@/constants/serverConfig'\nimport { useI18n } from 'vue-i18n'\n\nconst settingStore = useSettingStore()\nconst serverConfigStore = useServerConfigStore()\nconst {\n serverConfigsByCategory,\n serverConfigValues,\n launchArgs,\n commandLineArgs,\n modifiedConfigs\n} = storeToRefs(serverConfigStore)\n\nconst revertChanges = () => {\n serverConfigStore.revertChanges()\n}\n\nconst restartApp = () => {\n electronAPI().restartApp()\n}\n\nwatch(launchArgs, (newVal) => {\n settingStore.set('Comfy.Server.LaunchArgs', newVal)\n})\n\nwatch(serverConfigValues, (newVal) => {\n settingStore.set('Comfy.Server.ServerConfigValues', newVal)\n})\n\nconst { copyToClipboard } = useCopyToClipboard()\nconst copyCommandLineArgs = async () => {\n await copyToClipboard(commandLineArgs.value)\n}\n\nconst { t } = useI18n()\nconst translateItem = (item: ServerConfig<any>): FormItemType => {\n return {\n ...item,\n name: t(`serverConfigItems.${item.id}.name`, item.name),\n tooltip: item.tooltip\n ? 
t(`serverConfigItems.${item.id}.tooltip`, item.tooltip)\n : undefined\n }\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuFA,UAAM,eAAe;AACrB,UAAM,oBAAoB;AACpB,UAAA;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA,IACE,YAAY,iBAAiB;AAEjC,UAAM,gBAAgB,6BAAM;AAC1B,wBAAkB,cAAc;AAAA,IAAA,GADZ;AAItB,UAAM,aAAa,6BAAM;AACvB,kBAAA,EAAc;IAAW,GADR;AAIb,UAAA,YAAY,CAAC,WAAW;AACf,mBAAA,IAAI,2BAA2B,MAAM;AAAA,IAAA,CACnD;AAEK,UAAA,oBAAoB,CAAC,WAAW;AACvB,mBAAA,IAAI,mCAAmC,MAAM;AAAA,IAAA,CAC3D;AAEK,UAAA,EAAE,oBAAoB;AAC5B,UAAM,sBAAsB,mCAAY;AAChC,YAAA,gBAAgB,gBAAgB,KAAK;AAAA,IAAA,GADjB;AAItB,UAAA,EAAE,MAAM;AACR,UAAA,gBAAgB,wBAAC,SAA0C;AACxD,aAAA;AAAA,QACL,GAAG;AAAA,QACH,MAAM,EAAE,qBAAqB,KAAK,EAAE,SAAS,KAAK,IAAI;AAAA,QACtD,SAAS,KAAK,UACV,EAAE,qBAAqB,KAAK,EAAE,YAAY,KAAK,OAAO,IACtD;AAAA,MAAA;AAAA,IACN,GAPoB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1 +0,0 @@
{"version":3,"file":"ServerConfigPanel-x68ubY-c.js","sources":["../../src/components/dialog/content/setting/ServerConfigPanel.vue"],"sourcesContent":["<template>\n <PanelTemplate value=\"Server-Config\" class=\"server-config-panel\">\n <template #header>\n <div class=\"flex flex-col gap-2\">\n <Message\n v-if=\"modifiedConfigs.length > 0\"\n severity=\"info\"\n pt:text=\"w-full\"\n >\n <p>\n {{ $t('serverConfig.modifiedConfigs') }}\n </p>\n <ul>\n <li v-for=\"config in modifiedConfigs\" :key=\"config.id\">\n {{ config.name }}: {{ config.initialValue }} → {{ config.value }}\n </li>\n </ul>\n <div class=\"flex justify-end gap-2\">\n <Button\n :label=\"$t('serverConfig.revertChanges')\"\n @click=\"revertChanges\"\n outlined\n />\n <Button\n :label=\"$t('serverConfig.restart')\"\n @click=\"restartApp\"\n outlined\n severity=\"danger\"\n />\n </div>\n </Message>\n <Message v-if=\"commandLineArgs\" severity=\"secondary\" pt:text=\"w-full\">\n <template #icon>\n <i-lucide:terminal class=\"text-xl font-bold\" />\n </template>\n <div class=\"flex items-center justify-between\">\n <p>{{ commandLineArgs }}</p>\n <Button\n icon=\"pi pi-clipboard\"\n @click=\"copyCommandLineArgs\"\n severity=\"secondary\"\n text\n />\n </div>\n </Message>\n </div>\n </template>\n <div\n v-for=\"([label, items], i) in Object.entries(serverConfigsByCategory)\"\n :key=\"label\"\n >\n <Divider v-if=\"i > 0\" />\n <h3>{{ formatCamelCase(label) }}</h3>\n <div\n v-for=\"item in items\"\n :key=\"item.name\"\n class=\"flex items-center mb-4\"\n >\n <FormItem\n :item=\"item\"\n v-model:formValue=\"item.value\"\n :id=\"item.id\"\n :labelClass=\"{\n 'text-highlight': item.initialValue !== item.value\n }\"\n />\n </div>\n </div>\n </PanelTemplate>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport Message from 'primevue/message'\nimport Divider from 'primevue/divider'\nimport FormItem from '@/components/common/FormItem.vue'\nimport PanelTemplate from './PanelTemplate.vue'\nimport { formatCamelCase } from '@/utils/formatUtil'\nimport { useServerConfigStore } from '@/stores/serverConfigStore'\nimport { storeToRefs } from 'pinia'\nimport { electronAPI } from '@/utils/envUtil'\nimport { useSettingStore } from '@/stores/settingStore'\nimport { watch } from 'vue'\nimport { useCopyToClipboard } from '@/hooks/clipboardHooks'\n\nconst settingStore = useSettingStore()\nconst serverConfigStore = useServerConfigStore()\nconst {\n serverConfigsByCategory,\n serverConfigValues,\n launchArgs,\n commandLineArgs,\n modifiedConfigs\n} = storeToRefs(serverConfigStore)\n\nconst revertChanges = () => {\n serverConfigStore.revertChanges()\n}\n\nconst restartApp = () => {\n electronAPI().restartApp()\n}\n\nwatch(launchArgs, (newVal) => {\n settingStore.set('Comfy.Server.LaunchArgs', newVal)\n})\n\nwatch(serverConfigValues, (newVal) => {\n settingStore.set('Comfy.Server.ServerConfigValues', newVal)\n})\n\nconst { copyToClipboard } = useCopyToClipboard()\nconst copyCommandLineArgs = async () => {\n await 
copyToClipboard(commandLineArgs.value)\n}\n</script>\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqFA,UAAM,eAAe;AACrB,UAAM,oBAAoB;AACpB,UAAA;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA,IACE,YAAY,iBAAiB;AAEjC,UAAM,gBAAgB,6BAAM;AAC1B,wBAAkB,cAAc;AAAA,IAAA,GADZ;AAItB,UAAM,aAAa,6BAAM;AACvB,kBAAA,EAAc;IAAW,GADR;AAIb,UAAA,YAAY,CAAC,WAAW;AACf,mBAAA,IAAI,2BAA2B,MAAM;AAAA,IAAA,CACnD;AAEK,UAAA,oBAAoB,CAAC,WAAW;AACvB,mBAAA,IAAI,mCAAmC,MAAM;AAAA,IAAA,CAC3D;AAEK,UAAA,EAAE,oBAAoB;AAC5B,UAAM,sBAAsB,mCAAY;AAChC,YAAA,gBAAgB,gBAAgB,KAAK;AAAA,IAAA,GADjB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1,5 +1,5 @@
[data-v-f5429be7] .xterm-helper-textarea {
[data-v-c0d3157e] .xterm-helper-textarea {
/* Hide this as it moves all over when uv is running */
display: none;
}

View File

@ -1,15 +1,15 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, aD as useI18n, r as ref, o as onMounted, g as openBlock, h as createElementBlock, A as createBaseVNode, aw as createTextVNode, a6 as toDisplayString, z as unref, j as createCommentVNode, i as createVNode, D as script, bM as BaseTerminal, P as pushScopeId, Q as popScopeId, bN as electronAPI, _ as _export_sfc } from "./index-CoOvI8ZH.js";
import { P as ProgressStatus } from "./index-BppSBmxJ.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-f5429be7"), n = n(), popScopeId(), n), "_withScopeId");
import { a as defineComponent, I as useI18n, r as ref, bX as ProgressStatus, o as onMounted, f as openBlock, g as createElementBlock, A as createBaseVNode, ay as createTextVNode, a8 as toDisplayString, z as unref, i as createCommentVNode, h as createVNode, D as script, x as createBlock, v as withDirectives, ad as vShow, bY as BaseTerminal, R as pushScopeId, U as popScopeId, bZ as electronAPI, _ as _export_sfc } from "./index-CSl7lfOs.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-c0d3157e"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "font-sans flex flex-col justify-center items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto" };
const _hoisted_2 = { class: "text-2xl font-bold" };
const _hoisted_3 = { key: 0 };
const _hoisted_4 = {
key: 0,
class: "flex items-center my-4 gap-2"
class: "flex flex-col items-center gap-4"
};
const _hoisted_5 = { class: "flex items-center my-4 gap-2" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ServerStartView",
setup(__props) {
@ -18,9 +18,11 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
const status = ref(ProgressStatus.INITIAL_STATE);
const electronVersion = ref("");
let xterm;
const terminalVisible = ref(true);
const updateProgress = /* @__PURE__ */ __name(({ status: newStatus }) => {
status.value = newStatus;
xterm?.clear();
if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false;
else xterm?.clear();
}, "updateProgress");
const terminalCreated = /* @__PURE__ */ __name(({ terminal, useAutoSize }, root) => {
xterm = terminal;
@ -49,31 +51,42 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("span", _hoisted_3, " v" + toDisplayString(electronVersion.value), 1)) : createCommentVNode("", true)
]),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("div", _hoisted_4, [
createVNode(unref(script), {
icon: "pi pi-flag",
createBaseVNode("div", _hoisted_5, [
createVNode(unref(script), {
icon: "pi pi-flag",
severity: "secondary",
label: unref(t)("serverStart.reportIssue"),
onClick: reportIssue
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-file",
severity: "secondary",
label: unref(t)("serverStart.openLogs"),
onClick: openLogs
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-refresh",
label: unref(t)("serverStart.reinstall"),
onClick: reinstall
}, null, 8, ["label"])
]),
!terminalVisible.value ? (openBlock(), createBlock(unref(script), {
key: 0,
icon: "pi pi-search",
severity: "secondary",
label: unref(t)("serverStart.reportIssue"),
onClick: reportIssue
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-file",
severity: "secondary",
label: unref(t)("serverStart.openLogs"),
onClick: openLogs
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-refresh",
label: unref(t)("serverStart.reinstall"),
onClick: reinstall
}, null, 8, ["label"])
label: unref(t)("serverStart.showTerminal"),
onClick: _cache[0] || (_cache[0] = ($event) => terminalVisible.value = true)
}, null, 8, ["label"])) : createCommentVNode("", true)
])) : createCommentVNode("", true),
createVNode(BaseTerminal, { onCreated: terminalCreated })
withDirectives(createVNode(BaseTerminal, { onCreated: terminalCreated }, null, 512), [
[vShow, terminalVisible.value]
])
]);
};
}
});
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-f5429be7"]]);
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-c0d3157e"]]);
export {
ServerStartView as default
};
//# sourceMappingURL=ServerStartView-CqRVtr1h.js.map
//# sourceMappingURL=ServerStartView-BykYRkoj.js.map
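The behavioral change in this view: on a fatal startup error the terminal is now hidden (via `v-show` on a new `terminalVisible` ref) so the error actions stand out, with a "show terminal" button to restore it; on any other status the terminal stays and its old output is simply cleared. Distilled from the component source embedded in the map below:

```ts
import { ref } from 'vue'
import { ProgressStatus } from '@comfyorg/comfyui-electron-types'
import type { Terminal } from '@xterm/xterm'

const status = ref<ProgressStatus>(ProgressStatus.INITIAL_STATE)
const terminalVisible = ref(true)
let xterm: Terminal | undefined

// Hide the terminal on ERROR to make the critical error screen more obvious;
// otherwise keep it visible and clear stale output.
const updateProgress = ({ status: newStatus }: { status: ProgressStatus }) => {
  status.value = newStatus
  if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false
  else xterm?.clear()
}
```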

web/assets/ServerStartView-BykYRkoj.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"ServerStartView-BykYRkoj.js","sources":["../../src/views/ServerStartView.vue"],"sourcesContent":["<template>\n <div\n class=\"font-sans flex flex-col justify-center items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto\"\n >\n <h2 class=\"text-2xl font-bold\">\n {{ t(`serverStart.process.${status}`) }}\n <span v-if=\"status === ProgressStatus.ERROR\">\n v{{ electronVersion }}\n </span>\n </h2>\n <div\n v-if=\"status === ProgressStatus.ERROR\"\n class=\"flex flex-col items-center gap-4\"\n >\n <div class=\"flex items-center my-4 gap-2\">\n <Button\n icon=\"pi pi-flag\"\n severity=\"secondary\"\n :label=\"t('serverStart.reportIssue')\"\n @click=\"reportIssue\"\n />\n <Button\n icon=\"pi pi-file\"\n severity=\"secondary\"\n :label=\"t('serverStart.openLogs')\"\n @click=\"openLogs\"\n />\n <Button\n icon=\"pi pi-refresh\"\n :label=\"t('serverStart.reinstall')\"\n @click=\"reinstall\"\n />\n </div>\n <Button\n v-if=\"!terminalVisible\"\n icon=\"pi pi-search\"\n severity=\"secondary\"\n :label=\"t('serverStart.showTerminal')\"\n @click=\"terminalVisible = true\"\n />\n </div>\n <BaseTerminal v-show=\"terminalVisible\" @created=\"terminalCreated\" />\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport { ref, onMounted, Ref } from 'vue'\nimport BaseTerminal from '@/components/bottomPanel/tabs/terminal/BaseTerminal.vue'\nimport { ProgressStatus } from '@comfyorg/comfyui-electron-types'\nimport { electronAPI } from '@/utils/envUtil'\nimport type { useTerminal } from '@/hooks/bottomPanelTabs/useTerminal'\nimport { Terminal } from '@xterm/xterm'\nimport { useI18n } from 'vue-i18n'\n\nconst electron = electronAPI()\nconst { t } = useI18n()\n\nconst status = ref<ProgressStatus>(ProgressStatus.INITIAL_STATE)\nconst electronVersion = ref<string>('')\nlet xterm: Terminal | undefined\n\nconst terminalVisible = ref(true)\n\nconst updateProgress = ({ status: newStatus }: { status: ProgressStatus }) => {\n status.value = newStatus\n\n // Make critical error screen more obvious.\n if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false\n else xterm?.clear()\n}\n\nconst terminalCreated = (\n { terminal, useAutoSize }: ReturnType<typeof useTerminal>,\n root: Ref<HTMLElement>\n) => {\n xterm = terminal\n\n useAutoSize(root, true, true)\n electron.onLogMessage((message: string) => {\n terminal.write(message)\n })\n\n terminal.options.cursorBlink = false\n terminal.options.disableStdin = true\n terminal.options.cursorInactiveStyle = 'block'\n}\n\nconst reinstall = () => electron.reinstall()\nconst reportIssue = () => {\n window.open('https://forum.comfy.org/c/v1-feedback/', '_blank')\n}\nconst openLogs = () => electron.openLogsFolder()\n\nonMounted(async () => {\n electron.sendReady()\n electron.onProgressUpdate(updateProgress)\n electronVersion.value = await electron.getElectronVersion()\n})\n</script>\n\n<style scoped>\n:deep(.xterm-helper-textarea) {\n /* Hide this as it moves all over when uv is running */\n display: 
none;\n}\n</style>\n"],"names":[],"mappings":";;;;;;;;;;;;;;;AAuDA,UAAM,WAAW;AACX,UAAA,EAAE,MAAM;AAER,UAAA,SAAS,IAAoB,eAAe,aAAa;AACzD,UAAA,kBAAkB,IAAY,EAAE;AAClC,QAAA;AAEE,UAAA,kBAAkB,IAAI,IAAI;AAEhC,UAAM,iBAAiB,wBAAC,EAAE,QAAQ,gBAA4C;AAC5E,aAAO,QAAQ;AAGf,UAAI,cAAc,eAAe,MAAO,iBAAgB,QAAQ;AAAA,kBACpD,MAAM;AAAA,IAAA,GALG;AAQvB,UAAM,kBAAkB,wBACtB,EAAE,UAAU,YAAA,GACZ,SACG;AACK,cAAA;AAEI,kBAAA,MAAM,MAAM,IAAI;AACnB,eAAA,aAAa,CAAC,YAAoB;AACzC,iBAAS,MAAM,OAAO;AAAA,MAAA,CACvB;AAED,eAAS,QAAQ,cAAc;AAC/B,eAAS,QAAQ,eAAe;AAChC,eAAS,QAAQ,sBAAsB;AAAA,IAAA,GAbjB;AAgBlB,UAAA,YAAY,6BAAM,SAAS,aAAf;AAClB,UAAM,cAAc,6BAAM;AACjB,aAAA,KAAK,0CAA0C,QAAQ;AAAA,IAAA,GAD5C;AAGd,UAAA,WAAW,6BAAM,SAAS,kBAAf;AAEjB,cAAU,YAAY;AACpB,eAAS,UAAU;AACnB,eAAS,iBAAiB,cAAc;AACxB,sBAAA,QAAQ,MAAM,SAAS,mBAAmB;AAAA,IAAA,CAC3D;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1 +0,0 @@
{"version":3,"file":"ServerStartView-CqRVtr1h.js","sources":["../../src/views/ServerStartView.vue"],"sourcesContent":["<template>\n <div\n class=\"font-sans flex flex-col justify-center items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto\"\n >\n <h2 class=\"text-2xl font-bold\">\n {{ t(`serverStart.process.${status}`) }}\n <span v-if=\"status === ProgressStatus.ERROR\">\n v{{ electronVersion }}\n </span>\n </h2>\n <div\n v-if=\"status === ProgressStatus.ERROR\"\n class=\"flex items-center my-4 gap-2\"\n >\n <Button\n icon=\"pi pi-flag\"\n severity=\"secondary\"\n :label=\"t('serverStart.reportIssue')\"\n @click=\"reportIssue\"\n />\n <Button\n icon=\"pi pi-file\"\n severity=\"secondary\"\n :label=\"t('serverStart.openLogs')\"\n @click=\"openLogs\"\n />\n <Button\n icon=\"pi pi-refresh\"\n :label=\"t('serverStart.reinstall')\"\n @click=\"reinstall\"\n />\n </div>\n <BaseTerminal @created=\"terminalCreated\" />\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport { ref, onMounted, Ref } from 'vue'\nimport BaseTerminal from '@/components/bottomPanel/tabs/terminal/BaseTerminal.vue'\nimport { ProgressStatus } from '@comfyorg/comfyui-electron-types'\nimport { electronAPI } from '@/utils/envUtil'\nimport type { useTerminal } from '@/hooks/bottomPanelTabs/useTerminal'\nimport { Terminal } from '@xterm/xterm'\nimport { useI18n } from 'vue-i18n'\n\nconst electron = electronAPI()\nconst { t } = useI18n()\n\nconst status = ref<ProgressStatus>(ProgressStatus.INITIAL_STATE)\nconst electronVersion = ref<string>('')\nlet xterm: Terminal | undefined\n\nconst updateProgress = ({ status: newStatus }: { status: ProgressStatus }) => {\n status.value = newStatus\n xterm?.clear()\n}\n\nconst terminalCreated = (\n { terminal, useAutoSize }: ReturnType<typeof useTerminal>,\n root: Ref<HTMLElement>\n) => {\n xterm = terminal\n\n useAutoSize(root, true, true)\n electron.onLogMessage((message: string) => {\n terminal.write(message)\n })\n\n terminal.options.cursorBlink = false\n terminal.options.disableStdin = true\n terminal.options.cursorInactiveStyle = 'block'\n}\n\nconst reinstall = () => electron.reinstall()\nconst reportIssue = () => {\n window.open('https://forum.comfy.org/c/v1-feedback/', '_blank')\n}\nconst openLogs = () => electron.openLogsFolder()\n\nonMounted(async () => {\n electron.sendReady()\n electron.onProgressUpdate(updateProgress)\n electronVersion.value = await electron.getElectronVersion()\n})\n</script>\n\n<style scoped>\n:deep(.xterm-helper-textarea) {\n /* Hide this as it moves all over when uv is running */\n display: none;\n}\n</style>\n"],"names":[],"mappings":";;;;;;;;;;;;;;;AA8CA,UAAM,WAAW;AACX,UAAA,EAAE,MAAM;AAER,UAAA,SAAS,IAAoB,eAAe,aAAa;AACzD,UAAA,kBAAkB,IAAY,EAAE;AAClC,QAAA;AAEJ,UAAM,iBAAiB,wBAAC,EAAE,QAAQ,gBAA4C;AAC5E,aAAO,QAAQ;AACf,aAAO,MAAM;AAAA,IAAA,GAFQ;AAKvB,UAAM,kBAAkB,wBACtB,EAAE,UAAU,YAAA,GACZ,SACG;AACK,cAAA;AAEI,kBAAA,MAAM,MAAM,IAAI;AACnB,eAAA,aAAa,CAAC,YAAoB;AACzC,iBAAS,MAAM,OAAO;AAAA,MAAA,CACvB;AAED,eAAS,QAAQ,cAAc;AAC/B,eAAS,QAAQ,eAAe;AAChC,eAAS,QAAQ,sBAAsB;AAAA,IAAA,GAbjB;AAgBlB,UAAA,YAAY,6BAAM,SAAS,aAAf;AAClB,UAAM,cAAc,6BAAM;AACjB,aAAA,KAAK,0CAA0C,QAAQ;AAAA,IAAA,GAD5C;AAGd,UAAA,WAAW,6BAAM,SAAS,kBAAf;AAEjB,cAAU,YAAY;AACpB,eAAS,UAAU;AACnB,eAAS,iBAAiB,cAAc;AACxB,sBAAA,QAAQ,MAAM,SAAS,mBAAmB;AAAA,IAAA,CAC3D;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

web/assets/UserSelectView-DMDUPUPX.js generated vendored Normal file
View File

@ -0,0 +1,98 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { a as defineComponent, J as useUserStore, bU as useRouter, r as ref, q as computed, o as onMounted, f as openBlock, g as createElementBlock, A as createBaseVNode, a8 as toDisplayString, h as createVNode, z as unref, aq as script, bN as script$1, bV as script$2, x as createBlock, y as withCtx, ay as createTextVNode, bW as script$3, i as createCommentVNode, D as script$4 } from "./index-CSl7lfOs.js";
const _hoisted_1 = {
id: "comfy-user-selection",
class: "font-sans flex flex-col items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto"
};
const _hoisted_2 = { class: "mt-[5vh] 2xl:mt-[20vh] min-w-84 relative rounded-lg bg-[var(--comfy-menu-bg)] p-5 px-10 shadow-lg" };
const _hoisted_3 = /* @__PURE__ */ createBaseVNode("h1", { class: "my-2.5 mb-7 font-normal" }, "ComfyUI", -1);
const _hoisted_4 = { class: "flex w-full flex-col items-center" };
const _hoisted_5 = { class: "flex w-full flex-col gap-2" };
const _hoisted_6 = { for: "new-user-input" };
const _hoisted_7 = { class: "flex w-full flex-col gap-2" };
const _hoisted_8 = { for: "existing-user-select" };
const _hoisted_9 = { class: "mt-5" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "UserSelectView",
setup(__props) {
const userStore = useUserStore();
const router = useRouter();
const selectedUser = ref(null);
const newUsername = ref("");
const loginError = ref("");
const createNewUser = computed(() => newUsername.value.trim() !== "");
const newUserExistsError = computed(() => {
return userStore.users.find((user) => user.username === newUsername.value) ? `User "${newUsername.value}" already exists` : "";
});
const error = computed(() => newUserExistsError.value || loginError.value);
const login = /* @__PURE__ */ __name(async () => {
try {
const user = createNewUser.value ? await userStore.createUser(newUsername.value) : selectedUser.value;
if (!user) {
throw new Error("No user selected");
}
userStore.login(user);
router.push("/");
} catch (err) {
loginError.value = err.message ?? JSON.stringify(err);
}
}, "login");
onMounted(async () => {
if (!userStore.initialized) {
await userStore.initialize();
}
});
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1, [
createBaseVNode("main", _hoisted_2, [
_hoisted_3,
createBaseVNode("form", _hoisted_4, [
createBaseVNode("div", _hoisted_5, [
createBaseVNode("label", _hoisted_6, toDisplayString(_ctx.$t("userSelect.newUser")) + ":", 1),
createVNode(unref(script), {
id: "new-user-input",
modelValue: newUsername.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => newUsername.value = $event),
placeholder: _ctx.$t("userSelect.enterUsername")
}, null, 8, ["modelValue", "placeholder"])
]),
createVNode(unref(script$1)),
createBaseVNode("div", _hoisted_7, [
createBaseVNode("label", _hoisted_8, toDisplayString(_ctx.$t("userSelect.existingUser")) + ":", 1),
createVNode(unref(script$2), {
modelValue: selectedUser.value,
"onUpdate:modelValue": _cache[1] || (_cache[1] = ($event) => selectedUser.value = $event),
class: "w-full",
inputId: "existing-user-select",
options: unref(userStore).users,
"option-label": "username",
placeholder: _ctx.$t("userSelect.selectUser"),
disabled: createNewUser.value
}, null, 8, ["modelValue", "options", "placeholder", "disabled"]),
error.value ? (openBlock(), createBlock(unref(script$3), {
key: 0,
severity: "error"
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(error.value), 1)
]),
_: 1
})) : createCommentVNode("", true)
]),
createBaseVNode("footer", _hoisted_9, [
createVNode(unref(script$4), {
label: _ctx.$t("userSelect.next"),
onClick: login
}, null, 8, ["label"])
])
])
])
]);
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=UserSelectView-DMDUPUPX.js.map
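The login flow in this new view merges two error sources: a derived conflict check on the typed username and the last login failure, with the conflict computed first so a stale API error never masks a live duplicate-name problem. The core of it, extracted from the source above; the `users` array is a stand-in for the user store:

```ts
import { computed, ref } from 'vue'

// Stand-in for userStore.users; the real list comes from the user store.
const users = [{ username: 'alice' }]

const newUsername = ref('')
const loginError = ref('')

// Derived validation: typing an existing name produces an immediate error.
const newUserExistsError = computed(() =>
  users.find((u) => u.username === newUsername.value)
    ? `User "${newUsername.value}" already exists`
    : ''
)

// The conflict takes precedence over any previous login failure.
const error = computed(() => newUserExistsError.value || loginError.value)
```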

web/assets/UserSelectView-DMDUPUPX.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"UserSelectView-DMDUPUPX.js","sources":["../../src/views/UserSelectView.vue"],"sourcesContent":["<template>\n <div\n id=\"comfy-user-selection\"\n class=\"font-sans flex flex-col items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto\"\n >\n <main\n class=\"mt-[5vh] 2xl:mt-[20vh] min-w-84 relative rounded-lg bg-[var(--comfy-menu-bg)] p-5 px-10 shadow-lg\"\n >\n <h1 class=\"my-2.5 mb-7 font-normal\">ComfyUI</h1>\n <form class=\"flex w-full flex-col items-center\">\n <div class=\"flex w-full flex-col gap-2\">\n <label for=\"new-user-input\">{{ $t('userSelect.newUser') }}:</label>\n <InputText\n id=\"new-user-input\"\n v-model=\"newUsername\"\n :placeholder=\"$t('userSelect.enterUsername')\"\n />\n </div>\n <Divider />\n <div class=\"flex w-full flex-col gap-2\">\n <label for=\"existing-user-select\"\n >{{ $t('userSelect.existingUser') }}:</label\n >\n <Select\n v-model=\"selectedUser\"\n class=\"w-full\"\n inputId=\"existing-user-select\"\n :options=\"userStore.users\"\n option-label=\"username\"\n :placeholder=\"$t('userSelect.selectUser')\"\n :disabled=\"createNewUser\"\n />\n <Message v-if=\"error\" severity=\"error\">{{ error }}</Message>\n </div>\n <footer class=\"mt-5\">\n <Button :label=\"$t('userSelect.next')\" @click=\"login\" />\n </footer>\n </form>\n </main>\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport Divider from 'primevue/divider'\nimport InputText from 'primevue/inputtext'\nimport Select from 'primevue/select'\nimport Message from 'primevue/message'\nimport { User, useUserStore } from '@/stores/userStore'\nimport { useRouter } from 'vue-router'\nimport { computed, onMounted, ref } from 'vue'\n\nconst userStore = useUserStore()\nconst router = useRouter()\n\nconst selectedUser = ref<User | null>(null)\nconst newUsername = ref('')\nconst loginError = ref('')\n\nconst createNewUser = computed(() => newUsername.value.trim() !== '')\nconst newUserExistsError = computed(() => {\n return userStore.users.find((user) => user.username === newUsername.value)\n ? `User \"${newUsername.value}\" already exists`\n : ''\n})\nconst error = computed(() => newUserExistsError.value || loginError.value)\n\nconst login = async () => {\n try {\n const user = createNewUser.value\n ? await userStore.createUser(newUsername.value)\n : selectedUser.value\n\n if (!user) {\n throw new Error('No user selected')\n }\n\n userStore.login(user)\n router.push('/')\n } catch (err) {\n loginError.value = err.message ?? JSON.stringify(err)\n }\n}\n\nonMounted(async () => {\n if (!userStore.initialized) {\n await userStore.initialize()\n }\n})\n</script>\n"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;AAoDA,UAAM,YAAY;AAClB,UAAM,SAAS;AAET,UAAA,eAAe,IAAiB,IAAI;AACpC,UAAA,cAAc,IAAI,EAAE;AACpB,UAAA,aAAa,IAAI,EAAE;AAEzB,UAAM,gBAAgB,SAAS,MAAM,YAAY,MAAM,KAAA,MAAW,EAAE;AAC9D,UAAA,qBAAqB,SAAS,MAAM;AACxC,aAAO,UAAU,MAAM,KAAK,CAAC,SAAS,KAAK,aAAa,YAAY,KAAK,IACrE,SAAS,YAAY,KAAK,qBAC1B;AAAA,IAAA,CACL;AACD,UAAM,QAAQ,SAAS,MAAM,mBAAmB,SAAS,WAAW,KAAK;AAEzE,UAAM,QAAQ,mCAAY;AACpB,UAAA;AACI,cAAA,OAAO,cAAc,QACvB,MAAM,UAAU,WAAW,YAAY,KAAK,IAC5C,aAAa;AAEjB,YAAI,CAAC,MAAM;AACH,gBAAA,IAAI,MAAM,kBAAkB;AAAA,QACpC;AAEA,kBAAU,MAAM,IAAI;AACpB,eAAO,KAAK,GAAG;AAAA,eACR,KAAK;AACZ,mBAAW,QAAQ,IAAI,WAAW,KAAK,UAAU,GAAG;AAAA,MACtD;AAAA,IAAA,GAdY;AAiBd,cAAU,YAAY;AAChB,UAAA,CAAC,UAAU,aAAa;AAC1B,cAAM,UAAU;MAClB;AAAA,IAAA,CACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1,5 +1,5 @@
.animated-gradient-text[data-v-12b8b11b] {
.animated-gradient-text[data-v-c4d014c5] {
font-weight: 700;
font-size: clamp(2rem, 8vw, 4rem);
background: linear-gradient(to right, #12c2e9, #c471ed, #f64f59, #12c2e9);
@ -7,12 +7,12 @@
background-clip: text;
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
animation: gradient-12b8b11b 8s linear infinite;
animation: gradient-c4d014c5 8s linear infinite;
}
.text-glow[data-v-12b8b11b] {
.text-glow[data-v-c4d014c5] {
filter: drop-shadow(0 0 8px rgba(255, 255, 255, 0.3));
}
@keyframes gradient-12b8b11b {
@keyframes gradient-c4d014c5 {
0% {
background-position: 0% center;
}
@ -20,11 +20,11 @@
background-position: 300% center;
}
}
.fade-in-up[data-v-12b8b11b] {
animation: fadeInUp-12b8b11b 1.5s ease-out;
.fade-in-up[data-v-c4d014c5] {
animation: fadeInUp-c4d014c5 1.5s ease-out;
animation-fill-mode: both;
}
@keyframes fadeInUp-12b8b11b {
@keyframes fadeInUp-c4d014c5 {
0% {
opacity: 0;
transform: translateY(20px);

View File

@ -1 +0,0 @@
{"version":3,"file":"WelcomeView-C4D1cggT.js","sources":[],"sourcesContent":[],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;"}

View File

@ -1,13 +1,17 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, g as openBlock, h as createElementBlock, A as createBaseVNode, a6 as toDisplayString, i as createVNode, z as unref, D as script, P as pushScopeId, Q as popScopeId, _ as _export_sfc } from "./index-CoOvI8ZH.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-12b8b11b"), n = n(), popScopeId(), n), "_withScopeId");
import { a as defineComponent, bU as useRouter, f as openBlock, g as createElementBlock, A as createBaseVNode, a8 as toDisplayString, h as createVNode, z as unref, D as script, R as pushScopeId, U as popScopeId, _ as _export_sfc } from "./index-CSl7lfOs.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-c4d014c5"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "font-sans flex flex-col justify-center items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto" };
const _hoisted_2 = { class: "flex flex-col items-center justify-center gap-8 p-8" };
const _hoisted_3 = { class: "animated-gradient-text text-glow select-none" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "WelcomeView",
setup(__props) {
const router = useRouter();
const navigateTo = /* @__PURE__ */ __name((path) => {
router.push(path);
}, "navigateTo");
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1, [
createBaseVNode("div", _hoisted_2, [
@ -18,7 +22,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
iconPos: "right",
size: "large",
rounded: "",
onClick: _cache[0] || (_cache[0] = ($event) => _ctx.$router.push("/install")),
onClick: _cache[0] || (_cache[0] = ($event) => navigateTo("/install")),
class: "p-4 text-lg fade-in-up"
}, null, 8, ["label"])
])
@ -26,8 +30,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const WelcomeView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-12b8b11b"]]);
const WelcomeView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-c4d014c5"]]);
export {
WelcomeView as default
};
//# sourceMappingURL=WelcomeView-C4D1cggT.js.map
//# sourceMappingURL=WelcomeView-D6WEsVyp.js.map
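The one functional change here is that the template now calls a setup-scope `navigateTo` helper instead of `_ctx.$router.push`, routing through the instance returned by `useRouter()` rather than the globally injected `$router`. As in the source:

```ts
import { useRouter } from 'vue-router'

// Setup-scope navigation helper; the template binds @click="navigateTo('/install')".
const router = useRouter()
const navigateTo = (path: string) => {
  router.push(path)
}
```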

web/assets/WelcomeView-D6WEsVyp.js.map generated vendored Normal file
View File

@ -0,0 +1 @@
{"version":3,"file":"WelcomeView-D6WEsVyp.js","sources":["../../src/views/WelcomeView.vue"],"sourcesContent":["<template>\n <div\n class=\"font-sans flex flex-col justify-center items-center h-screen m-0 text-neutral-300 bg-neutral-900 dark-theme pointer-events-auto\"\n >\n <div class=\"flex flex-col items-center justify-center gap-8 p-8\">\n <!-- Header -->\n <h1 class=\"animated-gradient-text text-glow select-none\">\n {{ $t('welcome.title') }}\n </h1>\n\n <!-- Get Started Button -->\n <Button\n :label=\"$t('welcome.getStarted')\"\n icon=\"pi pi-arrow-right\"\n iconPos=\"right\"\n size=\"large\"\n rounded\n @click=\"navigateTo('/install')\"\n class=\"p-4 text-lg fade-in-up\"\n />\n </div>\n </div>\n</template>\n\n<script setup lang=\"ts\">\nimport Button from 'primevue/button'\nimport { useRouter } from 'vue-router'\n\nconst router = useRouter()\nconst navigateTo = (path: string) => {\n router.push(path)\n}\n</script>\n\n<style scoped>\n.animated-gradient-text {\n @apply font-bold;\n font-size: clamp(2rem, 8vw, 4rem);\n background: linear-gradient(to right, #12c2e9, #c471ed, #f64f59, #12c2e9);\n background-size: 300% auto;\n background-clip: text;\n -webkit-background-clip: text;\n -webkit-text-fill-color: transparent;\n animation: gradient 8s linear infinite;\n}\n\n.text-glow {\n filter: drop-shadow(0 0 8px rgba(255, 255, 255, 0.3));\n}\n\n@keyframes gradient {\n 0% {\n background-position: 0% center;\n }\n\n 100% {\n background-position: 300% center;\n }\n}\n\n.fade-in-up {\n animation: fadeInUp 1.5s ease-out;\n animation-fill-mode: both;\n}\n\n@keyframes fadeInUp {\n 0% {\n opacity: 0;\n transform: translateY(20px);\n }\n\n 100% {\n opacity: 1;\n transform: translateY(0);\n }\n}\n</style>\n"],"names":[],"mappings":";;;;;;;;;;AA4BA,UAAM,SAAS;AACT,UAAA,aAAa,wBAAC,SAAiB;AACnC,aAAO,KAAK,IAAI;AAAA,IAAA,GADC;;;;;;;;;;;;;;;;;;;;"}

web/assets/images/Git-Logo-White.svg generated vendored Normal file
View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="292" height="92pt" viewBox="0 0 219 92"><defs><clipPath id="a"><path d="M159 .79h25V69h-25Zm0 0"/></clipPath><clipPath id="b"><path d="M183 9h35.371v60H183Zm0 0"/></clipPath><clipPath id="c"><path d="M0 .79h92V92H0Zm0 0"/></clipPath></defs><path style="stroke:none;fill-rule:nonzero;fill:#fff;fill-opacity:1" d="M130.871 31.836c-4.785 0-8.351 2.352-8.351 8.008 0 4.261 2.347 7.222 8.093 7.222 4.871 0 8.18-2.867 8.18-7.398 0-5.133-2.961-7.832-7.922-7.832Zm-9.57 39.95c-1.133 1.39-2.262 2.87-2.262 4.612 0 3.48 4.434 4.524 10.527 4.524 5.051 0 11.926-.352 11.926-5.043 0-2.793-3.308-2.965-7.488-3.227Zm25.761-39.688c1.563 2.004 3.22 4.789 3.22 8.793 0 9.656-7.571 15.316-18.536 15.316-2.789 0-5.312-.348-6.879-.785l-2.87 4.613 8.526.52c15.059.96 23.934 1.398 23.934 12.968 0 10.008-8.789 15.665-23.934 15.665-15.75 0-21.757-4.004-21.757-10.88 0-3.917 1.742-6 4.789-8.878-2.875-1.211-3.828-3.387-3.828-5.739 0-1.914.953-3.656 2.523-5.312 1.566-1.652 3.305-3.305 5.395-5.219-4.262-2.09-7.485-6.617-7.485-13.058 0-10.008 6.613-16.88 19.93-16.88 3.742 0 6.004.344 8.008.872h16.972v7.394l-8.007.61"/><g clip-path="url(#a)"><path style="stroke:none;fill-rule:nonzero;fill:#fff;fill-opacity:1" d="M170.379 16.281c-4.961 0-7.832-2.87-7.832-7.836 0-4.957 2.871-7.656 7.832-7.656 5.05 0 7.922 2.7 7.922 7.656 0 4.965-2.871 7.836-7.922 7.836Zm-11.227 52.305V61.71l4.438-.606c1.219-.175 1.394-.437 1.394-1.746V33.773c0-.953-.261-1.566-1.132-1.824l-4.7-1.656.957-7.047h18.016V59.36c0 1.399.086 1.57 1.395 1.746l4.437.606v6.875h-24.805"/></g><g clip-path="url(#b)"><path style="stroke:none;fill-rule:nonzero;fill:#fff;fill-opacity:1" d="M218.371 65.21c-3.742 1.825-9.223 3.481-14.187 3.481-10.356 0-14.27-4.175-14.27-14.015V31.879c0-.524 0-.871-.7-.871h-6.093v-7.746c7.664-.871 10.707-4.703 11.664-14.188h8.27v12.36c0 .609 0 .87.695.87h12.27v8.704h-12.965v20.797c0 5.136 1.218 7.136 5.918 7.136 2.437 0 4.96-.609 7.047-1.39l2.351 7.66"/></g><g clip-path="url(#c)"><path style="stroke:none;fill-rule:nonzero;fill:#fff;fill-opacity:1" d="M89.422 42.371 49.629 2.582a5.868 5.868 0 0 0-8.3 0l-8.263 8.262 10.48 10.484a6.965 6.965 0 0 1 7.173 1.668 6.98 6.98 0 0 1 1.656 7.215l10.102 10.105a6.963 6.963 0 0 1 7.214 1.657 6.976 6.976 0 0 1 0 9.875 6.98 6.98 0 0 1-9.879 0 6.987 6.987 0 0 1-1.519-7.594l-9.422-9.422v24.793a6.979 6.979 0 0 1 1.848 1.32 6.988 6.988 0 0 1 0 9.88c-2.73 2.726-7.153 2.726-9.875 0a6.98 6.98 0 0 1 0-9.88 6.893 6.893 0 0 1 2.285-1.523V34.398a6.893 6.893 0 0 1-2.285-1.523 6.988 6.988 0 0 1-1.508-7.637L29.004 14.902 1.719 42.187a5.868 5.868 0 0 0 0 8.301l39.793 39.793a5.868 5.868 0 0 0 8.3 0l39.61-39.605a5.873 5.873 0 0 0 0-8.305"/></g></svg>

web/assets/images/sad_girl.png generated vendored Normal file

Binary file not shown.
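
Most of the stylesheet diff that follows is Vue scoped-CSS hash churn rather than real rule changes: selectors in a `<style scoped>` block are suffixed with a per-component `data-v-xxxxxxxx` attribute whose hash the build regenerates, so recompiling a component rewrites every scoped selector even when the declarations are identical (e.g. `[data-v-f28148d1]` becomes `[data-v-e10998c1]`). A toy TypeScript sketch of the mechanism, using a hypothetical `scopeSelector` helper rather than Vue's actual compiler:

```
// Toy illustration of scoped-CSS rewriting (hypothetical helper, not
// Vue's real compiler). The scope id is derived from the component, so
// a rebuild that changes the component changes every scoped selector.
const scopeId = 'data-v-e10998c1' // regenerated when the component changes

function scopeSelector(selector: string, id: string): string {
  // Vue attaches the scope id as an attribute-selector suffix.
  return `${selector}[${id}]`
}

console.log(scopeSelector('.search-box-input', scopeId))
// -> .search-box-input[data-v-e10998c1]
```

The genuinely new content in the file is mostly added Tailwind utilities (including `.bg-[url('/assets/images/Git-Logo-White.svg')]`, which consumes the logo added above) plus a new `--comfy-menu-secondary-bg` theme variable.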


View File

@ -68,26 +68,26 @@
background-color: rgb(234 179 8 / var(--tw-bg-opacity))
}
.search-box-input[data-v-f28148d1] {
.search-box-input[data-v-e10998c1] {
width: 100%;
padding-left: 36px;
}
.search-box-input.with-filter[data-v-f28148d1] {
.search-box-input.with-filter[data-v-e10998c1] {
padding-right: 36px;
}
.p-button.p-inputicon[data-v-f28148d1] {
.p-button.p-inputicon[data-v-e10998c1] {
padding: 0;
width: auto;
border: none !important;
}
.form-input[data-v-4fbf09d8] .input-slider .p-inputnumber input,
.form-input[data-v-4fbf09d8] .input-slider .slider-part {
.form-input[data-v-e54b447b] .input-slider .p-inputnumber input,
.form-input[data-v-e54b447b] .input-slider .slider-part {
width: 5rem
}
.form-input[data-v-4fbf09d8] .p-inputtext,
.form-input[data-v-4fbf09d8] .p-select {
.form-input[data-v-e54b447b] .p-inputtext,
.form-input[data-v-e54b447b] .p-select {
width: 11rem
}
@ -333,10 +333,10 @@
overflow-y: hidden;
}
[data-v-e8581ca7] .p-terminal .xterm {
[data-v-36dec989] .p-terminal .xterm {
overflow-x: auto;
}
[data-v-e8581ca7] .p-terminal .xterm-screen {
[data-v-36dec989] .p-terminal .xterm-screen {
background-color: black;
overflow-y: hidden;
}
@ -345,7 +345,7 @@
padding-top: 0px !important;
}
.settings-container[data-v-dbb35a0c] {
.settings-container[data-v-d85d6e64] {
display: flex;
height: 70vh;
width: 60vw;
@ -353,21 +353,24 @@
overflow: hidden;
}
@media (max-width: 768px) {
.settings-container[data-v-dbb35a0c] {
.settings-container[data-v-d85d6e64] {
flex-direction: column;
height: auto;
}
.settings-sidebar[data-v-dbb35a0c] {
.settings-sidebar[data-v-d85d6e64] {
width: 100%;
}
.settings-content[data-v-d85d6e64] {
height: 350px;
}
}
/* Show a separator line above the Keybinding tab */
/* This indicates the start of custom setting panels */
.settings-sidebar[data-v-dbb35a0c] .p-listbox-option[aria-label='Keybinding'] {
.settings-sidebar[data-v-d85d6e64] .p-listbox-option[aria-label='Keybinding'] {
position: relative;
}
.settings-sidebar[data-v-dbb35a0c] .p-listbox-option[aria-label='Keybinding']::before {
.settings-sidebar[data-v-d85d6e64] .p-listbox-option[aria-label='Keybinding']::before {
position: absolute;
top: 0px;
left: 0px;
@ -377,25 +380,25 @@
border-top: 1px solid var(--p-divider-border-color);
}
.pi-cog[data-v-f3b37ea3] {
.pi-cog[data-v-43089afc] {
font-size: 1.25rem;
margin-right: 0.5rem;
}
.version-tag[data-v-f3b37ea3] {
.version-tag[data-v-43089afc] {
margin-left: 0.5rem;
}
.comfy-error-report[data-v-db438f98] {
.comfy-error-report[data-v-5c200f18] {
display: flex;
flex-direction: column;
gap: 1rem;
}
.action-container[data-v-db438f98] {
.action-container[data-v-5c200f18] {
display: flex;
gap: 1rem;
justify-content: flex-end;
}
.wrapper-pre[data-v-db438f98] {
.wrapper-pre[data-v-5c200f18] {
white-space: pre-wrap;
word-wrap: break-word;
}
@ -407,6 +410,10 @@
[data-v-98830966] .p-card-subtitle {
text-align: center;
}
.prompt-dialog-content[data-v-abaaab2c] {
white-space: pre-wrap;
}
.mdi.rotate270::before {
transform: rotate(270deg);
}
@ -736,7 +743,7 @@
word-break: break-all;
}
[data-v-b7c3d32e] .tree-explorer-node-label {
[data-v-82fb18cb] .tree-explorer-node-label {
width: 100%;
display: flex;
align-items: center;
@ -749,10 +756,10 @@
* By setting the position to relative on the parent and using an absolutely positioned pseudo-element,
* we can create a visual indicator for the drop target without affecting the layout of other elements.
*/
[data-v-b7c3d32e] .p-tree-node-content:has(.tree-folder) {
[data-v-82fb18cb] .p-tree-node-content:has(.tree-folder) {
position: relative;
}
[data-v-b7c3d32e] .p-tree-node-content:has(.tree-folder.can-drop)::after {
[data-v-82fb18cb] .p-tree-node-content:has(.tree-folder.can-drop)::after {
content: '';
position: absolute;
top: 0;
@ -842,23 +849,23 @@
vertical-align: top;
}
[data-v-827f7782] .pi-fake-spacer {
[data-v-31a92a0f] .pi-fake-spacer {
height: 1px;
width: 16px;
}
.slot_row[data-v-4b126b34] {
.slot_row[data-v-e86c3783] {
padding: 2px;
}
/* Original N-Sidebar styles */
._sb_dot[data-v-4b126b34] {
._sb_dot[data-v-e86c3783] {
width: 8px;
height: 8px;
border-radius: 50%;
background-color: grey;
}
.node_header[data-v-4b126b34] {
.node_header[data-v-e86c3783] {
line-height: 1;
padding: 8px 13px 7px;
margin-bottom: 5px;
@ -868,37 +875,37 @@
display: flex;
align-items: center;
}
.headdot[data-v-4b126b34] {
.headdot[data-v-e86c3783] {
width: 10px;
height: 10px;
float: inline-start;
margin-right: 8px;
}
.IMAGE[data-v-4b126b34] {
.IMAGE[data-v-e86c3783] {
background-color: #64b5f6;
}
.VAE[data-v-4b126b34] {
.VAE[data-v-e86c3783] {
background-color: #ff6e6e;
}
.LATENT[data-v-4b126b34] {
.LATENT[data-v-e86c3783] {
background-color: #ff9cf9;
}
.MASK[data-v-4b126b34] {
.MASK[data-v-e86c3783] {
background-color: #81c784;
}
.CONDITIONING[data-v-4b126b34] {
.CONDITIONING[data-v-e86c3783] {
background-color: #ffa931;
}
.CLIP[data-v-4b126b34] {
.CLIP[data-v-e86c3783] {
background-color: #ffd500;
}
.MODEL[data-v-4b126b34] {
.MODEL[data-v-e86c3783] {
background-color: #b39ddb;
}
.CONTROL_NET[data-v-4b126b34] {
.CONTROL_NET[data-v-e86c3783] {
background-color: #a5d6a7;
}
._sb_node_preview[data-v-4b126b34] {
._sb_node_preview[data-v-e86c3783] {
background-color: var(--comfy-menu-bg);
font-family: 'Open Sans', sans-serif;
font-size: small;
@ -915,7 +922,7 @@
font-size: 12px;
padding-bottom: 10px;
}
._sb_node_preview ._sb_description[data-v-4b126b34] {
._sb_node_preview ._sb_description[data-v-e86c3783] {
margin: 10px;
padding: 6px;
background: var(--border-color);
@ -925,7 +932,7 @@
font-size: 0.9rem;
word-break: break-word;
}
._sb_table[data-v-4b126b34] {
._sb_table[data-v-e86c3783] {
display: grid;
grid-column-gap: 10px;
@ -933,7 +940,7 @@
width: 100%;
/* Set the table width to 100% of the container */
}
._sb_row[data-v-4b126b34] {
._sb_row[data-v-e86c3783] {
display: grid;
grid-template-columns: 10px 1fr 1fr 1fr 10px;
grid-column-gap: 10px;
@ -941,10 +948,10 @@
padding-left: 9px;
padding-right: 9px;
}
._sb_row_string[data-v-4b126b34] {
._sb_row_string[data-v-e86c3783] {
grid-template-columns: 10px 1fr 1fr 10fr 1fr;
}
._sb_col[data-v-4b126b34] {
._sb_col[data-v-e86c3783] {
border: 0px solid #000;
display: flex;
align-items: flex-end;
@ -953,10 +960,10 @@
align-content: flex-start;
justify-content: flex-end;
}
._sb_inherit[data-v-4b126b34] {
._sb_inherit[data-v-e86c3783] {
display: inherit;
}
._long_field[data-v-4b126b34] {
._long_field[data-v-e86c3783] {
background: var(--bg-color);
border: 2px solid var(--border-color);
margin: 5px 5px 0 5px;
@ -964,45 +971,45 @@
line-height: 1.7;
text-wrap: nowrap;
}
._sb_arrow[data-v-4b126b34] {
._sb_arrow[data-v-e86c3783] {
color: var(--fg-color);
}
._sb_preview_badge[data-v-4b126b34] {
._sb_preview_badge[data-v-e86c3783] {
text-align: center;
background: var(--comfy-input-bg);
font-weight: bold;
color: var(--error-text);
}
.node-lib-node-container[data-v-90dfee08] {
.node-lib-node-container[data-v-20bd95eb] {
height: 100%;
width: 100%
}
.p-selectbutton .p-button[data-v-91077f2a] {
.p-selectbutton .p-button[data-v-29268946] {
padding: 0.5rem;
}
.p-selectbutton .p-button .pi[data-v-91077f2a] {
.p-selectbutton .p-button .pi[data-v-29268946] {
font-size: 1.5rem;
}
.field[data-v-91077f2a] {
.field[data-v-29268946] {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.color-picker-container[data-v-91077f2a] {
.color-picker-container[data-v-29268946] {
display: flex;
align-items: center;
gap: 0.5rem;
}
._content[data-v-e7b35fd9] {
._content[data-v-2fc57c5b] {
display: flex;
flex-direction: column
}
._content[data-v-e7b35fd9] > :not([hidden]) ~ :not([hidden]) {
._content[data-v-2fc57c5b] > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
@ -1010,7 +1017,7 @@
margin-bottom: calc(0.5rem * var(--tw-space-y-reverse))
}
._footer[data-v-e7b35fd9] {
._footer[data-v-2fc57c5b] {
display: flex;
@ -1021,10 +1028,10 @@
padding-top: 1rem
}
.comfy-image-wrap[data-v-9bc23daf] {
.comfy-image-wrap[data-v-ffe66146] {
display: contents;
}
.comfy-image-blur[data-v-9bc23daf] {
.comfy-image-blur[data-v-ffe66146] {
position: absolute;
top: 0;
left: 0;
@ -1033,7 +1040,7 @@
-o-object-fit: cover;
object-fit: cover;
}
.comfy-image-main[data-v-9bc23daf] {
.comfy-image-main[data-v-ffe66146] {
width: 100%;
height: 100%;
-o-object-fit: cover;
@ -1042,19 +1049,19 @@
object-position: center;
z-index: 1;
}
.contain .comfy-image-wrap[data-v-9bc23daf] {
.contain .comfy-image-wrap[data-v-ffe66146] {
position: relative;
width: 100%;
height: 100%;
}
.contain .comfy-image-main[data-v-9bc23daf] {
.contain .comfy-image-main[data-v-ffe66146] {
-o-object-fit: contain;
object-fit: contain;
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
position: absolute;
}
.broken-image-placeholder[data-v-9bc23daf] {
.broken-image-placeholder[data-v-ffe66146] {
display: flex;
flex-direction: column;
align-items: center;
@ -1063,7 +1070,7 @@
height: 100%;
margin: 2rem;
}
.broken-image-placeholder i[data-v-9bc23daf] {
.broken-image-placeholder i[data-v-ffe66146] {
font-size: 3rem;
margin-bottom: 0.5rem;
}
@ -1164,17 +1171,17 @@ img.galleria-image {
z-index: 1;
}
.scroll-container[data-v-93f5af09] {
.scroll-container[data-v-375f3c50] {
height: 100%;
overflow-y: auto;
}
.scroll-container[data-v-93f5af09]::-webkit-scrollbar {
.scroll-container[data-v-375f3c50]::-webkit-scrollbar {
width: 1px;
}
.scroll-container[data-v-93f5af09]::-webkit-scrollbar-thumb {
.scroll-container[data-v-375f3c50]::-webkit-scrollbar-thumb {
background-color: transparent;
}
.queue-grid[data-v-93f5af09] {
.queue-grid[data-v-375f3c50] {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
padding: 0.5rem;
@ -2016,12 +2023,22 @@ img.galleria-image {
.z-\[1000\]{
z-index: 1000;
}
.col-start-1{
grid-column-start: 1;
}
.row-start-1{
grid-row-start: 1;
}
.m-0{
margin: 0px;
}
.m-2{
margin: 0.5rem;
}
.mx-0{
margin-left: 0px;
margin-right: 0px;
}
.mx-1{
margin-left: 0.25rem;
margin-right: 0.25rem;
@ -2034,6 +2051,10 @@ img.galleria-image {
margin-left: 1.5rem;
margin-right: 1.5rem;
}
.mx-auto{
margin-left: auto;
margin-right: auto;
}
.my-0{
margin-top: 0px;
margin-bottom: 0px;
@ -2042,6 +2063,14 @@ img.galleria-image {
margin-top: 0.25rem;
margin-bottom: 0.25rem;
}
.my-2{
margin-top: 0.5rem;
margin-bottom: 0.5rem;
}
.my-2\.5{
margin-top: 0.625rem;
margin-bottom: 0.625rem;
}
.my-4{
margin-top: 1rem;
margin-bottom: 1rem;
@ -2058,6 +2087,9 @@ img.galleria-image {
.mb-6{
margin-bottom: 1.5rem;
}
.mb-7{
margin-bottom: 1.75rem;
}
.ml-2{
margin-left: 0.5rem;
}
@ -2082,9 +2114,15 @@ img.galleria-image {
.mt-2{
margin-top: 0.5rem;
}
.mt-24{
margin-top: 6rem;
}
.mt-4{
margin-top: 1rem;
}
.mt-5{
margin-top: 1.25rem;
}
.mt-\[5vh\]{
margin-top: 5vh;
}
@ -2173,6 +2211,12 @@ img.galleria-image {
.w-full{
width: 100%;
}
.w-screen{
width: 100vw;
}
.min-w-84{
min-width: 22rem;
}
.min-w-96{
min-width: 26rem;
}
@ -2185,6 +2229,9 @@ img.galleria-image {
.max-w-full{
max-width: 100%;
}
.max-w-screen-sm{
max-width: 640px;
}
.flex-1{
flex: 1 1 0%;
}
@ -2197,6 +2244,9 @@ img.galleria-image {
.flex-grow{
flex-grow: 1;
}
.flex-grow-0{
flex-grow: 0;
}
.grow{
flex-grow: 1;
}
@ -2225,6 +2275,9 @@ img.galleria-image {
.resize{
resize: both;
}
.list-inside{
list-style-position: inside;
}
.list-disc{
list-style-type: disc;
}
@ -2234,6 +2287,9 @@ img.galleria-image {
.flex-row{
flex-direction: row;
}
.flex-row-reverse{
flex-direction: row-reverse;
}
.flex-col{
flex-direction: column;
}
@ -2243,6 +2299,12 @@ img.galleria-image {
.flex-nowrap{
flex-wrap: nowrap;
}
.place-content-center{
place-content: center;
}
.place-items-center{
place-items: center;
}
.content-around{
align-content: space-around;
}
@ -2289,6 +2351,11 @@ img.galleria-image {
margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(0.5rem * var(--tw-space-y-reverse));
}
.space-y-4 > :not([hidden]) ~ :not([hidden]){
--tw-space-y-reverse: 0;
margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(1rem * var(--tw-space-y-reverse));
}
.justify-self-end{
justify-self: end;
}
@ -2343,6 +2410,9 @@ img.galleria-image {
.border-none{
border-style: none;
}
.bg-\[var\(--comfy-menu-bg\)\]{
background-color: var(--comfy-menu-bg);
}
.bg-\[var\(--p-tree-background\)\]{
background-color: var(--p-tree-background);
}
@ -2358,6 +2428,10 @@ img.galleria-image {
--tw-bg-opacity: 1;
background-color: rgb(150 206 76 / var(--tw-bg-opacity));
}
.bg-neutral-300{
--tw-bg-opacity: 1;
background-color: rgb(212 212 212 / var(--tw-bg-opacity));
}
.bg-neutral-800{
--tw-bg-opacity: 1;
background-color: rgb(38 38 38 / var(--tw-bg-opacity));
@ -2380,6 +2454,18 @@ img.galleria-image {
.bg-opacity-50{
--tw-bg-opacity: 0.5;
}
.bg-\[url\(\'\/assets\/images\/Git-Logo-White\.svg\'\)\]{
background-image: url('../assets/images/Git-Logo-White.svg');
}
.bg-right-top{
background-position: right top;
}
.bg-no-repeat{
background-repeat: no-repeat;
}
.bg-origin-padding{
background-origin: padding-box;
}
.object-cover{
-o-object-fit: cover;
object-fit: cover;
@ -2399,6 +2485,9 @@ img.galleria-image {
.p-4{
padding: 1rem;
}
.p-5{
padding: 1.25rem;
}
.p-8{
padding: 2rem;
}
@ -2406,6 +2495,10 @@ img.galleria-image {
padding-left: 0px;
padding-right: 0px;
}
.px-10{
padding-left: 2.5rem;
padding-right: 2.5rem;
}
.px-2{
padding-left: 0.5rem;
padding-right: 0.5rem;
@ -2425,6 +2518,9 @@ img.galleria-image {
.pb-0{
padding-bottom: 0px;
}
.pl-4{
padding-left: 1rem;
}
.pl-6{
padding-left: 1.5rem;
}
@ -2455,6 +2551,9 @@ img.galleria-image {
.text-2xl{
font-size: 1.5rem;
}
.text-4xl{
font-size: 2.25rem;
}
.text-lg{
font-size: 1.125rem;
}
@ -2476,6 +2575,9 @@ img.galleria-image {
.font-medium{
font-weight: 500;
}
.font-normal{
font-weight: 400;
}
.font-semibold{
font-weight: 600;
}
@ -2493,6 +2595,10 @@ img.galleria-image {
--tw-text-opacity: 1;
color: rgb(203 213 224 / var(--tw-text-opacity));
}
.text-green-500{
--tw-text-opacity: 1;
color: rgb(150 206 76 / var(--tw-text-opacity));
}
.text-highlight{
color: var(--p-primary-color);
}
@ -2515,12 +2621,29 @@ img.galleria-image {
--tw-text-opacity: 1;
color: rgb(163 163 163 / var(--tw-text-opacity));
}
.text-neutral-800{
--tw-text-opacity: 1;
color: rgb(38 38 38 / var(--tw-text-opacity));
}
.text-neutral-900{
--tw-text-opacity: 1;
color: rgb(23 23 23 / var(--tw-text-opacity));
}
.text-red-500{
--tw-text-opacity: 1;
color: rgb(239 68 68 / var(--tw-text-opacity));
}
.no-underline{
text-decoration-line: none;
}
.opacity-0{
opacity: 0;
}
.shadow-lg{
--tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
--tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color);
box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow);
}
.outline{
outline-style: solid;
}
@ -2580,6 +2703,7 @@ img.galleria-image {
--fg-color: #000;
--bg-color: #fff;
--comfy-menu-bg: #353535;
--comfy-menu-secondary-bg: #292929;
--comfy-input-bg: #222;
--input-text: #ddd;
--descrip-text: #999;
@ -2750,8 +2874,8 @@ body {
}
.comfy-modal select,
.comfy-modal input[type=button],
.comfy-modal input[type=checkbox] {
.comfy-modal input[type='button'],
.comfy-modal input[type='checkbox'] {
margin: 3px 3px 3px 4px;
}
@ -2865,8 +2989,8 @@ span.drag-handle {
padding: 3px 4px;
cursor: move;
vertical-align: middle;
margin-top: -.4em;
margin-left: -.2em;
margin-top: -0.4em;
margin-left: -0.2em;
font-size: 12px;
font-family: sans-serif;
letter-spacing: 2px;
@ -2933,11 +3057,11 @@ button.comfy-queue-btn {
z-index: 99;
}
.comfy-modal.comfy-settings input[type="range"] {
.comfy-modal.comfy-settings input[type='range'] {
vertical-align: middle;
}
.comfy-modal.comfy-settings input[type="range"] + input[type="number"] {
.comfy-modal.comfy-settings input[type='range'] + input[type='number'] {
width: 3.5em;
}
@ -2968,7 +3092,7 @@ button.comfy-queue-btn {
.comfy-menu span.drag-handle {
display: none;
}
.comfy-menu-queue-size {
flex: unset;
}
@ -3002,7 +3126,9 @@ button.comfy-queue-btn {
padding-right: 8px;
}
.graphdialog input, .graphdialog textarea, .graphdialog select {
.graphdialog input,
.graphdialog textarea,
.graphdialog select {
background-color: var(--comfy-input-bg);
border: 2px solid;
border-color: var(--border-color);
@ -3064,7 +3190,8 @@ dialog::backdrop {
text-align: right;
}
#comfy-settings-dialog tbody button, #comfy-settings-dialog table > button {
#comfy-settings-dialog tbody button,
#comfy-settings-dialog table > button {
background-color: var(--bg-color);
border: 1px var(--border-color) solid;
border-radius: 0;
@ -3145,7 +3272,7 @@ dialog::backdrop {
}
.litemenu-entry.has_submenu::after {
content: ">";
content: '>';
position: absolute;
top: 0;
right: 2px;
@ -3159,7 +3286,8 @@ dialog::backdrop {
will-change: transform;
}
.litegraph.litecontextmenu .litemenu-entry:hover:not(.disabled):not(.separator) {
.litegraph.litecontextmenu
.litemenu-entry:hover:not(.disabled):not(.separator) {
background-color: var(--comfy-menu-bg) !important;
filter: brightness(155%);
will-change: transform;
@ -3288,6 +3416,17 @@ audio.comfy-audio.empty-audio-widget {
opacity: 1;
}
@media (min-width: 768px){
.md\:flex{
display: flex;
}
.md\:hidden{
display: none;
}
}
@media (min-width: 1536px){
.\32xl\:mx-4{

File diff suppressed because it is too large

web/assets/index-BNMdgttb.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

web/assets/index-Ba7IybyO.js.map generated vendored

File diff suppressed because one or more lines are too long

web/assets/index-BppSBmxJ.js generated vendored
View File

@ -1,106 +0,0 @@
const IPC_CHANNELS = {
LOADING_PROGRESS: "loading-progress",
IS_PACKAGED: "is-packaged",
RENDERER_READY: "renderer-ready",
RESTART_APP: "restart-app",
REINSTALL: "reinstall",
LOG_MESSAGE: "log-message",
OPEN_DIALOG: "open-dialog",
DOWNLOAD_PROGRESS: "download-progress",
START_DOWNLOAD: "start-download",
PAUSE_DOWNLOAD: "pause-download",
RESUME_DOWNLOAD: "resume-download",
CANCEL_DOWNLOAD: "cancel-download",
DELETE_MODEL: "delete-model",
GET_ALL_DOWNLOADS: "get-all-downloads",
GET_ELECTRON_VERSION: "get-electron-version",
SEND_ERROR_TO_SENTRY: "send-error-to-sentry",
GET_BASE_PATH: "get-base-path",
GET_MODEL_CONFIG_PATH: "get-model-config-path",
OPEN_PATH: "open-path",
OPEN_LOGS_PATH: "open-logs-path",
OPEN_DEV_TOOLS: "open-dev-tools",
TERMINAL_WRITE: "execute-terminal-command",
TERMINAL_RESIZE: "resize-terminal",
TERMINAL_RESTORE: "restore-terminal",
TERMINAL_ON_OUTPUT: "terminal-output",
IS_FIRST_TIME_SETUP: "is-first-time-setup",
GET_SYSTEM_PATHS: "get-system-paths",
VALIDATE_INSTALL_PATH: "validate-install-path",
VALIDATE_COMFYUI_SOURCE: "validate-comfyui-source",
SHOW_DIRECTORY_PICKER: "show-directory-picker",
INSTALL_COMFYUI: "install-comfyui"
};
var ProgressStatus = /* @__PURE__ */ ((ProgressStatus2) => {
ProgressStatus2["INITIAL_STATE"] = "initial-state";
ProgressStatus2["PYTHON_SETUP"] = "python-setup";
ProgressStatus2["STARTING_SERVER"] = "starting-server";
ProgressStatus2["READY"] = "ready";
ProgressStatus2["ERROR"] = "error";
return ProgressStatus2;
})(ProgressStatus || {});
const ProgressMessages = {
[
"initial-state"
/* INITIAL_STATE */
]: "Loading...",
[
"python-setup"
/* PYTHON_SETUP */
]: "Setting up Python Environment...",
[
"starting-server"
/* STARTING_SERVER */
]: "Starting ComfyUI server...",
[
"ready"
/* READY */
]: "Finishing...",
[
"error"
/* ERROR */
]: "Was not able to start ComfyUI. Please check the logs for more details. You can open it from the Help menu. Please report issues to: https://forum.comfy.org"
};
const ELECTRON_BRIDGE_API = "electronAPI";
const SENTRY_URL_ENDPOINT = "https://942cadba58d247c9cab96f45221aa813@o4507954455314432.ingest.us.sentry.io/4508007940685824";
const MigrationItems = [
{
id: "user_files",
label: "User Files",
description: "Settings and user-created workflows"
},
{
id: "models",
label: "Models",
description: "Reference model files from existing ComfyUI installations. (No copy)"
}
// TODO: Decide whether we want to auto-migrate custom nodes, and install their dependencies.
// huchenlei: This is a very essential thing for migration experience.
// {
// id: 'custom_nodes',
// label: 'Custom Nodes',
// description: 'Reference custom node files from existing ComfyUI installations. (No copy)',
// },
];
const DEFAULT_SERVER_ARGS = {
/** The host to use for the ComfyUI server. */
host: "127.0.0.1",
/** The port to use for the ComfyUI server. */
port: 8e3,
// Extra arguments to pass to the ComfyUI server.
extraServerArgs: {}
};
var DownloadStatus = /* @__PURE__ */ ((DownloadStatus2) => {
DownloadStatus2["PENDING"] = "pending";
DownloadStatus2["IN_PROGRESS"] = "in_progress";
DownloadStatus2["COMPLETED"] = "completed";
DownloadStatus2["PAUSED"] = "paused";
DownloadStatus2["ERROR"] = "error";
DownloadStatus2["CANCELLED"] = "cancelled";
return DownloadStatus2;
})(DownloadStatus || {});
export {
MigrationItems as M,
ProgressStatus as P
};
//# sourceMappingURL=index-BppSBmxJ.js.map
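
The deleted chunk above is recognizably a bundler's lowering of TypeScript string enums: the `var X = /* @__PURE__ */ ((X2) => { ... })(X || {})` wrappers are esbuild-style enum output, computed keys such as `["initial-state" /* INITIAL_STATE */]` are enum member accesses inlined by the minifier, and `port: 8e3` is simply the minified spelling of `8000`. A reconstructed sketch of what the pre-bundle source plausibly looked like (an assumption, not the actual file):

```
// Reconstructed sketch of the original TypeScript (assumed, not the real source).
export enum ProgressStatus {
  INITIAL_STATE = 'initial-state',
  PYTHON_SETUP = 'python-setup',
  STARTING_SERVER = 'starting-server',
  READY = 'ready',
  ERROR = 'error',
}

// Each status maps to the message shown on the desktop loading screen.
export const ProgressMessages: Record<ProgressStatus, string> = {
  [ProgressStatus.INITIAL_STATE]: 'Loading...',
  [ProgressStatus.PYTHON_SETUP]: 'Setting up Python Environment...',
  [ProgressStatus.STARTING_SERVER]: 'Starting ComfyUI server...',
  [ProgressStatus.READY]: 'Finishing...',
  [ProgressStatus.ERROR]:
    'Was not able to start ComfyUI. Please check the logs for more details. You can open it from the Help menu. Please report issues to: https://forum.comfy.org'
}
```

String-valued members keep what crosses the Electron IPC boundary human-readable while call sites stay symbolic.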

Some files were not shown because too many files have changed in this diff