Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-10 18:05:16 +00:00)
Fix and enforce no trailing whitespace.
This commit is contained in:
parent a90aafafc1
commit b7572b2f87

The hunks below are whitespace-only changes: each one removes trailing spaces or whitespace-only blank lines, the patterns Ruff flags as W291 and W293.
@@ -28,7 +28,7 @@ class TerminalService:

        if columns != self.cols:
            self.cols = columns
            changed = True

        if lines != self.rows:
            self.rows = lines

@@ -10,7 +10,7 @@ class CustomNodeManager:
    Placeholder to refactor the custom node management features from ComfyUI-Manager.
    Currently it only contains the custom workflow templates feature.
    """
    def add_routes(self, routes, webapp, loadedModules):

        @routes.get("/workflow_templates")
        async def get_workflow_templates(request):

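The `add_routes` hook above uses aiohttp's routing decorators. For reference, here is a minimal self-contained sketch of the same pattern; the handler body and payload are hypothetical, not taken from this commit:

from aiohttp import web

routes = web.RouteTableDef()

@routes.get("/workflow_templates")
async def get_workflow_templates(request: web.Request) -> web.Response:
    # Hypothetical payload: custom node pack -> list of bundled workflow templates.
    return web.json_response({"example_pack": ["default.json"]})

app = web.Application()
app.add_routes(routes)
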
@@ -226,7 +226,7 @@ def model_wrapper(
    The input `model` has the following format:
    ``
        model(x, t_input, **model_kwargs) -> noise | x_start | v | score
    ``

    The input `classifier_fn` has the following format:
    ``

@@ -240,7 +240,7 @@ def model_wrapper(
    The input `model` has the following format:
    ``
        model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
    ``
    And if cond == `unconditional_condition`, the model output is the unconditional DPM output.

    [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."

@@ -254,7 +254,7 @@ def model_wrapper(
    ``
    def model_fn(x, t_continuous) -> noise:
        t_input = get_model_input_time(t_continuous)
        return noise_pred(model, x, t_input, **model_kwargs)
    ``
    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.

@@ -359,7 +359,7 @@ class UniPC:
        max_val=1.,
        variant='bh1',
    ):
        """Construct a UniPC.

        We support both data_prediction and noise_prediction.
        """

@@ -372,7 +372,7 @@ class UniPC:

    def dynamic_thresholding_fn(self, x0, t=None):
        """
        The dynamic thresholding method.
        """
        dims = x0.dim()
        p = self.dynamic_thresholding_ratio

@@ -404,7 +404,7 @@ class UniPC:

    def model_fn(self, x, t):
        """
        Convert the model to the noise prediction model or the data prediction model.
        """
        if self.predict_x0:
            return self.data_prediction_fn(x, t)

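`model_fn` above dispatches between the noise-prediction and data-prediction parameterizations. Under the VP-SDE convention used in this solver, x_t = alpha_t * x0 + sigma_t * noise, so the two are interchangeable; a minimal sketch of the identity (alpha_t and sigma_t are assumed to come from the noise schedule):

def x0_from_noise(x_t, noise, alpha_t, sigma_t):
    # Invert x_t = alpha_t * x0 + sigma_t * noise for x0.
    return (x_t - sigma_t * noise) / alpha_t

def noise_from_x0(x_t, x0, alpha_t, sigma_t):
    # The same identity solved for the noise term instead.
    return (x_t - alpha_t * x0) / sigma_t
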
@@ -461,7 +461,7 @@ class UniPC:

    def denoise_to_zero_fn(self, x, s):
        """
        Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
        """
        return self.data_prediction_fn(x, s)

@@ -510,7 +510,7 @@ class UniPC:
        col = torch.ones_like(rks)
        for k in range(1, K + 1):
            C.append(col)
            col = col * rks / (k + 1)
        C = torch.stack(C, dim=1)

        if len(D1s) > 0:

@@ -626,7 +626,7 @@ class UniPC:
            R.append(torch.pow(rks, i - 1))
            b.append(h_phi_k * factorial_i / B_h)
            factorial_i *= (i + 1)
            h_phi_k = h_phi_k / hh - 1 / factorial_i

        R = torch.stack(R)
        b = torch.tensor(b, device=x.device)

@@ -138,7 +138,7 @@ class StageB(nn.Module):
        # nn.init.normal_(self.pixels_mapper[2].weight, std=0.02) # conditionings
        # torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs
        # nn.init.constant_(self.clf[1].weight, 0) # outputs
        #
        # # blocks
        # for level_block in self.down_blocks + self.up_blocks:
        #     for block in level_block:

@@ -148,7 +148,7 @@ class StageB(nn.Module):
        #         for layer in block.modules():
        #             if isinstance(layer, nn.Linear):
        #                 nn.init.constant_(layer.weight, 0)
        #
        # def _init_weights(self, m):
        #     if isinstance(m, (nn.Conv2d, nn.Linear)):
        #         torch.nn.init.xavier_uniform_(m.weight)

@@ -142,7 +142,7 @@ class StageC(nn.Module):
        # nn.init.normal_(self.clip_img_mapper.weight, std=0.02) # conditionings
        # torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs
        # nn.init.constant_(self.clf[1].weight, 0) # outputs
        #
        # # blocks
        # for level_block in self.down_blocks + self.up_blocks:
        #     for block in level_block:

@@ -152,7 +152,7 @@ class StageC(nn.Module):
        #         for layer in block.modules():
        #             if isinstance(layer, nn.Linear):
        #                 nn.init.constant_(layer.weight, 0)
        #
        # def _init_weights(self, m):
        #     if isinstance(m, (nn.Conv2d, nn.Linear)):
        #         torch.nn.init.xavier_uniform_(m.weight)

@@ -168,7 +168,7 @@ class Flux(nn.Module):
                out = blocks_replace[("single_block", i)]({"img": img,
                                                           "vec": vec,
                                                           "pe": pe,
                                                           "attn_mask": attn_mask},
                                                          {"original_block": block_wrap})
                img = out["img"]
            else:

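The `blocks_replace` lookup above is the patch point for individual transformer blocks: the registered callable receives the block inputs as a dict plus the wrapped original block under "original_block", matching the call shape visible in the hunk. A hedged sketch of a pass-through replacement (the patch itself is hypothetical):

def single_block_patch(args, extra):
    # Run the unmodified block first...
    out = extra["original_block"](args)
    # ...then post-process its output; identity here, a real patch would edit out["img"].
    return out
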
@@ -159,7 +159,7 @@ class CrossAttention(nn.Module):

        q = q.transpose(-2, -3).contiguous()  # q -> B, L1, H, C - B, H, L1, C
        k = k.transpose(-2, -3).contiguous()  # k -> B, L2, H, C - B, H, C, L2
        v = v.transpose(-2, -3).contiguous()

        context = optimized_attention(q, k, v, self.num_heads, skip_reshape=True, attn_precision=self.attn_precision)

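For reference, `optimized_attention(..., skip_reshape=True)` consumes tensors already in (B, H, L, C) layout, which is what the transposes above produce. A minimal plain-PyTorch equivalent of that attention step (an illustration, not ComfyUI's backend-dispatching implementation):

import torch.nn.functional as F

def attention_bhlc(q, k, v):
    # q, k, v: (B, H, L, C), as produced by the transposes above.
    out = F.scaled_dot_product_attention(q, k, v)  # (B, H, L, C)
    b, h, l, c = out.shape
    # Merge the head dimension back: (B, L, H*C).
    return out.transpose(1, 2).reshape(b, l, h * c)
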
@@ -787,7 +787,7 @@ class Flux(BaseModel):
        cross_attn = kwargs.get("cross_attn", None)
        if cross_attn is not None:
            out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
        # upscale the attention mask, since now we
        attention_mask = kwargs.get("attention_mask", None)
        if attention_mask is not None:
            shape = kwargs["noise"].shape

@@ -576,7 +576,7 @@ def unet_config_from_diffusers_unet(state_dict, dtype=None):
                  'dtype': dtype, 'in_channels': 9, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0],
                  'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8,
                  'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                  'use_temporal_attention': False, 'use_temporal_resblock': False}


    supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega, KOALA_700M, KOALA_1B, SD09_XS, SD_XS, SDXL_diffusers_ip2p, SD15_diffusers_inpaint]

@@ -727,7 +727,7 @@ def bislerp(samples, width, height):
        res *= (b1_norms * (1.0-r) + b2_norms * r).expand(-1,c)

        #edge cases for same or polar opposites
        res[dot > 1 - 1e-5] = b1[dot > 1 - 1e-5]
        res[dot < 1e-5 - 1] = (b1 * (1.0-r) + b2 * r)[dot < 1e-5 - 1]
        return res

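The two indexed assignments above guard slerp's degenerate cases: nearly parallel vectors (dot close to 1), where interpolation reduces to picking b1, and nearly antipodal vectors (dot close to -1), where the spherical formula divides by sin(omega) ~ 0 and the code falls back to a plain lerp. A minimal standalone slerp sketch with the same guards and the same interpolated-magnitude rescaling (illustrative, not the batched bislerp implementation):

import torch

def slerp(b1, b2, r, eps=1e-5):
    n1, n2 = b1.norm(), b2.norm()
    u1, u2 = b1 / n1, b2 / n2
    dot = (u1 * u2).sum()
    if dot > 1 - eps:            # nearly identical direction
        return b1
    if dot < eps - 1:            # nearly antipodal: spherical formula blows up
        return b1 * (1.0 - r) + b2 * r
    omega = torch.acos(dot.clamp(-1.0, 1.0))
    u = (torch.sin((1.0 - r) * omega) * u1 + torch.sin(r * omega) * u2) / torch.sin(omega)
    return u * (n1 * (1.0 - r) + n2 * r)   # restore interpolated magnitude
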
@@ -162,7 +162,7 @@ NOISE_LEVELS = {
        [14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
        [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
        [14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.89115214, 0.72133851, 0.59516323, 0.4783645, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
    ],
    1.15: [
        [14.61464119, 0.83188516, 0.02916753],
        [14.61464119, 1.84880662, 0.59516323, 0.02916753],

@@ -246,7 +246,7 @@ NOISE_LEVELS = {
        [14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.83188516, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.4783645, 0.41087446, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
    ],
    1.35: [
        [14.61464119, 0.69515091, 0.02916753],
        [14.61464119, 0.95350921, 0.34370604, 0.02916753],

@@ -73,7 +73,7 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
            comfy.samplers.calc_cond_batch(self.inner_model, [positive_cond, negative_cond, empty_cond], x, timestep, model_options)
        cfg_result = perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_empty, self.neg_scale, self.cfg)

        # normally this would be done in cfg_function, but we skipped
        # that for efficiency: we can compute the noise predictions in
        # a single call to calc_cond_batch() (rather than two)
        # so we replicate the hook here

@@ -62,7 +62,7 @@ class IsChangedCache:
class CacheSet:
    def __init__(self, lru_size=None):
        if lru_size is None or lru_size == 0:
            self.init_classic_cache()
        else:
            self.init_lru_cache(lru_size)
        self.all = [self.outputs, self.ui, self.objects]

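The constructor above picks between an unbounded "classic" cache and a bounded LRU cache when lru_size is set. For readers unfamiliar with the eviction policy, a minimal LRU cache sketch (a generic illustration, not ComfyUI's cache classes):

from collections import OrderedDict

class LRUCache:
    def __init__(self, max_size: int):
        self.max_size = max_size
        self._data: "OrderedDict[str, object]" = OrderedDict()

    def get(self, key):
        if key in self._data:
            self._data.move_to_end(key)     # mark as most recently used
        return self._data.get(key)

    def set(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self.max_size:
            self._data.popitem(last=False)  # evict least recently used
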
@@ -168,7 +168,7 @@ def _map_node_over_list(obj, input_data_all, func, allow_interrupt=False, execut
        process_inputs(input_data_all, 0, input_is_list=input_is_list)
    elif max_len_input == 0:
        process_inputs({})
    else:
        for i in range(max_len_input):
            input_dict = slice_dict(input_data_all, i)
            process_inputs(input_dict, i)

@@ -232,7 +232,7 @@ def get_output_data(obj, input_data_all, execution_block_cb=None, pre_execute_cb
        output = merge_result_data(results, obj)
    else:
        output = []
    ui = dict()
    if len(uis) > 0:
        ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()}
    return output, ui, has_subgraph

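The dict comprehension above merges the per-item UI dicts key-wise by concatenating their list values. A tiny worked example of the same expression:

uis = [{"images": ["a.png"]}, {"images": ["b.png", "c.png"]}]
ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()}
assert ui == {"images": ["a.png", "b.png", "c.png"]}
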
nodes.py (16 changed lines)
@@ -51,7 +51,7 @@ class CLIPTextEncode(ComfyNodeABC):
    def INPUT_TYPES(s) -> InputTypeDict:
        return {
            "required": {
                "text": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}),
                "clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."})
            }
        }

@@ -269,8 +269,8 @@ class VAEDecode:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "samples": ("LATENT", {"tooltip": "The latent to be decoded."}),
                "vae": ("VAE", {"tooltip": "The VAE model used for decoding the latent."})
            }
        }

@@ -550,13 +550,13 @@ class CheckpointLoaderSimple:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}),
            }
        }
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    OUTPUT_TOOLTIPS = ("The model used for denoising latents.",
                       "The CLIP model used for encoding text prompts.",
                       "The VAE model used for encoding and decoding images to and from latent space.")
    FUNCTION = "load_checkpoint"

@@ -633,7 +633,7 @@ class LoraLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
                "clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
                "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),

@@ -1162,7 +1162,7 @@ class EmptyLatentImage:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8, "tooltip": "The width of the latent images in pixels."}),
                "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8, "tooltip": "The height of the latent images in pixels."}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."})

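The nodes.py hunks above all follow the same node contract: an INPUT_TYPES classmethod describing sockets and widgets, a RETURN_TYPES tuple, and FUNCTION naming the method to call. A minimal sketch of a custom node using that contract (the node itself is hypothetical, not part of this commit):

class LatentBrightnessOffset:
    """Hypothetical example node: adds a constant offset to a latent tensor."""
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "samples": ("LATENT", {"tooltip": "The latent to offset."}),
                "offset": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}),
            }
        }
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "apply"
    CATEGORY = "latent"

    def apply(self, samples, offset):
        out = samples.copy()
        out["samples"] = samples["samples"] + offset  # elementwise tensor add
        return (out,)

NODE_CLASS_MAPPINGS = {"LatentBrightnessOffset": LatentBrightnessOffset}
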
@@ -5,6 +5,7 @@ lint.ignore = ["ALL"]
lint.select = [
    "S307", # suspicious-eval-usage
    "T201", # print-usage
    "W291",
    "W292",
    "W293",
    # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.

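This is the enforcement half of the commit: the hunk adds "W291" (trailing whitespace) alongside "W292" (no newline at end of file) and "W293" (whitespace on blank line), so `ruff check` now fails on exactly the patterns fixed throughout the rest of this diff. A rough standalone sketch of what W291/W293 detect (a hypothetical helper for illustration, not how Ruff is implemented):

import sys

def trailing_whitespace_lines(path):
    """Return 1-based numbers of lines that end in spaces or tabs (W291/W293)."""
    bad = []
    with open(path, encoding="utf-8") as f:
        for lineno, line in enumerate(f, start=1):
            content = line.rstrip("\r\n")
            if content != content.rstrip(" \t"):
                bad.append(lineno)
    return bad

if __name__ == "__main__":
    for p in sys.argv[1:]:
        for n in trailing_whitespace_lines(p):
            print(f"{p}:{n}: trailing whitespace")
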
@@ -6,8 +6,8 @@ from folder_paths import filter_files_content_types
@pytest.fixture(scope="module")
def file_extensions():
    return {
        'image': ['gif', 'heif', 'ico', 'jpeg', 'jpg', 'png', 'pnm', 'ppm', 'svg', 'tiff', 'webp', 'xbm', 'xpm'],
        'audio': ['aif', 'aifc', 'aiff', 'au', 'flac', 'm4a', 'mp2', 'mp3', 'ogg', 'snd', 'wav'],
        'video': ['avi', 'm2v', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ogv', 'qt', 'webm', 'wmv']
    }

@@ -57,8 +57,8 @@ def mock_yaml_safe_load(mock_yaml_content):
def test_load_extra_model_paths_expands_userpath(
    mock_file,
    monkeypatch,
    mock_add_model_folder_path,
    mock_expanduser,
    mock_yaml_safe_load,
    mock_expanded_home
):

@@ -78,7 +78,7 @@ def test_load_extra_model_paths_expands_userpath(

    # Check if add_model_folder_path was called with the correct arguments
    for actual_call, expected_call in zip(mock_add_model_folder_path.call_args_list, expected_calls):
        assert actual_call.args[0] == expected_call[0]
        assert os.path.normpath(actual_call.args[1]) == os.path.normpath(expected_call[1]) # Normalize and check the path to check on multiple OS.
        assert actual_call.args[2] == expected_call[2]

@@ -97,7 +97,7 @@ def test_load_extra_model_paths_expands_appdata(
    yaml_config_with_appdata,
    mock_yaml_content_appdata
):
    # Set the mock_file to return yaml with appdata as a variable
    mock_file.return_value.read.return_value = yaml_config_with_appdata

    # Attach mocks

@@ -32,7 +32,7 @@ class TestCompareImageMetrics:
    @fixture(scope="class")
    def test_file_names(self, args_pytest):
        test_dir = args_pytest['test_dir']
        fnames = self.gather_file_basenames(test_dir)
        yield fnames
        del fnames

@@ -84,7 +84,7 @@ class TestCompareImageMetrics:
        file_match = self.find_file_match(baseline_file_path, file_paths)
        assert file_match is not None, f"Could not find a file in {args_pytest['test_dir']} with matching metadata to {baseline_file_path}"

    # For a baseline image file, finds the corresponding file name in test_dir and
    # compares the images using the metrics in METRICS
    @pytest.mark.parametrize("metric", METRICS.keys())
    def test_pipeline_compare(

@@ -181,7 +181,7 @@ class TestCompareImageMetrics:

        # Find file match
        # Reorder test_file_names so that the file with matching name is first
        # This is an optimization because matching file names are more likely
        # to have matching metadata if they were generated with the same script
        basename = os.path.basename(baseline_file)
        file_path_basenames = [os.path.basename(f) for f in file_paths]

@@ -40,8 +40,8 @@ class ComfyClient:
    def __init__(self):
        self.test_name = ""

    def connect(self,
                listen:str = '127.0.0.1',
                port:Union[str,int] = 8188,
                client_id: str = str(uuid.uuid4())
                ):

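One Python subtlety worth noting in the signature above: the default `client_id: str = str(uuid.uuid4())` is evaluated once when the function is defined, so every `connect()` call that omits the argument shares the same UUID. A sketch of the usual per-call alternative, if distinct IDs were wanted (a suggestion, not part of this commit):

import uuid
from typing import Optional, Union

def connect(listen: str = '127.0.0.1',
            port: Union[str, int] = 8188,
            client_id: Optional[str] = None):
    if client_id is None:
        client_id = str(uuid.uuid4())  # fresh UUID on every call
    ...
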
@@ -125,7 +125,7 @@ class TestExecution:
    def _server(self, args_pytest, request):
        # Start server
        pargs = [
            'python','main.py',
            '--output-directory', args_pytest["output_dir"],
            '--listen', args_pytest["listen"],
            '--port', str(args_pytest["port"]),

@@ -23,7 +23,7 @@ These tests generate and save images through a range of parameters
"""

class ComfyGraph:
    def __init__(self,
                 graph: dict,
                 sampler_nodes: list[str],
                 ):

@@ -59,8 +59,8 @@ class ComfyGraph:
class ComfyClient:
    # From examples/websockets_api_example.py

    def connect(self,
                listen:str = '127.0.0.1',
                port:Union[str,int] = 8188,
                client_id: str = str(uuid.uuid4())
                ):

@@ -152,7 +152,7 @@ class TestInference:
    def _server(self, args_pytest):
        # Start server
        p = subprocess.Popen([
            'python','main.py',
            '--output-directory', args_pytest["output_dir"],
            '--listen', args_pytest["listen"],
            '--port', str(args_pytest["port"]),