Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-10 18:05:16 +00:00)
Lint all unused variables (#5989)
* Enable F841
* Autofix
* Remove all unused variable assignment
parent fd5dfb812c
commit d9d7f3c619
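Ruff's F841 rule ("unused-local-variable") flags a local variable that is assigned but never read. The hunks below are the autofix plus manual cleanup: the dead binding is dropped while any call on the right-hand side is kept for its side effects, as in the first hunk where commit = repo.create_commit(...) becomes a bare repo.create_commit(...) call. A minimal sketch of the pattern, with hypothetical names rather than code from this repository:

# Hypothetical illustration of what F841 reports and how the fix rewrites it.
def register_patches(model, patches):
    key = model.add_patches(patches)   # F841: `key` is assigned but never used
    return True

# After the fix: the call is kept for its side effect, the unused binding goes away.
def register_patches(model, patches):
    model.add_patches(patches)
    return True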
@@ -33,12 +33,12 @@ def pull(repo, remote_name='origin', branch='master'):
 user = repo.default_signature
 tree = repo.index.write_tree()
-commit = repo.create_commit('HEAD',
+repo.create_commit('HEAD',
 user,
 user,
 'Merge!',
 tree,
 [repo.head.target, remote_master_id])
 # We need to do this or git CLI will think we are still merging.
 repo.state_cleanup()
 else:
@@ -413,7 +413,6 @@ class ControlNet(nn.Module):
 out_output = []
 out_middle = []

-hs = []
 if self.num_classes is not None:
 assert y.shape[0] == x.shape[0]
 emb = emb + self.label_emb(y)
@@ -297,7 +297,6 @@ class ControlLoraOps:
 class Linear(torch.nn.Module, comfy.ops.CastWeightBiasOp):
 def __init__(self, in_features: int, out_features: int, bias: bool = True,
 device=None, dtype=None) -> None:
-factory_kwargs = {'device': device, 'dtype': dtype}
 super().__init__()
 self.in_features = in_features
 self.out_features = out_features
@@ -382,7 +381,6 @@ class ControlLora(ControlNet):
 self.control_model.to(comfy.model_management.get_torch_device())
 diffusion_model = model.diffusion_model
 sd = diffusion_model.state_dict()
-cm = self.control_model.state_dict()

 for k in sd:
 weight = sd[k]
@@ -703,7 +703,6 @@ class UniPC:
 ):
 # t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
 # t_T = self.noise_schedule.T if t_start is None else t_start
-device = x.device
 steps = len(timesteps) - 1
 if method == 'multistep':
 assert steps >= order
@@ -130,7 +130,7 @@ class WeightHook(Hook):
 weights = self.weights
 else:
 weights = self.weights_clip
-k = model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
+model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
 registered.append(self)
 return True
 # TODO: add logs about any keys that were not applied
@@ -11,7 +11,6 @@ import numpy as np
 # Transfer from the input time (sigma) used in EDM to that (t) used in DEIS.

 def edm2t(edm_steps, epsilon_s=1e-3, sigma_min=0.002, sigma_max=80):
-vp_sigma = lambda beta_d, beta_min: lambda t: (np.e ** (0.5 * beta_d * (t ** 2) + beta_min * t) - 1) ** 0.5
 vp_sigma_inv = lambda beta_d, beta_min: lambda sigma: ((beta_min ** 2 + 2 * beta_d * (sigma ** 2 + 1).log()).sqrt() - beta_min) / beta_d
 vp_beta_d = 2 * (np.log(torch.tensor(sigma_min).cpu() ** 2 + 1) / epsilon_s - np.log(torch.tensor(sigma_max).cpu() ** 2 + 1)) / (epsilon_s - 1)
 vp_beta_min = np.log(torch.tensor(sigma_max).cpu() ** 2 + 1) - 0.5 * vp_beta_d
@@ -158,7 +158,6 @@ class RotaryEmbedding(nn.Module):
 def forward(self, t):
 # device = self.inv_freq.device
 device = t.device
-dtype = t.dtype

 # t = t.to(torch.float32)

@@ -346,18 +345,13 @@ class Attention(nn.Module):

 # determine masking
 masks = []
-final_attn_mask = None # The mask that will be applied to the attention matrix, taking all masks into account

 if input_mask is not None:
 input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
 masks.append(~input_mask)

 # Other masks will be added here later

-if len(masks) > 0:
-final_attn_mask = ~or_reduce(masks)
-
-n, device = q.shape[-2], q.device
+n = q.shape[-2]

 causal = self.causal if causal is None else causal
@@ -147,7 +147,6 @@ class DoubleAttention(nn.Module):

 bsz, seqlen1, _ = c.shape
 bsz, seqlen2, _ = x.shape
-seqlen = seqlen1 + seqlen2

 cq, ck, cv = self.w1q(c), self.w1k(c), self.w1v(c)
 cq = cq.view(bsz, seqlen1, self.n_heads, self.head_dim)
@@ -461,8 +461,6 @@ class AsymmDiTJoint(nn.Module):
 pH, pW = H // self.patch_size, W // self.patch_size
 x = self.embed_x(x) # (B, N, D), where N = T * H * W / patch_size ** 2
 assert x.ndim == 3
-B = x.size(0)
-

 pH, pW = H // self.patch_size, W // self.patch_size
 N = T * pH * pW
@@ -164,9 +164,6 @@ class HunYuanControlNet(nn.Module):
 ),
 )

-# Image embedding
-num_patches = self.x_embedder.num_patches
-
 # HUnYuanDiT Blocks
 self.blocks = nn.ModuleList(
 [
@@ -248,9 +248,6 @@ class HunYuanDiT(nn.Module):
 operations.Linear(hidden_size * 4, hidden_size, bias=True, dtype=dtype, device=device),
 )

-# Image embedding
-num_patches = self.x_embedder.num_patches
-
 # HUnYuanDiT Blocks
 self.blocks = nn.ModuleList([
 HunYuanDiTBlock(hidden_size=hidden_size,
@@ -157,8 +157,6 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
 b, _, dim_head = query.shape
 dim_head //= heads

-scale = dim_head ** -0.5
-
 if skip_reshape:
 query = query.reshape(b * heads, -1, dim_head)
 value = value.reshape(b * heads, -1, dim_head)
@@ -177,9 +175,8 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
 bytes_per_token = torch.finfo(query.dtype).bits//8
 batch_x_heads, q_tokens, _ = query.shape
 _, _, k_tokens = key.shape
-qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

-mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)
+mem_free_total, _ = model_management.get_free_memory(query.device, True)

 kv_chunk_size_min = None
 kv_chunk_size = None
@@ -230,7 +227,6 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape

 scale = dim_head ** -0.5

-h = heads
 if skip_reshape:
 q, k, v = map(
 lambda t: t.reshape(b * heads, -1, dim_head),
@@ -162,7 +162,6 @@ def slice_attention(q, k, v):

 mem_free_total = model_management.get_free_memory(q.device)

-gb = 1024 ** 3
 tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
 modifier = 3 if q.element_size() == 2 else 2.5
 mem_required = tensor_size * modifier
@@ -218,7 +217,7 @@ def xformers_attention(q, k, v):
 try:
 out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
 out = out.transpose(1, 2).reshape(B, C, H, W)
-except NotImplementedError as e:
+except NotImplementedError:
 out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
 return out

@@ -233,7 +232,7 @@ def pytorch_attention(q, k, v):
 try:
 out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
 out = out.transpose(2, 3).reshape(B, C, H, W)
-except model_management.OOM_EXCEPTION as e:
+except model_management.OOM_EXCEPTION:
 logging.warning("scaled_dot_product_attention OOMed: switched to slice attention")
 out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
 return out
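In this and several later hunks the only change is dropping the "as e" from an except clause. An exception bound with "as" but never referenced is also reported by F841, so the binding is removed; it is only kept where the handler actually uses the exception object. A small sketch of the distinction, with made-up function names:

import logging

# Hypothetical sketch of when the "as" binding is dropped or kept under F841.
try:
    run_attention()
except NotImplementedError:        # binding removed: the handler never inspects the exception
    run_fallback()

try:
    run_attention()
except RuntimeError as err:        # binding kept: the exception object is actually used
    logging.warning("attention failed: %s", err)
    run_fallback()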
@@ -546,7 +545,6 @@ class Decoder(nn.Module):
 attn_op=AttnBlock,
 **ignorekwargs):
 super().__init__()
-if use_linear_attn: attn_type = "linear"
 self.ch = ch
 self.temb_ch = 0
 self.num_resolutions = len(ch_mult)
@@ -556,8 +554,7 @@ class Decoder(nn.Module):
 self.give_pre_end = give_pre_end
 self.tanh_out = tanh_out

-# compute in_ch_mult, block_in and curr_res at lowest res
-in_ch_mult = (1,)+tuple(ch_mult)
+# compute block_in and curr_res at lowest res
 block_in = ch*ch_mult[self.num_resolutions-1]
 curr_res = resolution // 2**(self.num_resolutions-1)
 self.z_shape = (1,z_channels,curr_res,curr_res)
@@ -133,7 +133,6 @@ class AdamWwithEMAandWings(optim.Optimizer):
 exp_avgs = []
 exp_avg_sqs = []
 ema_params_with_grad = []
-state_sums = []
 max_exp_avg_sqs = []
 state_steps = []
 amsgrad = group['amsgrad']
@@ -427,7 +427,6 @@ class SVD_img2vid(BaseModel):

 latent_image = kwargs.get("concat_latent_image", None)
 noise = kwargs.get("noise", None)
-device = kwargs["device"]

 if latent_image is None:
 latent_image = torch.zeros_like(noise)
@@ -711,8 +710,6 @@ class HunyuanDiT(BaseModel):

 width = kwargs.get("width", 768)
 height = kwargs.get("height", 768)
-crop_w = kwargs.get("crop_w", 0)
-crop_h = kwargs.get("crop_h", 0)
 target_width = kwargs.get("target_width", width)
 target_height = kwargs.get("target_height", height)

@@ -216,7 +216,6 @@ def detect_unet_config(state_dict, key_prefix):

 num_res_blocks = []
 channel_mult = []
-attention_resolutions = []
 transformer_depth = []
 transformer_depth_output = []
 context_dim = None
@@ -388,7 +387,6 @@ def convert_config(unet_config):
 t_out += [d] * (res + 1)
 s *= 2
 transformer_depth = t_in
-transformer_depth_output = t_out
 new_config["transformer_depth"] = t_in
 new_config["transformer_depth_output"] = t_out
 new_config["transformer_depth_middle"] = transformer_depth_middle
@@ -512,7 +512,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
 if vram_set_state == VRAMState.NO_VRAM:
 lowvram_model_memory = 64 * 1024 * 1024

-cur_loaded_model = loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
+loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
 current_loaded_models.insert(0, loaded_model)
 return

@@ -103,7 +103,6 @@ def cleanup_additional_models(models):


 def prepare_sampling(model: 'ModelPatcher', noise_shape, conds):
-device = model.load_device
 real_model: 'BaseModel' = None
 models, inference_memory = get_additional_models(conds, model.model_dtype())
 models += model.get_nested_additional_models() # TODO: does this require inference_memory update?
@@ -130,11 +130,6 @@ def can_concat_cond(c1, c2):
 return cond_equal_size(c1.conditioning, c2.conditioning)

 def cond_cat(c_list):
-c_crossattn = []
-c_concat = []
-c_adm = []
-crossattn_max_len = 0
-
 temp = {}
 for x in c_list:
 for k in x:
@@ -608,8 +603,6 @@ def pre_run_control(model, conds):
 for t in range(len(conds)):
 x = conds[t]

-timestep_start = None
-timestep_end = None
 percent_to_timestep_function = lambda a: s.percent_to_sigma(a)
 if 'control' in x:
 x['control'].pre_run(model, percent_to_timestep_function)
@@ -435,7 +435,7 @@ class VAE:
 if pixel_samples is None:
 pixel_samples = torch.empty((samples_in.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
 pixel_samples[x:x+batch_number] = out
-except model_management.OOM_EXCEPTION as e:
+except model_management.OOM_EXCEPTION:
 logging.warning("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
 dims = samples_in.ndim - 2
 if dims == 1:
@@ -490,7 +490,7 @@ class VAE:
 samples = torch.empty((pixel_samples.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
 samples[x:x + batch_number] = out

-except model_management.OOM_EXCEPTION as e:
+except model_management.OOM_EXCEPTION:
 logging.warning("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
 if len(pixel_samples.shape) == 3:
 samples = self.encode_tiled_1d(pixel_samples)
@@ -691,7 +691,6 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
 config = yaml.safe_load(stream)
 model_config_params = config['model']['params']
 clip_config = model_config_params['cond_stage_config']
-scale_factor = model_config_params['scale_factor']

 if "parameterization" in model_config_params:
 if model_config_params["parameterization"] == "v":
@@ -336,7 +336,6 @@ def expand_directory_list(directories):
 return list(dirs)

 def bundled_embed(embed, prefix, suffix): #bundled embedding in lora format
-i = 0
 out_list = []
 for k in embed:
 if k.startswith(prefix) and k.endswith(suffix):
@@ -392,7 +391,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
 embed_out = safe_load_embed_zip(embed_path)
 else:
 embed = torch.load(embed_path, map_location="cpu")
-except Exception as e:
+except Exception:
 logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name))
 return None

@@ -224,7 +224,6 @@ class SDXL(supported_models_base.BASE):

 def process_clip_state_dict_for_saving(self, state_dict):
 replace_prefix = {}
-keys_to_replace = {}
 state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
 for k in state_dict:
 if k.startswith("clip_l"):
@@ -527,7 +526,6 @@ class SD3(supported_models_base.BASE):
 clip_l = False
 clip_g = False
 t5 = False
-dtype_t5 = None
 pref = self.text_encoder_key_prefix[0]
 if "{}clip_l.transformer.text_model.final_layer_norm.weight".format(pref) in state_dict:
 clip_l = True
@@ -172,7 +172,6 @@ class T5LayerSelfAttention(torch.nn.Module):
 # self.dropout = nn.Dropout(config.dropout_rate)

 def forward(self, x, mask=None, past_bias=None, optimized_attention=None):
-normed_hidden_states = self.layer_norm(x)
 output, past_bias = self.SelfAttention(self.layer_norm(x), mask=mask, past_bias=past_bias, optimized_attention=optimized_attention)
 # x = x + self.dropout(attention_output)
 x += output
@@ -35,8 +35,6 @@ class HyperTile:
 CATEGORY = "model_patches/unet"

 def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
-model_channels = model.model.model_config.unet_config["model_channels"]
-
 latent_tile_size = max(32, tile_size) // 8
 self.temp = None

@@ -240,7 +240,6 @@ class ModelSamplingContinuousV:
 def patch(self, model, sampling, sigma_max, sigma_min):
 m = model.clone()

-latent_format = None
 sigma_data = 1.0
 if sampling == "v_prediction":
 sampling_type = comfy.model_sampling.V_PREDICTION
@@ -22,7 +22,7 @@ def fix_pytorch_libomp():
 if b"libomp140.x86_64.dll" not in contents:
 break
 try:
-mydll = ctypes.cdll.LoadLibrary(test_file)
-except FileNotFoundError as e:
+ctypes.cdll.LoadLibrary(test_file)
+except FileNotFoundError:
 logging.warning("Detected pytorch version with libomp issue, patching.")
 shutil.copyfile(os.path.join(lib_folder, "libiomp5md.dll"), dest)
@@ -5,4 +5,5 @@ lint.ignore = ["ALL"]
 lint.select = [
 "S307", # suspicious-eval-usage
 "F401", # unused-import
+"F841", # unused-local-variable
 ]
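This ruff configuration change is what turns the rule on. The "Autofix" step in the commit message was presumably ruff's own fixer, roughly ruff check . --select F841 --fix (adding --unsafe-fixes where ruff classifies the removal as unsafe); the exact invocation is not recorded here, and per the last bullet of the message the remaining cases were removed by hand.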
@@ -563,7 +563,7 @@ class PromptServer():
 for x in nodes.NODE_CLASS_MAPPINGS:
 try:
 out[x] = node_info(x)
-except Exception as e:
+except Exception:
 logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.")
 logging.error(traceback.format_exc())
 return web.json_response(out)
@@ -599,8 +599,6 @@ class PromptServer():
 @routes.post("/prompt")
 async def post_prompt(request):
 logging.info("got prompt")
-resp_code = 200
-out_string = ""
 json_data = await request.json()
 json_data = self.trigger_on_prompt(json_data)

@@ -832,7 +830,7 @@ class PromptServer():
 for handler in self.on_prompt_handlers:
 try:
 json_data = handler(json_data)
-except Exception as e:
+except Exception:
 logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
 logging.warning(traceback.format_exc())

@@ -259,7 +259,7 @@ class TestForLoopOpen:
 graph = GraphBuilder()
 if "initial_value0" in kwargs:
 remaining = kwargs["initial_value0"]
-while_open = graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)})
+graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)})
 outputs = [kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)]
 return {
 "result": tuple(["stub", remaining] + outputs),