From d9d7f3c6199d4d0547dff1410ca8fb3d9215f43e Mon Sep 17 00:00:00 2001
From: Chenlei Hu
Date: Thu, 12 Dec 2024 14:59:16 -0800
Subject: [PATCH] Lint all unused variables (#5989)

* Enable F841

* Autofix

* Remove all unused variable assignment
---
 .ci/update_windows/update.py                       | 12 ++++++------
 comfy/cldm/cldm.py                                 |  1 -
 comfy/controlnet.py                                |  2 --
 comfy/extra_samplers/uni_pc.py                     |  1 -
 comfy/hooks.py                                     |  2 +-
 comfy/k_diffusion/deis.py                          |  1 -
 comfy/ldm/audio/dit.py                             |  8 +-------
 comfy/ldm/aura/mmdit.py                            |  1 -
 comfy/ldm/genmo/joint_model/asymm_models_joint.py  |  2 --
 comfy/ldm/hydit/controlnet.py                      |  3 ---
 comfy/ldm/hydit/models.py                          |  3 ---
 comfy/ldm/modules/attention.py                     |  6 +-----
 comfy/ldm/modules/diffusionmodules/model.py        |  9 +++------
 comfy/ldm/util.py                                  |  1 -
 comfy/model_base.py                                |  3 ---
 comfy/model_detection.py                           |  2 --
 comfy/model_management.py                          |  2 +-
 comfy/sampler_helpers.py                           |  1 -
 comfy/samplers.py                                  |  7 -------
 comfy/sd.py                                        |  5 ++---
 comfy/sd1_clip.py                                  |  3 +--
 comfy/supported_models.py                          |  2 --
 comfy/text_encoders/t5.py                          |  1 -
 comfy_extras/nodes_hypertile.py                    |  2 --
 comfy_extras/nodes_model_advanced.py               |  1 -
 fix_torch.py                                       |  4 ++--
 ruff.toml                                          |  1 +
 server.py                                          |  6 ++----
 tests/inference/testing_nodes/testing-pack/util.py |  2 +-
 29 files changed, 22 insertions(+), 72 deletions(-)

diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py
index 6a04e5e1..59bee980 100755
--- a/.ci/update_windows/update.py
+++ b/.ci/update_windows/update.py
@@ -33,12 +33,12 @@ def pull(repo, remote_name='origin', branch='master'):
 
         user = repo.default_signature
         tree = repo.index.write_tree()
-        commit = repo.create_commit('HEAD',
-                                    user,
-                                    user,
-                                    'Merge!',
-                                    tree,
-                                    [repo.head.target, remote_master_id])
+        repo.create_commit('HEAD',
+                           user,
+                           user,
+                           'Merge!',
+                           tree,
+                           [repo.head.target, remote_master_id])
         # We need to do this or git CLI will think we are still merging.
         repo.state_cleanup()
     else:
diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py
index 05282a3b..f12cd6ee 100644
--- a/comfy/cldm/cldm.py
+++ b/comfy/cldm/cldm.py
@@ -413,7 +413,6 @@ class ControlNet(nn.Module):
 
         out_output = []
         out_middle = []
-        hs = []
         if self.num_classes is not None:
             assert y.shape[0] == x.shape[0]
             emb = emb + self.label_emb(y)
diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index e6a0d1e5..666014ef 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -297,7 +297,6 @@ class ControlLoraOps:
 
     class Linear(torch.nn.Module, comfy.ops.CastWeightBiasOp):
         def __init__(self, in_features: int, out_features: int, bias: bool = True, device=None, dtype=None) -> None:
-            factory_kwargs = {'device': device, 'dtype': dtype}
             super().__init__()
             self.in_features = in_features
             self.out_features = out_features
@@ -382,7 +381,6 @@ class ControlLora(ControlNet):
         self.control_model.to(comfy.model_management.get_torch_device())
         diffusion_model = model.diffusion_model
         sd = diffusion_model.state_dict()
-        cm = self.control_model.state_dict()
 
         for k in sd:
             weight = sd[k]
diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py
index 39365752..18ff9266 100644
--- a/comfy/extra_samplers/uni_pc.py
+++ b/comfy/extra_samplers/uni_pc.py
@@ -703,7 +703,6 @@ class UniPC:
     ):
        # t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        # t_T = self.noise_schedule.T if t_start is None else t_start
-        device = x.device
        steps = len(timesteps) - 1
        if method == 'multistep':
            assert steps >= order
diff --git a/comfy/hooks.py b/comfy/hooks.py
index ccb8183b..356b7d65 100644
--- a/comfy/hooks.py
+++ b/comfy/hooks.py
@@ -130,7 +130,7 @@ class WeightHook(Hook):
             weights = self.weights
         else:
             weights = self.weights_clip
-        k = model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
+        model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
         registered.append(self)
         return True
         # TODO: add logs about any keys that were not applied
diff --git a/comfy/k_diffusion/deis.py b/comfy/k_diffusion/deis.py
index 60741065..a1167a4a 100644
--- a/comfy/k_diffusion/deis.py
+++ b/comfy/k_diffusion/deis.py
@@ -11,7 +11,6 @@ import numpy as np
 
 # Transfer from the input time (sigma) used in EDM to that (t) used in DEIS.
 def edm2t(edm_steps, epsilon_s=1e-3, sigma_min=0.002, sigma_max=80):
-    vp_sigma = lambda beta_d, beta_min: lambda t: (np.e ** (0.5 * beta_d * (t ** 2) + beta_min * t) - 1) ** 0.5
     vp_sigma_inv = lambda beta_d, beta_min: lambda sigma: ((beta_min ** 2 + 2 * beta_d * (sigma ** 2 + 1).log()).sqrt() - beta_min) / beta_d
     vp_beta_d = 2 * (np.log(torch.tensor(sigma_min).cpu() ** 2 + 1) / epsilon_s - np.log(torch.tensor(sigma_max).cpu() ** 2 + 1)) / (epsilon_s - 1)
     vp_beta_min = np.log(torch.tensor(sigma_max).cpu() ** 2 + 1) - 0.5 * vp_beta_d
diff --git a/comfy/ldm/audio/dit.py b/comfy/ldm/audio/dit.py
index 5b3f498f..2992d3da 100644
--- a/comfy/ldm/audio/dit.py
+++ b/comfy/ldm/audio/dit.py
@@ -158,7 +158,6 @@ class RotaryEmbedding(nn.Module):
     def forward(self, t):
         # device = self.inv_freq.device
         device = t.device
-        dtype = t.dtype
 
         # t = t.to(torch.float32)
 
@@ -346,18 +345,13 @@ class Attention(nn.Module):
 
         # determine masking
         masks = []
-        final_attn_mask = None # The mask that will be applied to the attention matrix, taking all masks into account
 
         if input_mask is not None:
             input_mask = rearrange(input_mask, 'b j -> b 1 1 j')
             masks.append(~input_mask)
 
         # Other masks will be added here later
-
-        if len(masks) > 0:
-            final_attn_mask = ~or_reduce(masks)
-
-        n, device = q.shape[-2], q.device
+        n = q.shape[-2]
 
         causal = self.causal if causal is None else causal
 
diff --git a/comfy/ldm/aura/mmdit.py b/comfy/ldm/aura/mmdit.py
index 77090372..7792151a 100644
--- a/comfy/ldm/aura/mmdit.py
+++ b/comfy/ldm/aura/mmdit.py
@@ -147,7 +147,6 @@ class DoubleAttention(nn.Module):
 
         bsz, seqlen1, _ = c.shape
         bsz, seqlen2, _ = x.shape
-        seqlen = seqlen1 + seqlen2
 
         cq, ck, cv = self.w1q(c), self.w1k(c), self.w1v(c)
         cq = cq.view(bsz, seqlen1, self.n_heads, self.head_dim)
diff --git a/comfy/ldm/genmo/joint_model/asymm_models_joint.py b/comfy/ldm/genmo/joint_model/asymm_models_joint.py
index 45c93896..2c46c24b 100644
--- a/comfy/ldm/genmo/joint_model/asymm_models_joint.py
+++ b/comfy/ldm/genmo/joint_model/asymm_models_joint.py
@@ -461,8 +461,6 @@ class AsymmDiTJoint(nn.Module):
         pH, pW = H // self.patch_size, W // self.patch_size
         x = self.embed_x(x)  # (B, N, D), where N = T * H * W / patch_size ** 2
         assert x.ndim == 3
-        B = x.size(0)
-        pH, pW = H // self.patch_size, W // self.patch_size
 
         N = T * pH * pW
 
diff --git a/comfy/ldm/hydit/controlnet.py b/comfy/ldm/hydit/controlnet.py
index e1fb4529..31a6bff9 100644
--- a/comfy/ldm/hydit/controlnet.py
+++ b/comfy/ldm/hydit/controlnet.py
@@ -164,9 +164,6 @@ class HunYuanControlNet(nn.Module):
             ),
         )
 
-        # Image embedding
-        num_patches = self.x_embedder.num_patches
-
         # HUnYuanDiT Blocks
         self.blocks = nn.ModuleList(
             [
diff --git a/comfy/ldm/hydit/models.py b/comfy/ldm/hydit/models.py
index 4de60795..359f6a96 100644
--- a/comfy/ldm/hydit/models.py
+++ b/comfy/ldm/hydit/models.py
@@ -248,9 +248,6 @@ class HunYuanDiT(nn.Module):
             operations.Linear(hidden_size * 4, hidden_size, bias=True, dtype=dtype, device=device),
         )
 
-        # Image embedding
-        num_patches = self.x_embedder.num_patches
-
         # HUnYuanDiT Blocks
         self.blocks = nn.ModuleList([
             HunYuanDiTBlock(hidden_size=hidden_size,
diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 885b2401..f9a571e0 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -157,8 +157,6 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
     b, _, dim_head = query.shape
     dim_head //= heads
 
-    scale = dim_head ** -0.5
-
     if skip_reshape:
         query = query.reshape(b * heads, -1, dim_head)
         value = value.reshape(b * heads, -1, dim_head)
@@ -177,9 +175,8 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
     bytes_per_token = torch.finfo(query.dtype).bits//8
     batch_x_heads, q_tokens, _ = query.shape
     _, _, k_tokens = key.shape
-    qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
 
-    mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)
+    mem_free_total, _ = model_management.get_free_memory(query.device, True)
 
     kv_chunk_size_min = None
     kv_chunk_size = None
@@ -230,7 +227,6 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
 
     scale = dim_head ** -0.5
 
-    h = heads
     if skip_reshape:
         q, k, v = map(
             lambda t: t.reshape(b * heads, -1, dim_head),
diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index a60ca307..449e4652 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -162,7 +162,6 @@ def slice_attention(q, k, v):
 
     mem_free_total = model_management.get_free_memory(q.device)
 
-    gb = 1024 ** 3
     tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
     modifier = 3 if q.element_size() == 2 else 2.5
     mem_required = tensor_size * modifier
@@ -218,7 +217,7 @@ def xformers_attention(q, k, v):
     try:
         out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
         out = out.transpose(1, 2).reshape(B, C, H, W)
-    except NotImplementedError as e:
+    except NotImplementedError:
         out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
     return out
 
@@ -233,7 +232,7 @@ def pytorch_attention(q, k, v):
     try:
         out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
         out = out.transpose(2, 3).reshape(B, C, H, W)
-    except model_management.OOM_EXCEPTION as e:
+    except model_management.OOM_EXCEPTION:
         logging.warning("scaled_dot_product_attention OOMed: switched to slice attention")
         out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W)
     return out
@@ -546,7 +545,6 @@ class Decoder(nn.Module):
                  attn_op=AttnBlock,
                  **ignorekwargs):
         super().__init__()
-        if use_linear_attn: attn_type = "linear"
         self.ch = ch
         self.temb_ch = 0
         self.num_resolutions = len(ch_mult)
@@ -556,8 +554,7 @@ class Decoder(nn.Module):
         self.give_pre_end = give_pre_end
         self.tanh_out = tanh_out
 
-        # compute in_ch_mult, block_in and curr_res at lowest res
-        in_ch_mult = (1,)+tuple(ch_mult)
+        # compute block_in and curr_res at lowest res
         block_in = ch*ch_mult[self.num_resolutions-1]
         curr_res = resolution // 2**(self.num_resolutions-1)
         self.z_shape = (1,z_channels,curr_res,curr_res)
diff --git a/comfy/ldm/util.py b/comfy/ldm/util.py
index 8c09ca1c..fdd8b84a 100644
--- a/comfy/ldm/util.py
+++ b/comfy/ldm/util.py
@@ -133,7 +133,6 @@ class AdamWwithEMAandWings(optim.Optimizer):
             exp_avgs = []
             exp_avg_sqs = []
             ema_params_with_grad = []
-            state_sums = []
             max_exp_avg_sqs = []
             state_steps = []
             amsgrad = group['amsgrad']
diff --git a/comfy/model_base.py b/comfy/model_base.py
index f90ceebb..c64ab646 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -427,7 +427,6 @@ class SVD_img2vid(BaseModel):
 
         latent_image = kwargs.get("concat_latent_image", None)
         noise = kwargs.get("noise", None)
-        device = kwargs["device"]
 
         if latent_image is None:
             latent_image = torch.zeros_like(noise)
@@ -711,8 +710,6 @@ class HunyuanDiT(BaseModel):
 
         width = kwargs.get("width", 768)
         height = kwargs.get("height", 768)
-        crop_w = kwargs.get("crop_w", 0)
-        crop_h = kwargs.get("crop_h", 0)
         target_width = kwargs.get("target_width", width)
         target_height = kwargs.get("target_height", height)
 
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index c5c5dbb2..a742a4b2 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -216,7 +216,6 @@ def detect_unet_config(state_dict, key_prefix):
 
     num_res_blocks = []
     channel_mult = []
-    attention_resolutions = []
     transformer_depth = []
     transformer_depth_output = []
     context_dim = None
@@ -388,7 +387,6 @@ def convert_config(unet_config):
             t_out += [d] * (res + 1)
         s *= 2
     transformer_depth = t_in
-    transformer_depth_output = t_out
     new_config["transformer_depth"] = t_in
     new_config["transformer_depth_output"] = t_out
     new_config["transformer_depth_middle"] = transformer_depth_middle
diff --git a/comfy/model_management.py b/comfy/model_management.py
index cc2ae82a..177c7998 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -512,7 +512,7 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
             if vram_set_state == VRAMState.NO_VRAM:
                 lowvram_model_memory = 64 * 1024 * 1024
 
-        cur_loaded_model = loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
+        loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
         current_loaded_models.insert(0, loaded_model)
         return
 
diff --git a/comfy/sampler_helpers.py b/comfy/sampler_helpers.py
index 1924a8c5..0691de63 100644
--- a/comfy/sampler_helpers.py
+++ b/comfy/sampler_helpers.py
@@ -103,7 +103,6 @@ def cleanup_additional_models(models):
 
 
 def prepare_sampling(model: 'ModelPatcher', noise_shape, conds):
-    device = model.load_device
     real_model: 'BaseModel' = None
     models, inference_memory = get_additional_models(conds, model.model_dtype())
     models += model.get_nested_additional_models()  # TODO: does this require inference_memory update?
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 18ba1374..27686722 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -130,11 +130,6 @@ def can_concat_cond(c1, c2):
     return cond_equal_size(c1.conditioning, c2.conditioning)
 
 def cond_cat(c_list):
-    c_crossattn = []
-    c_concat = []
-    c_adm = []
-    crossattn_max_len = 0
-
     temp = {}
     for x in c_list:
         for k in x:
@@ -608,8 +603,6 @@ def pre_run_control(model, conds):
 
     for t in range(len(conds)):
         x = conds[t]
-        timestep_start = None
-        timestep_end = None
         percent_to_timestep_function = lambda a: s.percent_to_sigma(a)
         if 'control' in x:
             x['control'].pre_run(model, percent_to_timestep_function)
diff --git a/comfy/sd.py b/comfy/sd.py
index ebae7f99..444ec248 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -435,7 +435,7 @@ class VAE:
                 if pixel_samples is None:
                     pixel_samples = torch.empty((samples_in.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
                 pixel_samples[x:x+batch_number] = out
-        except model_management.OOM_EXCEPTION as e:
+        except model_management.OOM_EXCEPTION:
             logging.warning("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
             dims = samples_in.ndim - 2
             if dims == 1:
@@ -490,7 +490,7 @@ class VAE:
                 samples = torch.empty((pixel_samples.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
                 samples[x:x + batch_number] = out
 
-        except model_management.OOM_EXCEPTION as e:
+        except model_management.OOM_EXCEPTION:
             logging.warning("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.")
             if len(pixel_samples.shape) == 3:
                 samples = self.encode_tiled_1d(pixel_samples)
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 8ee69eaf..fac26b07 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -336,7 +336,6 @@ def expand_directory_list(directories):
     return list(dirs)
 
 def bundled_embed(embed, prefix, suffix): #bundled embedding in lora format
-    i = 0
     out_list = []
     for k in embed:
         if k.startswith(prefix) and k.endswith(suffix):
@@ -392,7 +391,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
                 embed_out = safe_load_embed_zip(embed_path)
             else:
                 embed = torch.load(embed_path, map_location="cpu")
-    except Exception as e:
+    except Exception:
         logging.warning("{}\n\nerror loading embedding, skipping loading: {}".format(traceback.format_exc(), embedding_name))
         return None
 
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index b426d308..4ae7c08f 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -224,7 +224,6 @@ class SDXL(supported_models_base.BASE):
 
     def process_clip_state_dict_for_saving(self, state_dict):
         replace_prefix = {}
-        keys_to_replace = {}
         state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
         for k in state_dict:
             if k.startswith("clip_l"):
@@ -527,7 +526,6 @@ class SD3(supported_models_base.BASE):
         clip_l = False
         clip_g = False
         t5 = False
-        dtype_t5 = None
         pref = self.text_encoder_key_prefix[0]
         if "{}clip_l.transformer.text_model.final_layer_norm.weight".format(pref) in state_dict:
             clip_l = True
diff --git a/comfy/text_encoders/t5.py b/comfy/text_encoders/t5.py
index f88ed9cf..38d8d523 100644
--- a/comfy/text_encoders/t5.py
+++ b/comfy/text_encoders/t5.py
@@ -172,7 +172,6 @@ class T5LayerSelfAttention(torch.nn.Module):
         # self.dropout = nn.Dropout(config.dropout_rate)
 
     def forward(self, x, mask=None, past_bias=None, optimized_attention=None):
-        normed_hidden_states = self.layer_norm(x)
         output, past_bias = self.SelfAttention(self.layer_norm(x), mask=mask, past_bias=past_bias, optimized_attention=optimized_attention)
         # x = x + self.dropout(attention_output)
         x += output
diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py
index 227133f3..b366117c 100644
--- a/comfy_extras/nodes_hypertile.py
+++ b/comfy_extras/nodes_hypertile.py
@@ -35,8 +35,6 @@ class HyperTile:
     CATEGORY = "model_patches/unet"
 
     def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
-        model_channels = model.model.model_config.unet_config["model_channels"]
-
         latent_tile_size = max(32, tile_size) // 8
         self.temp = None
 
diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py
index e57d1d56..285dbf53 100644
--- a/comfy_extras/nodes_model_advanced.py
+++ b/comfy_extras/nodes_model_advanced.py
@@ -240,7 +240,6 @@ class ModelSamplingContinuousV:
     def patch(self, model, sampling, sigma_max, sigma_min):
         m = model.clone()
 
-        latent_format = None
         sigma_data = 1.0
         if sampling == "v_prediction":
             sampling_type = comfy.model_sampling.V_PREDICTION
diff --git a/fix_torch.py b/fix_torch.py
index 4aecb23f..ce117b63 100644
--- a/fix_torch.py
+++ b/fix_torch.py
@@ -22,7 +22,7 @@ def fix_pytorch_libomp():
                 if b"libomp140.x86_64.dll" not in contents:
                     break
             try:
-                mydll = ctypes.cdll.LoadLibrary(test_file)
-            except FileNotFoundError as e:
+                ctypes.cdll.LoadLibrary(test_file)
+            except FileNotFoundError:
                 logging.warning("Detected pytorch version with libomp issue, patching.")
                 shutil.copyfile(os.path.join(lib_folder, "libiomp5md.dll"), dest)
diff --git a/ruff.toml b/ruff.toml
index d8334c06..39de4d40 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -5,4 +5,5 @@ lint.ignore = ["ALL"]
 lint.select = [
   "S307", # suspicious-eval-usage
   "F401", # unused-import
+  "F841", # unused-local-variable
 ]
\ No newline at end of file
diff --git a/server.py b/server.py
index 86800984..728360bc 100644
--- a/server.py
+++ b/server.py
@@ -563,7 +563,7 @@ class PromptServer():
             for x in nodes.NODE_CLASS_MAPPINGS:
                 try:
                     out[x] = node_info(x)
-                except Exception as e:
+                except Exception:
                     logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.")
                     logging.error(traceback.format_exc())
             return web.json_response(out)
@@ -599,8 +599,6 @@ class PromptServer():
         @routes.post("/prompt")
         async def post_prompt(request):
             logging.info("got prompt")
-            resp_code = 200
-            out_string = ""
             json_data = await request.json()
             json_data = self.trigger_on_prompt(json_data)
 
@@ -832,7 +830,7 @@ class PromptServer():
         for handler in self.on_prompt_handlers:
             try:
                 json_data = handler(json_data)
-            except Exception as e:
+            except Exception:
                 logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
                 logging.warning(traceback.format_exc())
 
diff --git a/tests/inference/testing_nodes/testing-pack/util.py b/tests/inference/testing_nodes/testing-pack/util.py
index ca116c16..9c0e04dc 100644
--- a/tests/inference/testing_nodes/testing-pack/util.py
+++ b/tests/inference/testing_nodes/testing-pack/util.py
@@ -259,7 +259,7 @@ class TestForLoopOpen:
         graph = GraphBuilder()
         if "initial_value0" in kwargs:
             remaining = kwargs["initial_value0"]
graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)}) + graph.node("TestWhileLoopOpen", condition=remaining, initial_value0=remaining, **{(f"initial_value{i}"): kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)}) outputs = [kwargs.get(f"initial_value{i}", None) for i in range(1, NUM_FLOW_SOCKETS)] return { "result": tuple(["stub", remaining] + outputs),