From 60749f345d6159b6d4d40f73e7ac2ac16e177833 Mon Sep 17 00:00:00 2001
From: Chenlei Hu
Date: Thu, 12 Dec 2024 15:49:40 -0800
Subject: [PATCH] Lint and fix undefined names (3/N) (#6030)

---
 comfy/ldm/audio/autoencoder.py               | 2 +-
 comfy/ldm/audio/dit.py                       | 2 +-
 comfy/ldm/models/autoencoder.py              | 2 +-
 comfy/ldm/modules/sub_quadratic_attention.py | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfy/ldm/audio/autoencoder.py b/comfy/ldm/audio/autoencoder.py
index 21044d17..9e7e7c87 100644
--- a/comfy/ldm/audio/autoencoder.py
+++ b/comfy/ldm/audio/autoencoder.py
@@ -97,7 +97,7 @@ def get_activation(activation: Literal["elu", "snake", "none"], antialias=False,
         raise ValueError(f"Unknown activation {activation}")
 
     if antialias:
-        act = Activation1d(act)
+        act = Activation1d(act) # noqa: F821 Activation1d is not defined
 
     return act
 
diff --git a/comfy/ldm/audio/dit.py b/comfy/ldm/audio/dit.py
index 2992d3da..3e2b4ebc 100644
--- a/comfy/ldm/audio/dit.py
+++ b/comfy/ldm/audio/dit.py
@@ -169,7 +169,7 @@ class RotaryEmbedding(nn.Module):
         if self.scale is None:
             return freqs, 1.
 
-        power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base
+        power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base # noqa: F821 seq_len is not defined
         scale = comfy.ops.cast_to_input(self.scale, t) ** rearrange(power, 'n -> n 1')
         scale = torch.cat((scale, scale), dim = -1)
 
diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py
index 3eeff24e..3b5bc0e6 100644
--- a/comfy/ldm/models/autoencoder.py
+++ b/comfy/ldm/models/autoencoder.py
@@ -4,7 +4,7 @@ from typing import Any, Dict, Tuple, Union
 
 from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution
 
-from comfy.ldm.util import instantiate_from_config
+from comfy.ldm.util import get_obj_from_str, instantiate_from_config
 from comfy.ldm.modules.ema import LitEma
 import comfy.ops
 
diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py
index 47b8b151..6d2de0fe 100644
--- a/comfy/ldm/modules/sub_quadratic_attention.py
+++ b/comfy/ldm/modules/sub_quadratic_attention.py
@@ -172,7 +172,7 @@ def _get_attention_scores_no_kv_chunking(
         del attn_scores
     except model_management.OOM_EXCEPTION:
         logging.warning("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead")
-        attn_scores -= attn_scores.max(dim=-1, keepdim=True).values
+        attn_scores -= attn_scores.max(dim=-1, keepdim=True).values # noqa: F821 attn_scores is not defined
         torch.exp(attn_scores, out=attn_scores)
         summed = torch.sum(attn_scores, dim=-1, keepdim=True)
         attn_scores /= summed
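
Note (not part of the patch): a minimal sketch of the F821 false positive that the sub_quadratic_attention.py suppression works around. The `del` inside the `try` makes the name look unbound to pyflakes in the `except` body, even though the exception is raised by `softmax()` before the `del` ever runs, so the name is still bound at runtime. The names here (`softmax_with_oom_fallback`, `scores`) are illustrative only, and `torch.cuda.OutOfMemoryError` stands in for ComfyUI's `model_management.OOM_EXCEPTION`.

import torch

def softmax_with_oom_fallback(scores: torch.Tensor) -> torch.Tensor:
    try:
        probs = scores.softmax(dim=-1)
        del scores  # free memory early; this `del` is what confuses the linter
    except torch.cuda.OutOfMemoryError:
        # pyflakes treats `scores` as deleted by the `del` above and flags
        # F821 here, hence the `# noqa: F821` suppression rather than a code
        # change. At runtime `scores` is still bound: the OOM was raised by
        # softmax() before the `del` executed.
        scores -= scores.max(dim=-1, keepdim=True).values  # subtract max for numerical stability
        torch.exp(scores, out=scores)                      # in-place exp to avoid a new allocation
        scores /= torch.sum(scores, dim=-1, keepdim=True)  # normalize in place
        probs = scores
    return probs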