From a40fcfc2d5392a5014cd87588035ebce194cb015 Mon Sep 17 00:00:00 2001
From: Chenlei Hu
Date: Fri, 28 Mar 2025 02:27:01 -0400
Subject: [PATCH 1/6] Update frontend to 1.14.6 (#7416)

Cherry-pick the fix: https://github.com/Comfy-Org/ComfyUI_frontend/pull/3252
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index c78d3c22..806fbc75 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.14.5
+comfyui-frontend-package==1.14.6
 torch
 torchsde
 torchvision

From 2d17d8910c7d34383feaf1aaac8d08571fe42077 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 28 Mar 2025 08:40:25 -0400
Subject: [PATCH 2/6] Don't error if wan concat image has extra channels.

---
 comfy/model_base.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index 8f588e2b..f55cbe18 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -1013,6 +1013,9 @@ class WAN21(BaseModel):
         if not self.image_to_video or extra_channels == image.shape[1]:
             return image
 
+        if image.shape[1] > (extra_channels - 4):
+            image = image[:, :(extra_channels - 4)]
+
         mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None))
         if mask is None:
             mask = torch.zeros_like(noise)[:, :4]

From 832fc02330c1843b9817b8ee90b061d2298a5911 Mon Sep 17 00:00:00 2001
From: Michael Kupchick
Date: Sun, 30 Mar 2025 03:03:02 +0300
Subject: [PATCH 3/6] ltxv: fix preprocessing exception when compression is 0. (#7431)

---
 comfy_extras/nodes_lt.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py
index fdc6c7c1..52588920 100644
--- a/comfy_extras/nodes_lt.py
+++ b/comfy_extras/nodes_lt.py
@@ -446,10 +446,9 @@ class LTXVPreprocess:
     CATEGORY = "image"
 
     def preprocess(self, image, img_compression):
-        if img_compression > 0:
-            output_images = []
-            for i in range(image.shape[0]):
-                output_images.append(preprocess(image[i], img_compression))
+        output_images = []
+        for i in range(image.shape[0]):
+            output_images.append(preprocess(image[i], img_compression))
         return (torch.stack(output_images),)

From a3100c8452862e914996648e0fbc56098ab26b60 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 29 Mar 2025 20:11:43 -0400
Subject: [PATCH 4/6] Remove useless code.

---
 comfy/model_base.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index f55cbe18..6bc627ae 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -1000,7 +1000,6 @@ class WAN21(BaseModel):
         device = kwargs["device"]
 
         if image is None:
-            image = torch.zeros_like(noise)
             shape_image = list(noise.shape)
             shape_image[1] = extra_channels
             image = torch.zeros(shape_image, dtype=noise.dtype, layout=noise.layout, device=noise.device)
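Note on the two comfy/model_base.py changes above (patches 2 and 4): together they shape the image tensor that WAN21 concatenates for image-to-video conditioning. When no image is supplied, a zero tensor with the expected number of extra channels is built directly (patch 4 drops the redundant torch.zeros_like call), and when the supplied image carries more channels than expected, the surplus channels are now sliced off instead of raising an error (patch 2). The following is a minimal standalone sketch of that flow, using a hypothetical helper name and illustrative tensor shapes, not the actual ComfyUI call path:

    import torch

    def align_concat_image(image, noise, extra_channels, image_to_video=True):
        # Sketch only: dim 1 is assumed to be the channel axis of the latent.
        if image is None:
            shape_image = list(noise.shape)
            shape_image[1] = extra_channels
            # No conditioning image: build zeros with the expected channel count.
            return torch.zeros(shape_image, dtype=noise.dtype, device=noise.device)
        if not image_to_video or extra_channels == image.shape[1]:
            return image
        # The model expects (extra_channels - 4) image channels; a 4-channel
        # mask is concatenated separately. Drop any surplus channels.
        if image.shape[1] > (extra_channels - 4):
            image = image[:, :(extra_channels - 4)]
        return image

    # Toy usage (shapes are made up for illustration):
    noise = torch.randn(1, 16, 5, 32, 32)
    image = torch.randn(1, 32, 5, 32, 32)   # more channels than expected
    aligned = align_concat_image(image, noise, extra_channels=20)
    print(aligned.shape)                    # torch.Size([1, 16, 5, 32, 32])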
From 0b4584c7413f1c3f6a34875a790c0381b3510447 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 30 Mar 2025 21:47:05 -0400
Subject: [PATCH 5/6] Fix latent composite node not working when source has alpha.

---
 comfy_extras/nodes_mask.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index 63fd13b9..2dd826b2 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -87,6 +87,8 @@ class ImageCompositeMasked:
     CATEGORY = "image"
 
     def composite(self, destination, source, x, y, resize_source, mask = None):
+        if destination.shape[-1] < source.shape[-1]:
+            source = source[...,:destination.shape[-1]]
         destination = destination.clone().movedim(-1, 1)
         output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
         return (output,)

From 548457bac47bb6c0ce233a9f5abb3467582d710d Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 31 Mar 2025 20:59:12 -0400
Subject: [PATCH 6/6] Fix alpha channel mismatch on destination in ImageCompositeMasked

---
 comfy_extras/nodes_mask.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index 2dd826b2..e1f0c822 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -89,6 +89,9 @@ class ImageCompositeMasked:
     def composite(self, destination, source, x, y, resize_source, mask = None):
         if destination.shape[-1] < source.shape[-1]:
             source = source[...,:destination.shape[-1]]
+        elif destination.shape[-1] > source.shape[-1]:
+            source = torch.nn.functional.pad(source, (0, destination.shape[-1] - source.shape[-1]))
+            source[..., -1] = 1.0
         destination = destination.clone().movedim(-1, 1)
         output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
         return (output,)
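Note on the two comfy_extras/nodes_mask.py changes above (patches 5 and 6): they align the channel counts of destination and source before compositing, so an RGBA source composited onto an RGB destination loses its alpha channel, while an RGB source composited onto an RGBA destination gains a fully opaque one. A minimal sketch of that behaviour, assuming NHWC image tensors and a hypothetical helper name rather than the ComfyUI API:

    import torch

    def match_image_channels(destination, source):
        # Sketch only: the last dimension is assumed to be 3 (RGB) or 4 (RGBA).
        if destination.shape[-1] < source.shape[-1]:
            # Destination has no alpha: drop the source's extra channels.
            source = source[..., :destination.shape[-1]]
        elif destination.shape[-1] > source.shape[-1]:
            # Destination has alpha but the source does not: pad the source
            # with an extra channel and treat it as fully opaque.
            pad = destination.shape[-1] - source.shape[-1]
            source = torch.nn.functional.pad(source, (0, pad))
            source[..., -1] = 1.0
        return destination, source

    # Toy usage: RGBA destination composited with a smaller RGB source.
    destination = torch.rand(1, 64, 64, 4)
    source = torch.rand(1, 32, 32, 3)
    destination, source = match_image_channels(destination, source)
    print(source.shape)                     # torch.Size([1, 32, 32, 4])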