import torch
import comfy.model_management

from kornia.morphology import dilation, erosion, opening, closing, gradient, top_hat, bottom_hat
import kornia.color


# Applies a morphological operation (erode/dilate/open/close/gradient/top_hat/bottom_hat)
# from kornia to a batch of images.
class Morphology:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",),
                             "operation": (["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"],),
                             "kernel_size": ("INT", {"default": 3, "min": 3, "max": 999, "step": 1}),
                             }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "process"

    CATEGORY = "image/postprocessing"

    def process(self, image, operation, kernel_size):
        device = comfy.model_management.get_torch_device()
        kernel = torch.ones(kernel_size, kernel_size, device=device)
        # kornia's morphology ops expect channels-first tensors: [B, H, W, C] -> [B, C, H, W]
        image_k = image.to(device).movedim(-1, 1)
        if operation == "erode":
            output = erosion(image_k, kernel)
        elif operation == "dilate":
            output = dilation(image_k, kernel)
        elif operation == "open":
            output = opening(image_k, kernel)
        elif operation == "close":
            output = closing(image_k, kernel)
        elif operation == "gradient":
            output = gradient(image_k, kernel)
        elif operation == "top_hat":
            output = top_hat(image_k, kernel)
        elif operation == "bottom_hat":
            output = bottom_hat(image_k, kernel)
        else:
            raise ValueError(f"Invalid operation {operation} for morphology. Must be one of 'erode', 'dilate', 'open', 'close', 'gradient', 'top_hat', 'bottom_hat'")
        img_out = output.to(comfy.model_management.intermediate_device()).movedim(1, -1)
        return (img_out,)

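# Usage sketch (not part of the original module): outside a ComfyUI graph the node can
# be called directly, assuming `image` is a [B, H, W, C] float tensor with values in [0, 1]:
#
#   (dilated,) = Morphology().process(image, "dilate", kernel_size=5)
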
# Splits an RGB image into its luma/chroma planes. Despite the "YUV" name, the
# conversion used is kornia's YCbCr; each plane is broadcast back to 3 channels
# so it can be previewed or saved as a regular IMAGE.
class ImageRGBToYUV:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",),
                             }}

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE")
    RETURN_NAMES = ("Y", "U", "V")
    FUNCTION = "execute"

    CATEGORY = "image/batch"

    def execute(self, image):
        out = kornia.color.rgb_to_ycbcr(image.movedim(-1, 1)).movedim(1, -1)
        return (out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image))

# Recombines Y/U/V planes (as produced by ImageRGBToYUV) back into an RGB image.
class ImageYUVToRGB:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"Y": ("IMAGE",),
                             "U": ("IMAGE",),
                             "V": ("IMAGE",),
                             }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"

    CATEGORY = "image/batch"

    def execute(self, Y, U, V):
        # Each input is a 3-channel IMAGE; averaging over the channel dim reduces it to a
        # single plane before the planes are stacked for the YCbCr -> RGB conversion.
        image = torch.cat([torch.mean(Y, dim=-1, keepdim=True), torch.mean(U, dim=-1, keepdim=True), torch.mean(V, dim=-1, keepdim=True)], dim=-1)
        out = kornia.color.ycbcr_to_rgb(image.movedim(-1, 1)).movedim(1, -1)
        return (out,)

NODE_CLASS_MAPPINGS = {
    "Morphology": Morphology,
    "ImageRGBToYUV": ImageRGBToYUV,
    "ImageYUVToRGB": ImageYUVToRGB,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "Morphology": "ImageMorphology",
}
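
# Standalone smoke-test sketch (not part of the original module). It assumes the file
# is run inside a working ComfyUI environment where `comfy` and `kornia` are importable;
# tensor shapes follow ComfyUI's IMAGE convention [B, H, W, C] with values in [0, 1].
if __name__ == "__main__":
    img = torch.rand(1, 64, 64, 3)

    # Morphology: dilate then erode; spatial shape should be preserved throughout.
    (dilated,) = Morphology().process(img, "dilate", kernel_size=5)
    (eroded,) = Morphology().process(dilated, "erode", kernel_size=5)
    assert eroded.shape == img.shape

    # RGB -> YUV (YCbCr) -> RGB should approximately round-trip.
    y, u, v = ImageRGBToYUV().execute(img)
    (restored,) = ImageYUVToRGB().execute(y, u, v)
    print("max round-trip error:", (restored - img).abs().max().item())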