import numpy as np
import scipy.ndimage
import torch
import comfy.utils
import node_helpers

from nodes import MAX_RESOLUTION

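# Tensor layout conventions (standard ComfyUI formats, noted here for reference):
#   IMAGE:  float tensor [B, H, W, C], values in 0..1, channels last
#   MASK:   float tensor [B, H, W] (or [H, W]), values in 0..1
#   LATENT: dict with a "samples" tensor of shape [B, C, H // 8, W // 8]
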
def composite(destination, source, x, y, mask=None, multiplier=8, resize_source=False):
    source = source.to(destination.device)
    if resize_source:
        source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear")

    source = comfy.utils.repeat_to_batch_size(source, destination.shape[0])

    x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
    y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))

    left, top = (x // multiplier, y // multiplier)
    right, bottom = (left + source.shape[3], top + source.shape[2],)

    if mask is None:
        mask = torch.ones_like(source)
    else:
        mask = mask.to(destination.device, copy=True)
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear")
        mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0])

    # calculate the bounds of the source that will be overlapping the destination
    # this prevents the source from overwriting latent pixels that are out of bounds
    # of the destination
    visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)

    mask = mask[:, :, :visible_height, :visible_width]
    inverse_mask = torch.ones_like(mask) - mask

    source_portion = mask * source[:, :, :visible_height, :visible_width]
    destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]

    destination[:, :, top:bottom, left:right] = source_portion + destination_portion
    return destination

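# Illustrative only (not used by the nodes below): compositing a 32x32 latent
# onto a 64x64 latent at pixel offset x=256 maps to latent column 256 // 8 = 32,
# so the source lands in destination[:, :, 0:32, 32:64]:
#
#   dest = torch.zeros((1, 4, 64, 64))
#   src = torch.ones((1, 4, 32, 32))
#   out = composite(dest, src, x=256, y=0)
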
class LatentCompositeMasked:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "destination": ("LATENT",),
                "source": ("LATENT",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "resize_source": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "mask": ("MASK",),
            }
        }
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, destination, source, x, y, resize_source, mask=None):
        output = destination.copy()
        destination = destination["samples"].clone()
        source = source["samples"]
        output["samples"] = composite(destination, source, x, y, mask, 8, resize_source)
        return (output,)

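# Note: x/y above are taken in pixel space (hence step 8); the composite()
# helper divides by its multiplier of 8 to convert them to latent coordinates.
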
class ImageCompositeMasked:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "destination": ("IMAGE",),
                "source": ("IMAGE",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "resize_source": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "mask": ("MASK",),
            }
        }
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "composite"

    CATEGORY = "image"

    def composite(self, destination, source, x, y, resize_source, mask=None):
        destination, source = node_helpers.image_alpha_fix(destination, source)
        destination = destination.clone().movedim(-1, 1)
        output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
        return (output,)

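# IMAGE tensors are channels-last [B, H, W, C]; composite() works on NCHW
# tensors, hence the movedim(-1, 1) before compositing and movedim(1, -1) after.
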
class MaskToImage:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "mask_to_image"

    def mask_to_image(self, mask):
        result = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
        return (result,)

class ImageToMask:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "channel": (["red", "green", "blue", "alpha"],),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "image_to_mask"

    def image_to_mask(self, image, channel):
        channels = ["red", "green", "blue", "alpha"]
        mask = image[:, :, :, channels.index(channel)]
        return (mask,)

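# channels.index(channel) maps red/green/blue/alpha to channel 0/1/2/3; the
# "alpha" option assumes an RGBA image and raises an IndexError on RGB input.
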
class ImageColorToMask:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "image_to_mask"

    def image_to_mask(self, image, color):
        # Pack each pixel's 8-bit RGB values into a single 0xRRGGBB integer.
        temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int)
        temp = torch.bitwise_left_shift(temp[:, :, :, 0], 16) + torch.bitwise_left_shift(temp[:, :, :, 1], 8) + temp[:, :, :, 2]
        # Masks are 0..1 floats everywhere else in this file, so matching pixels
        # get 1.0 (the original 255 here overscaled anything the mask multiplied).
        mask = torch.where(temp == color, 1.0, 0).float()
        return (mask,)

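# Worked example: a pure red pixel (1.0, 0.0, 0.0) rounds to (255, 0, 0) and
# packs to 255 << 16 = 16711680 = 0xFF0000, so color=16711680 selects it.
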
class SolidMask:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "solid"

    def solid(self, value, width, height):
        out = torch.full((1, height, width), value, dtype=torch.float32, device="cpu")
        return (out,)

class InvertMask:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "invert"

    def invert(self, mask):
        out = 1.0 - mask
        return (out,)

class CropMask:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "crop"

    def crop(self, mask, x, y, width, height):
        mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1]))
        out = mask[:, y:y + height, x:x + width]
        return (out,)

class MaskComposite:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "destination": ("MASK",),
                "source": ("MASK",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "operation": (["multiply", "add", "subtract", "and", "or", "xor"],),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "combine"

    def combine(self, destination, source, x, y, operation):
        output = destination.reshape((-1, destination.shape[-2], destination.shape[-1])).clone()
        source = source.reshape((-1, source.shape[-2], source.shape[-1]))

        left, top = (x, y,)
        right, bottom = (min(left + source.shape[-1], destination.shape[-1]), min(top + source.shape[-2], destination.shape[-2]))
        visible_width, visible_height = (right - left, bottom - top,)

        source_portion = source[:, :visible_height, :visible_width]
        destination_portion = destination[:, top:bottom, left:right]

        if operation == "multiply":
            output[:, top:bottom, left:right] = destination_portion * source_portion
        elif operation == "add":
            output[:, top:bottom, left:right] = destination_portion + source_portion
        elif operation == "subtract":
            output[:, top:bottom, left:right] = destination_portion - source_portion
        elif operation == "and":
            output[:, top:bottom, left:right] = torch.bitwise_and(destination_portion.round().bool(), source_portion.round().bool()).float()
        elif operation == "or":
            output[:, top:bottom, left:right] = torch.bitwise_or(destination_portion.round().bool(), source_portion.round().bool()).float()
        elif operation == "xor":
            output[:, top:bottom, left:right] = torch.bitwise_xor(destination_portion.round().bool(), source_portion.round().bool()).float()

        output = torch.clamp(output, 0.0, 1.0)

        return (output,)

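# Example: with operation="subtract", a source value of 0.7 over a destination
# value of 0.4 yields -0.3 in the overlap; the final clamp lifts it back to 0.0.
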
class FeatherMask:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "feather"

    def feather(self, mask, left, top, right, bottom):
        output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone()

        left = min(left, output.shape[-1])
        right = min(right, output.shape[-1])
        top = min(top, output.shape[-2])
        bottom = min(bottom, output.shape[-2])

        for x in range(left):
            feather_rate = (x + 1.0) / left
            output[:, :, x] *= feather_rate

        for x in range(right):
            feather_rate = (x + 1) / right
            # -(x + 1) counts in from the right edge; a bare -x hits index 0 when x == 0.
            output[:, :, -(x + 1)] *= feather_rate

        for y in range(top):
            feather_rate = (y + 1) / top
            output[:, y, :] *= feather_rate

        for y in range(bottom):
            feather_rate = (y + 1) / bottom
            output[:, -(y + 1), :] *= feather_rate

        return (output,)

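# Example ramp: left=4 scales the four leftmost columns by 0.25, 0.5, 0.75 and
# 1.0, fading the mask out toward the edge; the other three sides mirror this.
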
class GrowMask:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mask": ("MASK",),
                "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}),
                "tapered_corners": ("BOOLEAN", {"default": True}),
            },
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)

    FUNCTION = "expand_mask"

    def expand_mask(self, mask, expand, tapered_corners):
        c = 0 if tapered_corners else 1
        kernel = np.array([[c, 1, c],
                           [1, 1, 1],
                           [c, 1, c]])
        mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1]))
        out = []
        for m in mask:
            output = m.numpy()
            for _ in range(abs(expand)):
                if expand < 0:
                    output = scipy.ndimage.grey_erosion(output, footprint=kernel)
                else:
                    output = scipy.ndimage.grey_dilation(output, footprint=kernel)
            output = torch.from_numpy(output)
            out.append(output)
        return (torch.stack(out, dim=0),)

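# Each step of `expand` dilates (or erodes, when negative) the mask by one
# pixel. With tapered_corners the 3x3 footprint drops its corners (a plus
# shape), so repeated dilation rounds corners off instead of growing them square.
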
class ThresholdMask:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                "value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
            }
        }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "image_to_mask"

    def image_to_mask(self, mask, value):
        mask = (mask > value).float()
        return (mask,)

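# The comparison is strict, so with value=0.5 a pixel at exactly 0.5 maps to
# 0.0 and anything above it to 1.0, binarizing a soft mask.
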
NODE_CLASS_MAPPINGS = {
    "LatentCompositeMasked": LatentCompositeMasked,
    "ImageCompositeMasked": ImageCompositeMasked,
    "MaskToImage": MaskToImage,
    "ImageToMask": ImageToMask,
    "ImageColorToMask": ImageColorToMask,
    "SolidMask": SolidMask,
    "InvertMask": InvertMask,
    "CropMask": CropMask,
    "MaskComposite": MaskComposite,
    "FeatherMask": FeatherMask,
    "GrowMask": GrowMask,
    "ThresholdMask": ThresholdMask,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "ImageToMask": "Convert Image to Mask",
    "MaskToImage": "Convert Mask to Image",
}