mirror of https://github.com/comfyanonymous/ComfyUI.git · synced 2025-04-18 10:03:31 +00:00

* Allow disabling pe in flux code for some other models.
* Initial Hunyuan3Dv2 implementation. Supports the multiview, mini, turbo models and VAEs.
* Fix orientation of hunyuan 3d model.
* A few fixes for the hunyuan3d models.
* Update frontend to 1.13 (#7331)
* Add backend primitive nodes (#7328)
  * Add backend primitive nodes
  * Add control after generate to int primitive
* Nodes to convert images to YUV and back. Can be used to convert an image to black and white.
* Update frontend to 1.14 (#7343)
* Native LotusD Implementation (#7125)
  * draft pass at a native comfy implementation of Lotus-D depth and normal est
  * fix model_sampling kludges
  * fix ruff
  Co-authored-by: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
* Automatically set the right sampling type for lotus.
* support output normal and lineart once (#7290)
* [nit] Format error strings (#7345)
* ComfyUI version v0.3.27
* Fallback to pytorch attention if sage attention fails.
* Add model merging node for WAN 2.1
* Add Hunyuan3D to readme.
* Support more float8 types.
* Add CFGZeroStar node. Works on all models that use a negative prompt but is meant for rectified flow models.
* Support the WAN 2.1 fun control models. Use the new WanFunControlToVideo node.
* Add WanFunInpaintToVideo node for the Wan fun inpaint models.
* Update frontend to 1.14.6 (#7416) Cherry-pick the fix: https://github.com/Comfy-Org/ComfyUI_frontend/pull/3252
* Don't error if wan concat image has extra channels.
* ltxv: fix preprocessing exception when compression is 0. (#7431)
* Remove useless code.
* Fix latent composite node not working when source has alpha.
* Fix alpha channel mismatch on destination in ImageCompositeMasked
* Add option to store TE in bf16 (#7461)
* User missing (#7439)
  * Ensuring a 401 error is returned when user data is not found in multi-user context.
  * Returning a 401 error when provided comfy-user does not exists on server side.
* Fix comment. This function does not support quads.
* MLU memory optimization (#7470) Co-authored-by: huzhan <huzhan@cambricon.com>
* Fix alpha image issue in more nodes.
* Fix problem.
* Disable partial offloading of audio VAE.
* Add activations_shape info in UNet models (#7482)
  * Add activations_shape info in UNet models
  * activations_shape should be a list
* Support 512 siglip model.
* Show a proper error to the user when a vision model file is invalid.
* Support the wan fun reward loras.

Co-authored-by: comfyanonymous <comfyanonymous@protonmail.com>
Co-authored-by: Chenlei Hu <hcl@comfy.org>
Co-authored-by: thot experiment <94414189+thot-experiment@users.noreply.github.com>
Co-authored-by: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Co-authored-by: Terry Jia <terryjia88@gmail.com>
Co-authored-by: Michael Kupchick <michael@lightricks.com>
Co-authored-by: BVH <82035780+bvhari@users.noreply.github.com>
Co-authored-by: Laurent Erignoux <lerignoux@gmail.com>
Co-authored-by: BiologicalExplosion <49753622+BiologicalExplosion@users.noreply.github.com>
Co-authored-by: huzhan <huzhan@cambricon.com>
Co-authored-by: Raphael Walker <slickytail.mc@gmail.com>
285 lines · 9.0 KiB · Python
import comfy_extras.nodes_model_merging
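
# Each node below only customizes INPUT_TYPES(): besides the two MODEL inputs,
# every key it adds is a parameter-name prefix inside the diffusion model's
# state dict, exposed as a 0.0-1.0 FLOAT widget. The inherited
# comfy_extras.nodes_model_merging.ModelMergeBlocks class performs the actual
# merge; it appears to pick, for each weight, the ratio of the most specific
# (longest) matching prefix, with 1.0 keeping model1 and 0.0 taking model2.
# The helper below is only an illustrative sketch of that longest-prefix
# lookup, not code used by ComfyUI itself.
def _example_ratio_for_key(key, ratios, default=1.0):
    best_len = -1
    best_ratio = default
    for prefix, ratio in ratios.items():
        # prefer the longest prefix that matches this parameter name
        if key.startswith(prefix) and len(prefix) > best_len:
            best_len = len(prefix)
            best_ratio = ratio
    return best_ratio

# e.g. _example_ratio_for_key("input_blocks.1.0.weight", {"input_blocks.1.": 0.25}) -> 0.25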

class ModelMergeSD1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["time_embed."] = argument
        arg_dict["label_emb."] = argument

        for i in range(12):
            arg_dict["input_blocks.{}.".format(i)] = argument

        for i in range(3):
            arg_dict["middle_block.{}.".format(i)] = argument

        for i in range(12):
            arg_dict["output_blocks.{}.".format(i)] = argument

        arg_dict["out."] = argument

        return {"required": arg_dict}
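
# For illustration, ModelMergeSD1.INPUT_TYPES() resolves to roughly:
#   {"required": {"model1": ("MODEL",), "model2": ("MODEL",),
#                 "time_embed.": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
#                 "label_emb.": (...), "input_blocks.0.": (...), ..., "output_blocks.11.": (...),
#                 "out.": (...)}}
# i.e. one slider per UNet block prefix, all defaulting to 1.0 (keep model1).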

class ModelMergeSDXL(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["time_embed."] = argument
        arg_dict["label_emb."] = argument

        for i in range(9):
            arg_dict["input_blocks.{}".format(i)] = argument

        for i in range(3):
            arg_dict["middle_block.{}".format(i)] = argument

        for i in range(9):
            arg_dict["output_blocks.{}".format(i)] = argument

        arg_dict["out."] = argument

        return {"required": arg_dict}

class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embed."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["context_embedder."] = argument
        arg_dict["y_embedder."] = argument
        arg_dict["t_embedder."] = argument

        for i in range(24):
            arg_dict["joint_blocks.{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["init_x_linear."] = argument
        arg_dict["positional_encoding"] = argument
        arg_dict["cond_seq_linear."] = argument
        arg_dict["register_tokens"] = argument
        arg_dict["t_embedder."] = argument

        for i in range(4):
            arg_dict["double_layers.{}.".format(i)] = argument

        for i in range(32):
            arg_dict["single_layers.{}.".format(i)] = argument

        arg_dict["modF."] = argument
        arg_dict["final_linear."] = argument

        return {"required": arg_dict}

class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["img_in."] = argument
        arg_dict["time_in."] = argument
        arg_dict["guidance_in"] = argument
        arg_dict["vector_in."] = argument
        arg_dict["txt_in."] = argument

        for i in range(19):
            arg_dict["double_blocks.{}.".format(i)] = argument

        for i in range(38):
            arg_dict["single_blocks.{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}
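
# Hypothetical programmatic use of one of these nodes outside the graph UI,
# assuming the entry point inherited from ModelMergeBlocks is
# merge(model1, model2, **ratios) returning a (MODEL,) tuple; `model1`,
# `model2` and the ratio values below are placeholders:
#
#   ratios = {k: 1.0 for k in ModelMergeFlux1.INPUT_TYPES()["required"]
#             if k not in ("model1", "model2")}
#   ratios.update({"double_blocks.{}.".format(i): 0.5 for i in range(19)})  # blend only the double blocks
#   (merged,) = ModelMergeFlux1().merge(model1, model2, **ratios)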

class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embed."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["context_embedder."] = argument
        arg_dict["y_embedder."] = argument
        arg_dict["t_embedder."] = argument

        for i in range(38):
            arg_dict["joint_blocks.{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

class ModelMergeMochiPreview(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_frequencies."] = argument
        arg_dict["t_embedder."] = argument
        arg_dict["t5_y_embedder."] = argument
        arg_dict["t5_yproj."] = argument

        for i in range(48):
            arg_dict["blocks.{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["patchify_proj."] = argument
        arg_dict["adaln_single."] = argument
        arg_dict["caption_projection."] = argument

        for i in range(28):
            arg_dict["transformer_blocks.{}.".format(i)] = argument

        arg_dict["scale_shift_table"] = argument
        arg_dict["proj_out."] = argument

        return {"required": arg_dict}

class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embedder."] = argument
        arg_dict["extra_pos_embedder."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["t_embedder."] = argument
        arg_dict["affline_norm."] = argument

        for i in range(28):
            arg_dict["blocks.block{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embedder."] = argument
        arg_dict["extra_pos_embedder."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["t_embedder."] = argument
        arg_dict["affline_norm."] = argument

        for i in range(36):
            arg_dict["blocks.block{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

class ModelMergeWAN2_1(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"
    DESCRIPTION = "1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb."

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["patch_embedding."] = argument
        arg_dict["time_embedding."] = argument
        arg_dict["time_projection."] = argument
        arg_dict["text_embedding."] = argument
        arg_dict["img_emb."] = argument

        for i in range(40):
            arg_dict["blocks.{}.".format(i)] = argument

        arg_dict["head."] = argument

        return {"required": arg_dict}
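
# The classes above differ only in their prefix names and block counts. A
# hypothetical helper like this one (not used anywhere in ComfyUI, shown only
# to make the shared pattern explicit) could build the same arg_dict shape:
def _example_build_arg_dict(prefixes, block_prefix, num_blocks, suffix="."):
    arg_dict = {"model1": ("MODEL",), "model2": ("MODEL",)}
    argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
    for p in prefixes:
        arg_dict[p] = argument
    for i in range(num_blocks):
        arg_dict["{}{}{}".format(block_prefix, i, suffix)] = argument
    return {"required": arg_dict}

# e.g. _example_build_arg_dict(["patch_embedding.", "head."], "blocks.", 40)
# produces a subset of ModelMergeWAN2_1's inputs (widget order aside).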

NODE_CLASS_MAPPINGS = {
    "ModelMergeSD1": ModelMergeSD1,
    "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
    "ModelMergeSDXL": ModelMergeSDXL,
    "ModelMergeSD3_2B": ModelMergeSD3_2B,
    "ModelMergeAuraflow": ModelMergeAuraflow,
    "ModelMergeFlux1": ModelMergeFlux1,
    "ModelMergeSD35_Large": ModelMergeSD35_Large,
    "ModelMergeMochiPreview": ModelMergeMochiPreview,
    "ModelMergeLTXV": ModelMergeLTXV,
    "ModelMergeCosmos7B": ModelMergeCosmos7B,
    "ModelMergeCosmos14B": ModelMergeCosmos14B,
    "ModelMergeWAN2_1": ModelMergeWAN2_1,
}
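
# ComfyUI discovers these nodes by importing this module and reading
# NODE_CLASS_MAPPINGS; since no NODE_DISPLAY_NAME_MAPPINGS is defined here, the
# dictionary keys presumably double as the node names shown in the UI. A new
# model-specific merge node would be registered the same way (hypothetical):
#
#   NODE_CLASS_MAPPINGS["ModelMergeMyModel"] = ModelMergeMyModel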