VAE scaling

This commit is contained in:
kijai 2025-02-18 17:02:11 +02:00
parent f8169ace7a
commit 7d9f51753b
2 changed files with 2 additions and 2 deletions

View File

@@ -879,7 +879,7 @@ class HunyuanVideo(BaseModel):
else:
padding_shape = (noise.shape[0], 16, noise.shape[2] - 1, noise.shape[3], noise.shape[4])
latent_padding = torch.zeros(padding_shape, device=noise.device, dtype=noise.dtype)
image_latents = torch.cat([image.to(noise), latent_padding], dim=2)
image_latents = torch.cat([image.to(noise), latent_padding], dim=2) * 0.476986
process_image_in = lambda image: image
out['c_concat'] = comfy.conds.CONDNoiseShape(process_image_in(image_latents))

View File

@@ -136,7 +136,7 @@ def detect_unet_config(state_dict, key_prefix):
if '{}txt_in.individual_token_refiner.blocks.0.norm1.weight'.format(key_prefix) in state_dict_keys: #Hunyuan Video
dit_config = {}
dit_config["image_model"] = "hunyuan_video"
dit_config["in_channels"] = state_dict["img_in.proj.weight"].shape[1] #SkyReels img2video 32 has input channels
dit_config["in_channels"] = state_dict["img_in.proj.weight"].shape[1] #SkyReels img2video has 32 input channels
dit_config["patch_size"] = [1, 2, 2]
dit_config["out_channels"] = 16
dit_config["vec_in_dim"] = 768