Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 10:25:16 +00:00)
Move to using generator for the noise, making seed required

commit 2d3c1d689f
parent 43cf3f6ec2
@@ -33,9 +33,8 @@ class SVD_img2vid_Conditioning:
                               "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}),
                               "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}),
                               "fps": ("INT", {"default": 6, "min": 1, "max": 1024}),
-                              "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01})
-                              },
-                "optional": { "noise_seed": ("INT", {"default": 0, "min": 0, "max": 2**32-1}),
+                              "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+                              "noise_seed": ("INT", {"default": 0, "min": 0, "max": 2**32-1}),
                               }
                 }
     RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
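Read as plain Python rather than a fragmented hunk, the resulting input declaration looks roughly like the sketch below. The indentation, the `@classmethod` wrapper, and the elided `clip_vision`/`init_image`/`vae`/`width`/`height` entries are assumptions based on the rest of the class, not taken verbatim from this diff; the substantive change is that `noise_seed` leaves the removed `"optional"` block and becomes a `"required"` input, so every run of the node now supplies a seed.

```python
# Minimal sketch (assumed layout, not the verbatim file) of INPUT_TYPES after this hunk.
class SVD_img2vid_Conditioning_sketch:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            # ... clip_vision / init_image / vae / width / height entries elided ...
            "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}),
            "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}),
            "fps": ("INT", {"default": 6, "min": 1, "max": 1024}),
            "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            "noise_seed": ("INT", {"default": 0, "min": 0, "max": 2**32-1}),  # now required; max fits an unsigned 32-bit seed
        }}
```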
@@ -46,15 +45,14 @@ class SVD_img2vid_Conditioning:
     CATEGORY = "conditioning/video_models"
 
     def encode(self, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id,
-               fps, augmentation_level, noise_seed=None):
+               fps, augmentation_level, noise_seed):
         output = clip_vision.encode_image(init_image)
         pooled = output.image_embeds.unsqueeze(0)
         pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
         encode_pixels = pixels[:,:,:,:3]
         if augmentation_level > 0:
-            if noise_seed is not None:
-                torch.manual_seed(noise_seed)
-            encode_pixels += torch.randn_like(pixels) * augmentation_level
+            generator = torch.manual_seed(noise_seed)
+            encode_pixels += torch.randn(pixels.shape, generator=generator) * augmentation_level
         t = vae.encode(encode_pixels)
         positive = [[pooled, {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}]]
         negative = [[torch.zeros_like(pooled), {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}]]
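For reference, a standalone sketch of the new noise path; the helper name and the example tensor shape are hypothetical, while the generator logic mirrors the replaced lines above. `torch.manual_seed()` re-seeds and returns the default CPU generator, and passing that generator to `torch.randn()` ties the augmentation noise to `noise_seed` instead of whatever global RNG state `torch.randn_like()` happened to see.

```python
import torch

def add_noise_augmentation(pixels, augmentation_level, noise_seed):
    # Hypothetical helper mirroring the new encode() branch: seed the default
    # generator and draw the augmentation noise from it explicitly.
    generator = torch.manual_seed(noise_seed)
    return pixels + torch.randn(pixels.shape, generator=generator) * augmentation_level

# The same seed now yields the same augmented pixels.
pixels = torch.zeros(1, 576, 1024, 3)
a = add_noise_augmentation(pixels, 0.1, noise_seed=42)
b = add_noise_augmentation(pixels, 0.1, noise_seed=42)
assert torch.equal(a, b)
```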
|