updated nodes

drunkplato 2025-01-14 00:27:20 +00:00 committed by Ubuntu
parent 4e788eb2ea
commit e907deff81
4 changed files with 71 additions and 117 deletions

.gitignore (vendored)
@@ -42,5 +42,5 @@ comfy-venv
comfy_venv_3.11
models-2
!comfy/ldm/models/autoencoder.py


@@ -1,3 +1,4 @@
+import json
from comfy_extras.nodes_custom_sampler import Noise_RandomNoise
@@ -16,7 +17,7 @@ class MD_VideoInputs:
                    "description": "The length of the video."
                }),
                "steps": ("INT", {
-                    "default": 25,
+                    "default": 15,
                    "description": "Number of steps to generate the video."
                }),
                "width": ("INT", {
@@ -58,10 +59,21 @@ class MD_VideoInputs:
            }
        }
-    RETURN_TYPES = ("STRING", "INT", "INT", "INT", "INT", "INT", "FLOAT", "STRING", "STRING", "STRING", "STRING", "NOISE",)
-    RETURN_NAMES = ("image_url", "length", "steps", "width", "height", "crf", "terminal", "user_prompt", "pre_prompt", "post_prompt", "negative_prompt", "seed")
+    RETURN_TYPES = ("STRING", "INT", "INT", "INT", "INT", "INT", "FLOAT", "STRING", "STRING", "STRING", "STRING", "NOISE", "STRING")
+    RETURN_NAMES = ("image_url", "length", "steps", "width", "height", "crf", "terminal", "user_prompt", "pre_prompt", "post_prompt", "negative_prompt", "seed", "input_metadata")
    FUNCTION = "load_inputs"
    CATEGORY = "MemeDeck"

-    def load_inputs(self, image_url, length=121, steps=25, width=768, height=768, crf=28, terminal=0.1, user_prompt="", pre_prompt="", post_prompt="", negative_prompt="", seed=None):
-        return (image_url, length, steps, width, height, crf, terminal, user_prompt, pre_prompt, post_prompt, negative_prompt, Noise_RandomNoise(seed))
+    def load_inputs(self, image_url, length=121, steps=15, width=768, height=768, crf=28, terminal=0.1, user_prompt="", pre_prompt="", post_prompt="", negative_prompt="", seed=None):
+        input_metadata = json.dumps({
+            "length": length,
+            "steps": steps,
+            "width": width,
+            "height": height,
+            "crf": crf,
+            "terminal": terminal,
+            "user_prompt": user_prompt,
+            "pre_prompt": pre_prompt,
+            "post_prompt": post_prompt,
+        })
+        return (image_url, length, steps, width, height, crf, terminal, user_prompt, pre_prompt, post_prompt, negative_prompt, Noise_RandomNoise(seed), input_metadata)
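
Note on the MD_VideoInputs change above: the node's generation settings are now serialized with json.dumps and exposed as a thirteenth output, input_metadata, so a downstream node can receive every setting over a single connection instead of twelve. A minimal consuming-side sketch, assuming only what the diff shows (the string is a plain json.dumps of the input values; the helper name below is hypothetical):

import json

def read_input_metadata(input_metadata: str) -> dict:
    # input_metadata is the JSON string built by MD_VideoInputs.load_inputs;
    # json.loads recovers the original settings as a dict.
    return json.loads(input_metadata)

# With the node's new defaults (steps lowered from 25 to 15 in this commit):
example = '{"length": 121, "steps": 15, "width": 768, "height": 768, "crf": 28, "terminal": 0.1, "user_prompt": "", "pre_prompt": "", "post_prompt": ""}'
print(read_input_metadata(example)["steps"])  # -> 15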


@@ -47,11 +47,13 @@ class MD_SaveMP4:
                "crf": ("FLOAT",),
                "motion_prompt": ("STRING", ),
                "negative_prompt": ("STRING", ),
-                "img2vid_metadata": ("STRING", ),
-                "sampler_metadata": ("STRING", ),
            },
+            "optional": {
+                "seed_value": ("NOISE", ),
+                "input_metadata": ("STRING", ),
+            },
            # "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
            }
        }

    RETURN_TYPES = ()
    FUNCTION = "save_images"
@@ -60,7 +62,7 @@ class MD_SaveMP4:
    CATEGORY = "MemeDeck"

-    def save_images(self, images, fps, filename_prefix, crf=None, motion_prompt=None, negative_prompt=None, img2vid_metadata=None, sampler_metadata=None):
+    def save_images(self, images, fps, filename_prefix, crf=None, motion_prompt=None, negative_prompt=None, seed_value=None, input_metadata=None):
        start_time = time.time()
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
@@ -82,14 +84,17 @@ class MD_SaveMP4:
        # metadata = pil_images[0].getexif()
        # num_frames = len(pil_images)

+        logger.info(f"seed_value: {seed_value.seed}")
+        logger.info(f"input_metadata: {input_metadata}")
+
        json_metadata = {
            "crf": crf,
            "motion_prompt": motion_prompt,
            "negative_prompt": negative_prompt,
-            "img2vid_metadata": json.loads(img2vid_metadata),
-            "sampler_metadata": json.loads(sampler_metadata),
-        }
+            "seed": seed_value.seed,
+            "input_metadata": json.loads(input_metadata),
+        }

        # Use ffmpeg to create MP4 with watermark
        output_file = f"{filename}_{counter:05}_.mp4"
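
Note on the MD_SaveMP4 change above: seed_value and input_metadata are declared under "optional", but the new code dereferences seed_value.seed and calls json.loads(input_metadata) unconditionally, so leaving either socket unconnected would raise at save time. A defensive variant of the metadata block (a sketch, not part of this commit):

json_metadata = {
    "crf": crf,
    "motion_prompt": motion_prompt,
    "negative_prompt": negative_prompt,
    # Guard the optional inputs: seed_value may be None (no NOISE connected)
    # and input_metadata may be None or an empty string.
    "seed": seed_value.seed if seed_value is not None else None,
    "input_metadata": json.loads(input_metadata) if input_metadata else None,
}
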
@@ -237,105 +242,6 @@ class MD_SaveMP4:
return watermark_img
# def save_images(self, images, fps, filename_prefix, lossless, quality, method, crf=None, motion_prompt=None, negative_prompt=None, img2vid_metadata=None, sampler_metadata=None):
# start_time = time.time()
# method = self.methods.get(method)
# filename_prefix += self.prefix_append
# full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
# filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]
# )
# results = []
# # Prepare PIL images in one loop
# pil_images = [
# Image.fromarray(np.clip((255. * image.cpu().numpy()), 0, 255).astype(np.uint8))
# for image in images
# ]
# metadata = pil_images[0].getexif()
# num_frames = len(pil_images)
# # Pre-serialize JSON metadata
# json_metadata = json.dumps({
# "crf": crf,
# "motion_prompt": motion_prompt,
# "negative_prompt": negative_prompt,
# "img2vid_metadata": json.loads(img2vid_metadata),
# "sampler_metadata": json.loads(sampler_metadata),
# })
# # Save images directly
# duration = int(1000.0 / fps)
# for i in range(0, len(pil_images), num_frames):
# file = f"{filename}_{counter:05}_.webp"
# pil_images[i].save(
# os.path.join(full_output_folder, file),
# save_all=True,
# duration=duration,
# append_images=pil_images[i + 1:i + num_frames],
# exif=metadata,
# lossless=lossless,
# quality=quality,
# method=method
# )
# results.append({"filename": file, "subfolder": subfolder, "type": self.type})
# counter += 1
# end_time = time.time()
# logger.info(f"Save images took: {end_time - start_time} seconds")
# return {
# "ui": {
# "images": results,
# "animated": (num_frames != 1,),
# "metadata": (json_metadata,),
# },
# }
# def save_images(self, images, fps, filename_prefix, lossless, quality, method, crf=None, motion_prompt=None, negative_prompt=None, img2vid_metadata=None, sampler_metadata=None):
# method = self.methods.get(method)
# filename_prefix += self.prefix_append
# full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
# results = list()
# pil_images = []
# for image in images:
# i = 255. * image.cpu().numpy()
# img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
# pil_images.append(img)
# metadata = pil_images[0].getexif()
# num_frames = len(pil_images)
# json_metadata = {
# "crf": crf,
# "motion_prompt": motion_prompt,
# "negative_prompt": negative_prompt,
# "img2vid_metadata": json.loads(img2vid_metadata),
# "sampler_metadata": json.loads(sampler_metadata),
# }
# c = len(pil_images)
# for i in range(0, c, num_frames):
# file = f"{filename}_{counter:05}_.webp"
# pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], exif=metadata, lossless=lossless, quality=quality, method=method)
# results.append({
# "filename": file,
# "subfolder": subfolder,
# "type": self.type,
# })
# counter += 1
# animated = num_frames != 1
# # properly serialize metadata
# return {
# "ui": {
# "images": results,
# "animated": (animated,),
# "metadata": (json.dumps(json_metadata),)
# },
# }
class MD_VAEDecode:
@classmethod
def INPUT_TYPES(s):


@@ -1,3 +1,4 @@
+import json
from pathlib import Path
import sys
import time
@@ -111,7 +112,11 @@ class MD_ImageToMotionPrompt:
                    },
                ),
                "max_tokens": ("INT", {"min": 1, "max": 2048, "default": 200}),
-            }
+            },
+            # "optional": {
+            #     "temperature": ("FLOAT", {"min": 0.0, "max": 1.0, "step": 0.01, "default": 0.2}),
+            #     "top_p": ("FLOAT", {"min": 0.0, "max": 1.0, "step": 0.01, "default": 0.9}),
+            # }
        }
@@ -121,14 +126,23 @@ class MD_ImageToMotionPrompt:
    CATEGORY = "MemeDeck"

    def generate_completion(
-        self, pre_prompt: str, post_prompt: str, Image: torch.Tensor, clip, prompt: str, negative_prompt: str, max_tokens: int
+        self, pre_prompt: str, post_prompt: str, Image: torch.Tensor, clip, prompt: str, negative_prompt: str,
+        # temperature: float,
+        # top_p: float,
+        max_tokens: int
    ) -> Tuple[str]:
        # start a timer
        start_time = time.time()
        b64image = image.pil2base64(image.tensor2pil(Image))

        # change this to a endpoint on localhost:5010/inference that takes a json with the image and the prompt
-        response = requests.post("http://127.0.0.1:5010/inference", json={"image_url": f"data:image/jpeg;base64,{b64image}", "prompt": prompt})
+        response = requests.post("http://127.0.0.1:5010/inference", json={
+            "image_url": f"data:image/jpeg;base64,{b64image}",
+            "prompt": prompt,
+            "temperature": 0.2,
+            "top_p": 0.7,
+            "max_gen_len": max_tokens,
+        })

        if response.status_code != 200:
            raise Exception(f"Failed to generate completion: {response.text}")

        end_time = time.time()
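
Note on the MD_ImageToMotionPrompt change above: rather than exposing temperature and top_p as node inputs (the commented-out "optional" block), the request now hard-codes temperature=0.2 and top_p=0.7 and forwards max_tokens as max_gen_len. A standalone sketch for exercising the local captioning endpoint outside ComfyUI; the field names come from the diff, while the prompt text and base64 placeholder are illustrative and the response body format is not shown in the diff:

import requests

payload = {
    "image_url": "data:image/jpeg;base64,<BASE64_JPEG>",  # placeholder image data
    "prompt": "Describe the motion in this image.",       # illustrative prompt
    "temperature": 0.2,
    "top_p": 0.7,
    "max_gen_len": 200,
}
response = requests.post("http://127.0.0.1:5010/inference", json=payload)
if response.status_code != 200:
    raise Exception(f"Failed to generate completion: {response.text}")
print(response.text)  # body format is not specified in the diff
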
@@ -171,6 +185,19 @@ class MD_CompressAdjustNode:
                    "description": "The height of the video."
                }),
            },
+            "optional": {
+                "weights": ("STRING", {
+                    "multiline": True,
+                    "default": json.dumps({
+                        "ideal_blockiness": 600,
+                        "ideal_edge_density": 12,
+                        "ideal_color_variation": 10000,
+                        "blockiness_weight": -0.006,
+                        "edge_density_weight": 0.32,
+                        "color_variation_weight": -0.00005
+                    }),
+                }),
+            }
        }
RETURN_TYPES = ("IMAGE", "FLOAT", "INT", "INT")
@@ -252,11 +279,20 @@ class MD_CompressAdjustNode:
        target_crf = round(target_crf, 2)
        return target_crf

-    def tensor_to_video_and_back(self, image, desired_crf=28, width=832, height=832):
+    def tensor_to_video_and_back(self, image, desired_crf=28, width=832, height=832, weights=None):
        temp_dir = "temp_video"
        filename = f"frame_{time.time()}".split('.')[0]
        os.makedirs(temp_dir, exist_ok=True)

+        if weights:
+            weights = json.loads(weights)
+            self.ideal_blockiness = weights["ideal_blockiness"]
+            self.ideal_edge_density = weights["ideal_edge_density"]
+            self.ideal_color_variation = weights["ideal_color_variation"]
+            self.blockiness_weight = weights["blockiness_weight"]
+            self.edge_density_weight = weights["edge_density_weight"]
+            self.color_variation_weight = weights["color_variation_weight"]
+
        # Convert single image to list if necessary
        if len(image.shape) == 3:
            image = [image]
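
Note on the MD_CompressAdjustNode change above: the new optional weights input is a JSON string whose keys mirror the node's tuning attributes; when supplied, tensor_to_video_and_back parses it and overwrites the ideal_* / *_weight instance attributes before the CRF estimate is computed. A caller-side sketch of building a custom weights string (the override values are illustrative, not recommendations):

import json

custom_weights = json.dumps({
    "ideal_blockiness": 550,            # illustrative overrides for the defaults
    "ideal_edge_density": 14,           # shown in the node's "weights" input above
    "ideal_color_variation": 9500,
    "blockiness_weight": -0.005,
    "edge_density_weight": 0.30,
    "color_variation_weight": -0.00004,
})
# Feeding this string into the node's "weights" input (or calling
# tensor_to_video_and_back(..., weights=custom_weights) directly) replaces
# the instance attributes used for the compression adjustment.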