Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-06-03 10:02:09 +08:00)

Compare commits: 11 commits (4fd764317c...952a096e89)
Commits:
- 952a096e89
- d8e5662822
- 3d44a09812
- 62690eddec
- 05eb10b43a
- f5e4e976f4
- e53f8fd168
- 28d5474d76
- e231676a77
- 76649815d3
- 22c0afc8e2
README.md (25 lines changed):
@@ -279,6 +279,17 @@ For models compatible with Ascend Extension for PyTorch (torch_npu). To get star

3. Next, install the necessary packages for torch-npu by adhering to the platform-specific instructions on the [Installation](https://ascend.github.io/docs/sources/pytorch/install.html#pytorch) page.

4. Finally, adhere to the [ComfyUI manual installation](#manual-install-windows-linux) guide for Linux. Once all components are installed, you can run ComfyUI as described earlier.

+### UV (Package Manager)
+
+UV is an extremely fast Python package and project manager, written in Rust. For UV installation, check the [docs](https://docs.astral.sh/uv/). UV helps with isolation and reproducibility of the desired environment.
+
+```bash
+git clone https://github.com/comfyanonymous/ComfyUI.git # Clone this repo
+cd ComfyUI # Move to created folder
+uv add --requirements requirements.txt # Add dependencies
+```
+
#### Cambricon MLUs

For models compatible with Cambricon Extension for PyTorch (torch_mlu). Here's a step-by-step guide tailored to your platform and installation method:

@@ -287,9 +298,21 @@ For models compatible with Cambricon Extension for PyTorch (torch_mlu). Here's a

2. Next, install PyTorch (torch_mlu) by following the instructions on the [Installation](https://www.cambricon.com/docs/sdk_1.15.0/cambricon_pytorch_1.17.0/user_guide_1.9/index.html) page.

3. Launch ComfyUI by running `python main.py`

# Running

-```python main.py```
+```bash
+python main.py
+```
+
+If using **UV**:
+
+```bash
+uv run python main.py
+# or, if the virtual env was activated beforehand
+source .venv/bin/activate
+python main.py
+```

### For AMD cards not officially supported by ROCm
comfy_api_nodes/nodes_kling.py:

@@ -65,6 +65,12 @@ from comfy_api_nodes.apinode_utils import (
    download_url_to_image_tensor,
)
from comfy_api_nodes.mapper_utils import model_field_to_node_input
+from comfy_api_nodes.util.validation_utils import (
+    validate_image_dimensions,
+    validate_image_aspect_ratio,
+    validate_video_dimensions,
+    validate_video_duration,
+)
from comfy_api.input.basic_types import AudioInput
from comfy_api.input.video_types import VideoInput
from comfy_api.input_impl import VideoFromFile

@@ -80,18 +86,16 @@ PATH_CHARACTER_IMAGE = f"/proxy/kling/{KLING_API_VERSION}/images/generations"
PATH_VIRTUAL_TRY_ON = f"/proxy/kling/{KLING_API_VERSION}/images/kolors-virtual-try-on"
PATH_IMAGE_GENERATIONS = f"/proxy/kling/{KLING_API_VERSION}/images/generations"


MAX_PROMPT_LENGTH_T2V = 2500
MAX_PROMPT_LENGTH_I2V = 500
MAX_PROMPT_LENGTH_IMAGE_GEN = 500
MAX_NEGATIVE_PROMPT_LENGTH_IMAGE_GEN = 200
MAX_PROMPT_LENGTH_LIP_SYNC = 120

-# TODO: adjust based on tests
-AVERAGE_DURATION_T2V = 319
-AVERAGE_DURATION_I2V = 164
-AVERAGE_DURATION_LIP_SYNC = 120
-AVERAGE_DURATION_VIRTUAL_TRY_ON = 19
+AVERAGE_DURATION_T2V = 319
+AVERAGE_DURATION_I2V = 164
+AVERAGE_DURATION_LIP_SYNC = 455
+AVERAGE_DURATION_VIRTUAL_TRY_ON = 19
+AVERAGE_DURATION_IMAGE_GEN = 32
+AVERAGE_DURATION_VIDEO_EFFECTS = 320
+AVERAGE_DURATION_VIDEO_EXTEND = 320

@@ -211,23 +215,8 @@ def validate_input_image(image: torch.Tensor) -> None:

    See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo
    """
-    if len(image.shape) == 4:
-        height, width = image.shape[1], image.shape[2]
-    elif len(image.shape) == 3:
-        height, width = image.shape[0], image.shape[1]
-    else:
-        raise ValueError("Invalid image tensor shape.")
-
-    # Ensure minimum resolution is met
-    if height < 300:
-        raise ValueError("Image height must be at least 300px")
-    if width < 300:
-        raise ValueError("Image width must be at least 300px")
-
-    # Ensure aspect ratio is within acceptable range
-    aspect_ratio = width / height
-    if aspect_ratio < 1 / 2.5 or aspect_ratio > 2.5:
-        raise ValueError("Image aspect ratio must be between 1:2.5 and 2.5:1")
+    validate_image_dimensions(image, min_width=300, min_height=300)
+    validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5)


def get_camera_control_input_config(
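For orientation, a minimal usage sketch (not part of the diff) of the two helpers that replace the inline checks above; the `(1, H, W, C)` tensor layout is ComfyUI's usual IMAGE batch format and the sizes here are made up:

```python
import torch
from comfy_api_nodes.util.validation_utils import (
    validate_image_dimensions,
    validate_image_aspect_ratio,
)

# A 512x768 RGB batch: (batch, height, width, channels).
image = torch.zeros((1, 512, 768, 3))

validate_image_dimensions(image, min_width=300, min_height=300)  # 768x512 passes
validate_image_aspect_ratio(image, min_aspect_ratio=1 / 2.5, max_aspect_ratio=2.5)  # passes
```

One subtlety: `get_image_dimensions` (defined in the new util module below) returns `(height, width)`, but `validate_image_aspect_ratio` unpacks it as `(width, height)`, so the ratio it checks is effectively height/width; with reciprocal-symmetric bounds like 1:2.5 and 2.5:1 the outcome is the same either way.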
@@ -1243,6 +1232,17 @@ class KlingLipSyncBase(KlingNodeBase):
    RETURN_TYPES = ("VIDEO", "STRING", "STRING")
    RETURN_NAMES = ("VIDEO", "video_id", "duration")

+    def validate_lip_sync_video(self, video: VideoInput):
+        """
+        Validates the input video adheres to the expectations of the Kling Lip Sync API:
+        - Video length does not exceed 10s and is not shorter than 2s
+        - Length and width dimensions should both be between 720px and 1920px
+
+        See: https://app.klingai.com/global/dev/document-api/apiReference/model/videoTolip
+        """
+        validate_video_dimensions(video, 720, 1920)
+        validate_video_duration(video, 2, 10)
+
    def validate_text(self, text: str):
        if not text:
            raise ValueError("Text is required")
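As an illustration only: since the video validators probe nothing beyond `get_dimensions()` and `get_duration()`, a duck-typed stub is enough to see what the two calls above enforce; `StubVideo` is hypothetical:

```python
from comfy_api_nodes.util.validation_utils import (
    validate_video_dimensions,
    validate_video_duration,
)

class StubVideo:
    """Hypothetical stand-in for a VideoInput; only the two probed methods."""
    def get_dimensions(self):
        return 1280, 720  # (width, height)

    def get_duration(self):
        return 7.5  # seconds

video = StubVideo()
validate_video_dimensions(video, 720, 1920)  # positional args are min_width and max_width
validate_video_duration(video, 2, 10)        # 2s <= 7.5s <= 10s, passes
```

Note that the positional call only bounds the width; heights stay unchecked unless `min_height`/`max_height` are passed by keyword.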
@@ -1282,6 +1282,7 @@ class KlingLipSyncBase(KlingNodeBase):
    ) -> tuple[VideoFromFile, str, str]:
        if text:
            self.validate_text(text)
+        self.validate_lip_sync_video(video)

        # Upload video to Comfy API and get download URL
        video_url = upload_video_to_comfyapi(video, auth_kwargs=kwargs)

@@ -1352,7 +1353,7 @@ class KlingLipSyncAudioToVideoNode(KlingLipSyncBase):
            },
        }

-    DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file."
+    DESCRIPTION = "Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length."

    def api_call(
        self,

@@ -1464,7 +1465,7 @@ class KlingLipSyncTextToVideoNode(KlingLipSyncBase):
            },
        }

-    DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt."
+    DESCRIPTION = "Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length."

    def api_call(
        self,
comfy_api_nodes/util/__init__.py (new file, empty)

comfy_api_nodes/util/validation_utils.py (new file, 100 lines):
@@ -0,0 +1,100 @@
import logging
from typing import Optional

import torch
from comfy_api.input.video_types import VideoInput


def get_image_dimensions(image: torch.Tensor) -> tuple[int, int]:
    if len(image.shape) == 4:
        return image.shape[1], image.shape[2]
    elif len(image.shape) == 3:
        return image.shape[0], image.shape[1]
    else:
        raise ValueError("Invalid image tensor shape.")


def validate_image_dimensions(
    image: torch.Tensor,
    min_width: Optional[int] = None,
    max_width: Optional[int] = None,
    min_height: Optional[int] = None,
    max_height: Optional[int] = None,
):
    height, width = get_image_dimensions(image)

    if min_width is not None and width < min_width:
        raise ValueError(f"Image width must be at least {min_width}px, got {width}px")
    if max_width is not None and width > max_width:
        raise ValueError(f"Image width must be at most {max_width}px, got {width}px")
    if min_height is not None and height < min_height:
        raise ValueError(
            f"Image height must be at least {min_height}px, got {height}px"
        )
    if max_height is not None and height > max_height:
        raise ValueError(f"Image height must be at most {max_height}px, got {height}px")


def validate_image_aspect_ratio(
    image: torch.Tensor,
    min_aspect_ratio: Optional[float] = None,
    max_aspect_ratio: Optional[float] = None,
):
    width, height = get_image_dimensions(image)
    aspect_ratio = width / height

    if min_aspect_ratio is not None and aspect_ratio < min_aspect_ratio:
        raise ValueError(
            f"Image aspect ratio must be at least {min_aspect_ratio}, got {aspect_ratio}"
        )
    if max_aspect_ratio is not None and aspect_ratio > max_aspect_ratio:
        raise ValueError(
            f"Image aspect ratio must be at most {max_aspect_ratio}, got {aspect_ratio}"
        )


def validate_video_dimensions(
    video: VideoInput,
    min_width: Optional[int] = None,
    max_width: Optional[int] = None,
    min_height: Optional[int] = None,
    max_height: Optional[int] = None,
):
    try:
        width, height = video.get_dimensions()
    except Exception as e:
        logging.error("Error getting dimensions of video: %s", e)
        return

    if min_width is not None and width < min_width:
        raise ValueError(f"Video width must be at least {min_width}px, got {width}px")
    if max_width is not None and width > max_width:
        raise ValueError(f"Video width must be at most {max_width}px, got {width}px")
    if min_height is not None and height < min_height:
        raise ValueError(
            f"Video height must be at least {min_height}px, got {height}px"
        )
    if max_height is not None and height > max_height:
        raise ValueError(f"Video height must be at most {max_height}px, got {height}px")


def validate_video_duration(
    video: VideoInput,
    min_duration: Optional[float] = None,
    max_duration: Optional[float] = None,
):
    try:
        duration = video.get_duration()
    except Exception as e:
        logging.error("Error getting duration of video: %s", e)
        return

    epsilon = 0.0001
    if min_duration is not None and min_duration - epsilon > duration:
        raise ValueError(
            f"Video duration must be at least {min_duration}s, got {duration}s"
        )
    if max_duration is not None and duration > max_duration + epsilon:
        raise ValueError(
            f"Video duration must be at most {max_duration}s, got {duration}s"
        )
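To make the duration check concrete, a small behavioral sketch (hypothetical stubs, same duck-typing as above) of the epsilon tolerance:

```python
from comfy_api_nodes.util.validation_utils import validate_video_duration

class ShortClip:
    def get_duration(self):
        return 1.99995  # just under 2s, but within the 0.0001 tolerance

class TooShort:
    def get_duration(self):
        return 1.5

# Passes: min_duration - epsilon (1.9999) is not greater than 1.99995.
validate_video_duration(ShortClip(), min_duration=2, max_duration=10)

try:
    validate_video_duration(TooShort(), min_duration=2, max_duration=10)
except ValueError as e:
    print(e)  # Video duration must be at least 2s, got 1.5s
```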
@@ -31,6 +31,7 @@ class T5TokenizerOptions:
            }
        }

+    CATEGORY = "_for_testing/conditioning"
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_options"
@@ -13,6 +13,7 @@ import os
import re
from io import BytesIO
from inspect import cleandoc
+import torch

from comfy.comfy_types import FileLocator

@@ -74,6 +75,24 @@ class ImageFromBatch:
        s = s_in[batch_index:batch_index + length].clone()
        return (s,)


+class ImageAddNoise:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "image": ("IMAGE",),
+                              "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "control_after_generate": True, "tooltip": "The random seed used for creating the noise."}),
+                              "strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
+                              }}
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "repeat"
+
+    CATEGORY = "image"
+
+    def repeat(self, image, seed, strength):
+        generator = torch.manual_seed(seed)
+        s = torch.clip((image + strength * torch.randn(image.size(), generator=generator, device="cpu").to(image)), min=0.0, max=1.0)
+        return (s,)
+
class SaveAnimatedWEBP:
    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

@@ -295,6 +314,7 @@ NODE_CLASS_MAPPINGS = {
    "ImageCrop": ImageCrop,
    "RepeatImageBatch": RepeatImageBatch,
    "ImageFromBatch": ImageFromBatch,
+    "ImageAddNoise": ImageAddNoise,
    "SaveAnimatedWEBP": SaveAnimatedWEBP,
    "SaveAnimatedPNG": SaveAnimatedPNG,
    "SaveSVGNode": SaveSVGNode,
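The math inside the new `ImageAddNoise.repeat` is easy to check standalone; a sketch (not part of the commit) assuming a float IMAGE tensor in [0, 1] with ComfyUI's (B, H, W, C) layout:

```python
import torch

image = torch.rand((1, 64, 64, 3))  # toy image batch
seed, strength = 0, 0.5

# torch.manual_seed seeds the default CPU generator and returns it,
# making the noise reproducible for a given seed value.
generator = torch.manual_seed(seed)
noise = torch.randn(image.size(), generator=generator, device="cpu").to(image)

# Blend in the noise, then clamp back to the valid [0, 1] range.
noisy = torch.clip(image + strength * noise, min=0.0, max=1.0)
```

With `strength=0.0` the image passes through unchanged; at `strength=1.0` a full unit-variance Gaussian is added before clamping.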
@@ -8,7 +8,8 @@ class StringConcatenate():
        return {
            "required": {
                "string_a": (IO.STRING, {"multiline": True}),
-                "string_b": (IO.STRING, {"multiline": True})
+                "string_b": (IO.STRING, {"multiline": True}),
+                "delimiter": (IO.STRING, {"multiline": False, "default": ""})
            }
        }

@@ -16,8 +17,8 @@ class StringConcatenate():
    FUNCTION = "execute"
    CATEGORY = "utils/string"

-    def execute(self, string_a, string_b, **kwargs):
-        return string_a + string_b,
+    def execute(self, string_a, string_b, delimiter, **kwargs):
+        return delimiter.join((string_a, string_b)),


class StringSubstring():
    @classmethod
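The change in a nutshell: concatenation now goes through `str.join`, so the default empty delimiter reproduces the old `string_a + string_b` behavior exactly:

```python
string_a, string_b = "hello", "world"

print("".join((string_a, string_b)))    # "helloworld" (previous behavior)
print(", ".join((string_a, string_b)))  # "hello, world"
```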