Merge branch 'master' into worksplit-multigpu

Commit 605893d3cf
README.md (37 lines changed)

@@ -31,10 +31,24 @@
 </div>
 
-This ui will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. For some workflow examples and see what ComfyUI can do you can check out:
+ComfyUI lets you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. Available on Windows, Linux, and macOS.
 
-### [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/)
+## Get Started
+
+#### [Desktop Application](https://www.comfy.org/download)
+- The easiest way to get started.
+- Available on Windows & macOS.
+
+#### [Windows Portable Package](#installing)
+- Get the latest commits and completely portable.
+- Available on Windows.
+
+#### [Manual Install](#manual-install-windows-linux)
+Supports all operating systems and GPU types (NVIDIA, AMD, Intel, Apple Silicon, Ascend).
+
+## Examples
+
+See what ComfyUI can do with the [example workflows](https://comfyanonymous.github.io/ComfyUI_examples/).
 
-### [Installing ComfyUI](#installing)
 
 ## Features
 - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything.
@@ -121,7 +135,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
 
 # Installing
 
-## Windows
+## Windows Portable
 
 There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases).
 
@@ -141,6 +155,15 @@ See the [Config file](extra_model_paths.yaml.example) to set the search paths for models.
 
 To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb)
 
+
+## [comfy-cli](https://docs.comfy.org/comfy-cli/getting-started)
+
+You can install and start ComfyUI using comfy-cli:
+
+```bash
+pip install comfy-cli
+comfy install
+```
 
 ## Manual Install (Windows, Linux)
 
 python 3.13 is supported but using 3.12 is recommended because some custom nodes and their dependencies might not support it yet.
@@ -293,6 +316,8 @@ Use `--tls-keyfile key.pem --tls-certfile cert.pem` to enable TLS/SSL
 
 ## Support and dev channel
 
+[Discord](https://comfy.org/discord): Try the #help or #feedback channels.
+
 [Matrix space: #comfyui_space:matrix.org](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) (it's like discord but open source).
 
 See also: [https://www.comfy.org/](https://www.comfy.org/)
@@ -309,7 +334,7 @@ For any bugs, issues, or feature requests related to the frontend
 
 The new frontend is now the default for ComfyUI. However, please note:
 
-1. The frontend in the main ComfyUI repository is updated weekly.
+1. The frontend in the main ComfyUI repository is updated fortnightly.
 2. Daily releases are available in the separate frontend repository.
 
 To use the most up-to-date frontend version:
@@ -326,7 +351,7 @@ To use the most up-to-date frontend version:
 --front-end-version Comfy-Org/ComfyUI_frontend@1.2.2
 ```
 
-This approach allows you to easily switch between the stable weekly release and the cutting-edge daily updates, or even specific versions for testing purposes.
+This approach allows you to easily switch between the stable fortnightly release and the cutting-edge daily updates, or even specific versions for testing purposes.
 
 ### Accessing the Legacy Frontend
 
@@ -1,8 +1,9 @@
 from aiohttp import web
 from typing import Optional
-from folder_paths import folder_names_and_paths
+from folder_paths import folder_names_and_paths, get_directory_by_type
 from api_server.services.terminal_service import TerminalService
 import app.logger
+import os
 
 class InternalRoutes:
     '''
@@ -50,6 +51,20 @@ class InternalRoutes:
                 response[key] = folder_names_and_paths[key][0]
             return web.json_response(response)
 
+        @self.routes.get('/files/{directory_type}')
+        async def get_files(request: web.Request) -> web.Response:
+            directory_type = request.match_info['directory_type']
+            if directory_type not in ("output", "input", "temp"):
+                return web.json_response({"error": "Invalid directory type"}, status=400)
+
+            directory = get_directory_by_type(directory_type)
+            sorted_files = sorted(
+                (entry for entry in os.scandir(directory) if entry.is_file()),
+                key=lambda entry: -entry.stat().st_mtime
+            )
+            return web.json_response([entry.name for entry in sorted_files], status=200)
+
+
     def get_app(self):
         if self._app is None:
             self._app = web.Application()
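A quick way to sanity-check the new route, assuming a ComfyUI server running on the default port 8188 (the port and the flat JSON-array response shape are assumptions read off the handler above):

```python
import json
import urllib.request

# The handler above only accepts these three directory types.
for directory_type in ("output", "input", "temp"):
    url = f"http://127.0.0.1:8188/internal/files/{directory_type}"
    with urllib.request.urlopen(url) as resp:
        files = json.loads(resp.read())
    # Files come back newest-first (sorted by negative mtime).
    print(directory_type, files[:5])
```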
@@ -66,13 +66,26 @@ class IO(StrEnum):
         b = frozenset(value.split(","))
         return not (b.issubset(a) or a.issubset(b))
 
+
+class RemoteInputOptions(TypedDict):
+    route: str
+    """The route to the remote source."""
+    refresh_button: bool
+    """Specifies whether to show a refresh button in the UI below the widget."""
+    control_after_refresh: Literal["first", "last"]
+    """Specifies the control after the refresh button is clicked. If "first", the first item will be automatically selected, and so on."""
+    timeout: int
+    """The maximum amount of time to wait for a response from the remote source in milliseconds."""
+    max_retries: int
+    """The maximum number of retries before aborting the request."""
+    refresh: int
+    """The TTL of the remote input's value in milliseconds. Specifies the interval at which the remote input's value is refreshed."""
+
+
 class InputTypeOptions(TypedDict):
     """Provides type hinting for the return type of the INPUT_TYPES node function.
 
     Due to IDE limitations with unions, for now all options are available for all types (e.g. `label_on` is hinted even when the type is not `IO.BOOLEAN`).
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_datatypes
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/datatypes
     """
 
     default: bool | str | float | int | list | tuple
@@ -113,6 +126,14 @@ class InputTypeOptions(TypedDict):
     # defaultVal: str
     dynamicPrompts: bool
     """Causes the front-end to evaluate dynamic prompts (``STRING``)"""
+    # class InputTypeCombo(InputTypeOptions):
+    image_upload: bool
+    """Specifies whether the input should have an image upload button and image preview attached to it. Requires that the input's name is `image`."""
+    image_folder: Literal["input", "output", "temp"]
+    """Specifies which folder to get preview images from if the input has the ``image_upload`` flag.
+    """
+    remote: RemoteInputOptions
+    """Specifies the configuration for a remote input."""
 
 
 class HiddenInputTypeDict(TypedDict):
@@ -133,7 +154,7 @@ class HiddenInputTypeDict(TypedDict):
 class InputTypeDict(TypedDict):
     """Provides type hinting for node INPUT_TYPES.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_more_on_inputs
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/more_on_inputs
     """
 
     required: dict[str, tuple[IO, InputTypeOptions]]
@@ -143,14 +164,14 @@ class InputTypeDict(TypedDict):
     hidden: HiddenInputTypeDict
     """Offers advanced functionality and server-client communication.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_more_on_inputs#hidden-inputs
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/more_on_inputs#hidden-inputs
     """
 
 
 class ComfyNodeABC(ABC):
     """Abstract base class for Comfy nodes. Includes the names and expected types of attributes.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview
     """
 
     DESCRIPTION: str
@@ -167,7 +188,7 @@ class ComfyNodeABC(ABC):
     CATEGORY: str
     """The category of the node, as per the "Add Node" menu.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#category
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#category
     """
     EXPERIMENTAL: bool
     """Flags a node as experimental, informing users that it may change or not work as expected."""
@@ -181,9 +202,9 @@ class ComfyNodeABC(ABC):
 
         * Must include the ``required`` key, which describes all inputs that must be connected for the node to execute.
         * The ``optional`` key can be added to describe inputs which do not need to be connected.
-        * The ``hidden`` key offers some advanced functionality. More info at: https://docs.comfy.org/essentials/custom_node_more_on_inputs#hidden-inputs
+        * The ``hidden`` key offers some advanced functionality. More info at: https://docs.comfy.org/custom-nodes/backend/more_on_inputs#hidden-inputs
 
-        Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#input-types
+        Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#input-types
         """
         return {"required": {}}
 
@@ -198,7 +219,7 @@ class ComfyNodeABC(ABC):
 
     By default, a node is not considered an output. Set ``OUTPUT_NODE = True`` to specify that it is.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#output-node
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#output-node
    """
     INPUT_IS_LIST: bool
     """A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes.
@@ -209,7 +230,7 @@ class ComfyNodeABC(ABC):
 
     A node can also override the default input behaviour and receive the whole list in a single call. This is done by setting a class attribute `INPUT_IS_LIST` to ``True``.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_lists#list-processing
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing
     """
     OUTPUT_IS_LIST: tuple[bool]
     """A tuple indicating which node outputs are lists, but will be connected to nodes that expect individual items.
@@ -227,7 +248,7 @@ class ComfyNodeABC(ABC):
     the node should provide a class attribute `OUTPUT_IS_LIST`, which is a ``tuple[bool]``, of the same length as `RETURN_TYPES`,
     specifying which outputs which should be so treated.
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_lists#list-processing
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing
     """
 
     RETURN_TYPES: tuple[IO]
@@ -237,19 +258,19 @@ class ComfyNodeABC(ABC):
 
     RETURN_TYPES = (IO.INT, "INT", "CUSTOM_TYPE")
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#return-types
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#return-types
     """
     RETURN_NAMES: tuple[str]
     """The output slot names for each item in `RETURN_TYPES`, e.g. ``RETURN_NAMES = ("count", "filter_string")``
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#return-names
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#return-names
     """
     OUTPUT_TOOLTIPS: tuple[str]
     """A tuple of strings to use as tooltips for node outputs, one for each item in `RETURN_TYPES`."""
     FUNCTION: str
     """The name of the function to execute as a literal string, e.g. `FUNCTION = "execute"`
 
-    Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#function
+    Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#function
     """
 
 
@@ -267,7 +288,7 @@ class CheckLazyMixin:
         Params should match the nodes execution ``FUNCTION`` (self, and all inputs by name).
         Will be executed repeatedly until it returns an empty list, or all requested items were already evaluated (and sent as params).
 
-        Comfy Docs: https://docs.comfy.org/essentials/custom_node_lazy_evaluation#defining-check-lazy-status
+        Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lazy_evaluation#defining-check-lazy-status
        """
 
        need = [name for name in kwargs if kwargs[name] is None]
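To see how these options are meant to compose, here is a hedged sketch of a custom node whose COMBO input is populated from a remote route. The class name is hypothetical and only the option keys come from the TypedDicts above; the `LoadImageOutput` node later in this commit is the real in-tree user.

```python
class RemoteComboExample:
    """Hypothetical node: a COMBO whose choices are fetched from the server."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "item": ("COMBO", {
                    "remote": {
                        "route": "/internal/files/output",  # endpoint added earlier in this diff
                        "refresh_button": True,             # show a refresh button under the widget
                        "control_after_refresh": "first",   # auto-select the first item after refresh
                    },
                }),
            }
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "pick"
    CATEGORY = "example"

    def pick(self, item):
        return (item,)
```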
@@ -310,7 +310,7 @@ class HunyuanVideo(nn.Module):
             shape[i] = shape[i] // self.patch_size[i]
         img = img.reshape([img.shape[0]] + shape + [self.out_channels] + self.patch_size)
         img = img.permute(0, 4, 1, 5, 2, 6, 3, 7)
-        img = img.reshape(initial_shape)
+        img = img.reshape(initial_shape[0], self.out_channels, initial_shape[2], initial_shape[3], initial_shape[4])
         return img
 
     def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, control=None, transformer_options={}, **kwargs):
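The one-line change matters once `out_channels` differs from the input latent channel count (SkyReels img2video feeds 32 channels in but still predicts 16 out, per the model_detection change below): `initial_shape` describes the input, so reshaping back to it would mislabel the channel axis. A standalone sketch of the unpatchify step with made-up sizes:

```python
import torch

b, out_channels = 1, 16      # hypothetical batch and output channels
t, h, w = 4, 8, 8            # latent grid after patch embedding
pt, ph, pw = 1, 2, 2         # patch_size, as in the config above

# Token sequence as produced inside the forward pass.
img = torch.randn(b, t * h * w, out_channels * pt * ph * pw)

img = img.reshape(b, t, h, w, out_channels, pt, ph, pw)
img = img.permute(0, 4, 1, 5, 2, 6, 3, 7)  # (b, c, t, pt, h, ph, w, pw)
# Rebuild from the output channel count rather than the input latent shape.
img = img.reshape(b, out_channels, t * pt, h * ph, w * pw)
print(img.shape)  # torch.Size([1, 16, 4, 16, 16])
```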
@@ -30,38 +30,24 @@ ops = comfy.ops.disable_weight_init
 
 FORCE_UPCAST_ATTENTION_DTYPE = model_management.force_upcast_attention_dtype()
 
-def get_attn_precision(attn_precision):
+def get_attn_precision(attn_precision, current_dtype):
     if args.dont_upcast_attention:
         return None
-    if FORCE_UPCAST_ATTENTION_DTYPE is not None:
-        return FORCE_UPCAST_ATTENTION_DTYPE
+
+    if FORCE_UPCAST_ATTENTION_DTYPE is not None and current_dtype in FORCE_UPCAST_ATTENTION_DTYPE:
+        return FORCE_UPCAST_ATTENTION_DTYPE[current_dtype]
     return attn_precision
 
 def exists(val):
     return val is not None
 
 
-def uniq(arr):
-    return{el: True for el in arr}.keys()
-
-
 def default(val, d):
     if exists(val):
         return val
     return d
 
-
-def max_neg_value(t):
-    return -torch.finfo(t.dtype).max
-
-
-def init_(tensor):
-    dim = tensor.shape[-1]
-    std = 1 / math.sqrt(dim)
-    tensor.uniform_(-std, std)
-    return tensor
-
 
 # feedforward
 class GEGLU(nn.Module):
     def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=ops):
@@ -96,7 +82,7 @@ def Normalize(in_channels, dtype=None, device=None):
     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
 
 def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
-    attn_precision = get_attn_precision(attn_precision)
+    attn_precision = get_attn_precision(attn_precision, q.dtype)
 
     if skip_reshape:
         b, _, _, dim_head = q.shape
@@ -165,7 +151,7 @@ def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
 
 
 def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
-    attn_precision = get_attn_precision(attn_precision)
+    attn_precision = get_attn_precision(attn_precision, query.dtype)
 
     if skip_reshape:
         b, _, _, dim_head = query.shape
@@ -235,7 +221,7 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
     return hidden_states
 
 def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
-    attn_precision = get_attn_precision(attn_precision)
+    attn_precision = get_attn_precision(attn_precision, q.dtype)
 
     if skip_reshape:
         b, _, _, dim_head = q.shape
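The net effect of the attention change is that forced upcasting is now keyed on the tensor's current dtype instead of applying unconditionally. A minimal self-contained sketch of the new lookup (the map literal mirrors what `force_upcast_attention_dtype` returns further down in this commit; `dont_upcast` stands in for `args.dont_upcast_attention`):

```python
import torch

# model_management.force_upcast_attention_dtype() now returns a dtype map or None.
FORCE_UPCAST_ATTENTION_DTYPE = {torch.float16: torch.float32}

def get_attn_precision(attn_precision, current_dtype, dont_upcast=False):
    if dont_upcast:
        return None
    if FORCE_UPCAST_ATTENTION_DTYPE is not None and current_dtype in FORCE_UPCAST_ATTENTION_DTYPE:
        return FORCE_UPCAST_ATTENTION_DTYPE[current_dtype]
    return attn_precision

print(get_attn_precision(None, torch.float16))   # torch.float32: fp16 attention is upcast
print(get_attn_precision(None, torch.bfloat16))  # None: bf16 is left alone
```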
@@ -871,6 +871,15 @@ class HunyuanVideo(BaseModel):
         if cross_attn is not None:
             out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
 
+        image = kwargs.get("concat_latent_image", None)
+        noise = kwargs.get("noise", None)
+
+        if image is not None:
+            padding_shape = (noise.shape[0], 16, noise.shape[2] - 1, noise.shape[3], noise.shape[4])
+            latent_padding = torch.zeros(padding_shape, device=noise.device, dtype=noise.dtype)
+            image_latents = torch.cat([image.to(noise), latent_padding], dim=2)
+            out['c_concat'] = comfy.conds.CONDNoiseShape(self.process_latent_in(image_latents))
+
         guidance = kwargs.get("guidance", 6.0)
         if guidance is not None:
             out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
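The new `c_concat` branch implements image-to-video conditioning: a single encoded frame is zero-padded along the temporal axis so it lines up with the noise latent before concatenation. A shape-only sketch with hypothetical latent sizes:

```python
import torch

noise = torch.randn(1, 16, 9, 32, 32)  # batch, channels, latent frames, h, w
image = torch.randn(1, 16, 1, 32, 32)  # one encoded conditioning frame

# Pad with zeros for every latent frame except the first.
padding_shape = (noise.shape[0], 16, noise.shape[2] - 1, noise.shape[3], noise.shape[4])
latent_padding = torch.zeros(padding_shape, device=noise.device, dtype=noise.dtype)
image_latents = torch.cat([image.to(noise), latent_padding], dim=2)

print(image_latents.shape)  # torch.Size([1, 16, 9, 32, 32]), matches the noise latent
```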
@@ -136,7 +136,7 @@ def detect_unet_config(state_dict, key_prefix):
     if '{}txt_in.individual_token_refiner.blocks.0.norm1.weight'.format(key_prefix) in state_dict_keys: #Hunyuan Video
         dit_config = {}
         dit_config["image_model"] = "hunyuan_video"
-        dit_config["in_channels"] = 16
+        dit_config["in_channels"] = state_dict['{}img_in.proj.weight'.format(key_prefix)].shape[1] #SkyReels img2video has 32 input channels
         dit_config["patch_size"] = [1, 2, 2]
         dit_config["out_channels"] = 16
         dit_config["vec_in_dim"] = 768
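Reading the channel count from the checkpoint itself is what lets one detection branch serve both the 16-channel base model and the 32-channel SkyReels img2video weights. A toy illustration (the tensor shapes are hypothetical; for a patch-embedding projection the weight is laid out as `(hidden_dim, in_channels, *patch_size)`, so `shape[1]` recovers the input channel count):

```python
import torch

# Hypothetical state dicts containing only the key the detection code inspects.
checkpoints = {
    "hunyuan_video":      {"img_in.proj.weight": torch.zeros(3072, 16, 1, 2, 2)},
    "skyreels_img2video": {"img_in.proj.weight": torch.zeros(3072, 32, 1, 2, 2)},
}

for name, sd in checkpoints.items():
    in_channels = sd["img_in.proj.weight"].shape[1]
    print(f"{name}: in_channels = {in_channels}")
```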
@@ -245,7 +245,7 @@ def is_amd():
 
 MIN_WEIGHT_MEMORY_RATIO = 0.4
 if is_nvidia():
-    MIN_WEIGHT_MEMORY_RATIO = 0.1
+    MIN_WEIGHT_MEMORY_RATIO = 0.0
 
 ENABLE_PYTORCH_ATTENTION = False
 if args.use_pytorch_cross_attention:
@@ -281,9 +281,12 @@ if ENABLE_PYTORCH_ATTENTION:
     torch.backends.cuda.enable_flash_sdp(True)
     torch.backends.cuda.enable_mem_efficient_sdp(True)
 
+
+PRIORITIZE_FP16 = False  # TODO: remove and replace with something that shows exactly which dtype is faster than the other
 try:
     if is_nvidia() and args.fast:
         torch.backends.cuda.matmul.allow_fp16_accumulation = True
+        PRIORITIZE_FP16 = True  # TODO: limit to cards where it actually boosts performance
 except:
     pass
 
@@ -710,6 +713,10 @@ def unet_dtype(device=None, model_params=0, supported_dtypes=[torch.float16, torch.bfloat16, torch.float32]):
         if model_params * 2 > free_model_memory:
             return fp8_dtype
 
+    if PRIORITIZE_FP16:
+        if torch.float16 in supported_dtypes and should_use_fp16(device=device, model_params=model_params):
+            return torch.float16
+
     for dt in supported_dtypes:
         if dt == torch.float16 and should_use_fp16(device=device, model_params=model_params):
             if torch.float16 in supported_dtypes:
@@ -972,11 +979,11 @@ def force_upcast_attention_dtype():
     upcast = args.force_upcast_attention
 
     macos_version = mac_version()
-    if macos_version is not None and ((14, 5) <= macos_version <= (15, 2)): # black image bug on recent versions of macOS
+    if macos_version is not None and ((14, 5) <= macos_version < (16,)): # black image bug on recent versions of macOS
         upcast = True
 
     if upcast:
-        return torch.float32
+        return {torch.float16: torch.float32}
     else:
         return None
 
@@ -1050,8 +1057,6 @@ def is_directml_enabled():
     return False
 
 def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
-    global directml_enabled
-
     if device is not None:
         if is_device_cpu(device):
             return False
@@ -1062,8 +1067,8 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
     if FORCE_FP32:
         return False
 
-    if directml_enabled:
-        return False
+    if is_directml_enabled():
+        return True
 
     if (device is not None and is_device_mps(device)) or mps_mode():
         return True
@@ -1150,7 +1155,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
 
     bf16_works = torch.cuda.is_bf16_supported()
 
-    if bf16_works or manual_cast:
+    if bf16_works and manual_cast:
         free_model_memory = maximum_vram_for_weights(device)
         if (not prioritize_performance) or model_params * 4 > free_model_memory:
             return True
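Two details in this file are easy to misread. The macOS guard uses Python tuple comparison, so `(14, 5) <= version < (16,)` covers every 14.5+ and 15.x build; and `force_upcast_attention_dtype` now returns a dtype map rather than a bare dtype. A small sketch of both behaviours, with the argument standing in for `args.force_upcast_attention`:

```python
import torch

def force_upcast_sketch(macos_version, force_flag=False):
    # Tuple comparison: (15, 3) is >= (14, 5) and < (16,), so it matches.
    upcast = force_flag
    if macos_version is not None and (14, 5) <= macos_version < (16,):
        upcast = True
    # Only fp16 attention gets upcast to fp32 under the new map-based contract.
    return {torch.float16: torch.float32} if upcast else None

print(force_upcast_sketch((15, 3)))  # {torch.float16: torch.float32}
print(force_upcast_sketch((13, 6)))  # None
```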
comfy_extras/nodes_video.py (new file, 75 lines)

@@ -0,0 +1,75 @@
+import os
+import av
+import torch
+import folder_paths
+import json
+from fractions import Fraction
+
+
+class SaveWEBM:
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required":
+                    {"images": ("IMAGE", ),
+                     "filename_prefix": ("STRING", {"default": "ComfyUI"}),
+                     "codec": (["vp9", "av1"],),
+                     "fps": ("FLOAT", {"default": 24.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
+                     "crf": ("FLOAT", {"default": 32.0, "min": 0, "max": 63.0, "step": 1, "tooltip": "Higher crf means lower quality with a smaller file size, lower crf means higher quality higher filesize."}),
+                     },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ()
+    FUNCTION = "save_images"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "image/video"
+
+    EXPERIMENTAL = True
+
+    def save_images(self, images, codec, fps, filename_prefix, crf, prompt=None, extra_pnginfo=None):
+        filename_prefix += self.prefix_append
+        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
+
+        file = f"{filename}_{counter:05}_.webm"
+        container = av.open(os.path.join(full_output_folder, file), mode="w")
+
+        if prompt is not None:
+            container.metadata["prompt"] = json.dumps(prompt)
+
+        if extra_pnginfo is not None:
+            for x in extra_pnginfo:
+                container.metadata[x] = json.dumps(extra_pnginfo[x])
+
+        codec_map = {"vp9": "libvpx-vp9", "av1": "libaom-av1"}
+        stream = container.add_stream(codec_map[codec], rate=Fraction(round(fps * 1000), 1000))
+        stream.width = images.shape[-2]
+        stream.height = images.shape[-3]
+        stream.pix_fmt = "yuv420p"
+        stream.bit_rate = 0
+        stream.options = {'crf': str(crf)}
+
+        for frame in images:
+            frame = av.VideoFrame.from_ndarray(torch.clamp(frame[..., :3] * 255, min=0, max=255).to(device=torch.device("cpu"), dtype=torch.uint8).numpy(), format="rgb24")
+            for packet in stream.encode(frame):
+                container.mux(packet)
+        container.close()
+
+        results = [{
+            "filename": file,
+            "subfolder": subfolder,
+            "type": self.type
+        }]
+
+        return {"ui": {"images": results, "animated": (True,)}} # TODO: frontend side
+
+
+NODE_CLASS_MAPPINGS = {
+    "SaveWEBM": SaveWEBM,
+}
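For readers unfamiliar with PyAV, here is a minimal standalone sketch of the encode loop the node relies on, writing one second of random frames to a WEBM. The file name and frame size are made up, and unlike the node above this sketch also flushes the encoder before closing:

```python
from fractions import Fraction

import av
import numpy as np

container = av.open("test.webm", mode="w")
stream = container.add_stream("libvpx-vp9", rate=Fraction(24000, 1000))  # 24 fps
stream.width = 64
stream.height = 64
stream.pix_fmt = "yuv420p"
stream.options = {"crf": "32"}  # same quality knob the node exposes

for _ in range(24):
    rgb = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    frame = av.VideoFrame.from_ndarray(rgb, format="rgb24")
    for packet in stream.encode(frame):
        container.mux(packet)

for packet in stream.encode():  # flush buffered packets
    container.mux(packet)
container.close()
```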
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.14"
+__version__ = "0.3.15"
nodes.py (33 lines changed)

@@ -1763,6 +1763,36 @@ class LoadImageMask:
 
         return True
 
+
+class LoadImageOutput(LoadImage):
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "image": ("COMBO", {
+                    "image_upload": True,
+                    "image_folder": "output",
+                    "remote": {
+                        "route": "/internal/files/output",
+                        "refresh_button": True,
+                        "control_after_refresh": "first",
+                    },
+                }),
+            }
+        }
+
+    DESCRIPTION = "Load an image from the output folder. When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration."
+    EXPERIMENTAL = True
+    FUNCTION = "load_image_output"
+
+    def load_image_output(self, image):
+        return self.load_image(f"{image} [output]")
+
+    @classmethod
+    def VALIDATE_INPUTS(s, image):
+        return True
+
+
 class ImageScale:
     upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
     crop_methods = ["disabled", "center"]
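`load_image_output` simply delegates to `LoadImage.load_image` with an annotated name; the `[output]` suffix is the existing `folder_paths` convention for resolving a file against the output directory instead of the default input directory. A sketch of the resolution step, assuming a ComfyUI checkout on the path and assuming `folder_paths.get_annotated_filepath` as the resolver (the file name is made up):

```python
import folder_paths

# "name [output]" resolves against the output directory; plain names default to input.
name = "ComfyUI_00001_.png [output]"
print(folder_paths.get_annotated_filepath(name))
# e.g. /path/to/ComfyUI/output/ComfyUI_00001_.png
```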
@@ -1949,6 +1979,7 @@ NODE_CLASS_MAPPINGS = {
     "PreviewImage": PreviewImage,
     "LoadImage": LoadImage,
     "LoadImageMask": LoadImageMask,
+    "LoadImageOutput": LoadImageOutput,
     "ImageScale": ImageScale,
     "ImageScaleBy": ImageScaleBy,
     "ImageInvert": ImageInvert,
@@ -2049,6 +2080,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "PreviewImage": "Preview Image",
     "LoadImage": "Load Image",
     "LoadImageMask": "Load Image (as Mask)",
+    "LoadImageOutput": "Load Image (from Outputs)",
     "ImageScale": "Upscale Image",
     "ImageScaleBy": "Upscale Image By",
     "ImageUpscaleWithModel": "Upscale Image (using Model)",
@@ -2234,6 +2266,7 @@ def init_builtin_extra_nodes():
         "nodes_multigpu.py",
         "nodes_load_3d.py",
         "nodes_cosmos.py",
+        "nodes_video.py",
         "nodes_lumina2.py",
     ]
 
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.14"
+version = "0.3.15"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
@@ -8,7 +8,8 @@ transformers>=4.28.1
 tokenizers>=0.13.3
 sentencepiece
 safetensors>=0.4.2
-aiohttp
+aiohttp>=3.11.8
+yarl>=1.18.0
 pyyaml
 Pillow
 scipy
@@ -19,3 +20,4 @@ psutil
 kornia>=0.7.1
 spandrel
 soundfile
+av
@@ -145,7 +145,7 @@ def test_load_extra_model_paths_expands_appdata(
     else:
         expected_base_path = '/Users/TestUser/AppData/Roaming/ComfyUI'
     expected_calls = [
-        ('checkpoints', os.path.join(expected_base_path, 'models/checkpoints'), False),
+        ('checkpoints', os.path.normpath(os.path.join(expected_base_path, 'models/checkpoints')), False),
     ]
 
     assert mock_add_model_folder_path.call_count == len(expected_calls)
@@ -197,8 +197,8 @@ def test_load_extra_path_config_relative_base_path(
 
     load_extra_path_config(dummy_yaml_name)
 
-    expected_checkpoints = os.path.abspath(os.path.join(str(tmp_path), sub_folder, "checkpoints"))
-    expected_some_value = os.path.abspath(os.path.join(str(tmp_path), sub_folder, "some_value"))
+    expected_checkpoints = os.path.abspath(os.path.join(str(tmp_path), "my_rel_base", "checkpoints"))
+    expected_some_value = os.path.abspath(os.path.join(str(tmp_path), "my_rel_base", "some_value"))
 
     actual_paths = folder_paths.folder_names_and_paths["checkpoints"][0]
     assert len(actual_paths) == 1, "Should have one path added for 'checkpoints'."
@@ -29,5 +29,6 @@ def load_extra_path_config(yaml_path):
                 full_path = os.path.join(base_path, full_path)
             elif not os.path.isabs(full_path):
                 full_path = os.path.abspath(os.path.join(yaml_dir, y))
-            logging.info("Adding extra search path {} {}".format(x, full_path))
-            folder_paths.add_model_folder_path(x, full_path, is_default)
+            normalized_path = os.path.normpath(full_path)
+            logging.info("Adding extra search path {} {}".format(x, normalized_path))
+            folder_paths.add_model_folder_path(x, normalized_path, is_default)
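Normalizing before registering is what the updated tests above assert: mixed separators and redundant segments collapse to one canonical form, so path comparisons behave the same regardless of how the YAML was written. A quick illustration:

```python
import os

# normpath collapses "." segments and doubled separators
# (and on Windows also converts "/" to "\\").
print(os.path.normpath("/base/./my_rel_base//checkpoints"))
# /base/my_rel_base/checkpoints
print(os.path.normpath("models/../models/checkpoints"))
# models/checkpoints
```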