mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-07-22 07:27:18 +08:00

* Support for async execution functions This commit adds support for node execution functions defined as async. When a node's execution function is defined as async, we can continue executing other nodes while it is processing. Standard uses of `await` should "just work", but people will still have to be careful if they spawn actual threads. Because torch doesn't really have async/await versions of functions, this won't particularly help with most locally-executing nodes, but it does work for e.g. web requests to other machines. In addition to the execute function, the `VALIDATE_INPUTS` and `check_lazy_status` functions can also be defined as async, though we'll only resolve one node at a time right now for those. * Add the execution model tests to CI * Add a missing file It looks like this got caught by .gitignore? There's probably a better place to put it, but I'm not sure what that is. * Add the websocket library for automated tests * Add additional tests for async error cases Also fixes one bug that was found when an async function throws an error after being scheduled on a task. * Add a feature flags message to reduce bandwidth We now only send 1 preview message of the latest type the client can support. We'll add a console warning when the client fails to send a feature flags message at some point in the future. * Add async tests to CI * Don't actually add new tests in this PR Will do it in a separate PR * Resolve unit test in GPU-less runner * Just remove the tests that GHA can't handle * Change line endings to UNIX-style * Avoid loading model_management.py so early Because model_management.py has a top-level `logging.info`, we have to be careful not to import that file before we call `setup_logging`. If we do, we end up having the default logging handler registered in addition to our custom one.
70 lines
1.8 KiB
Python
70 lines
1.8 KiB
Python
"""
|
|
Feature flags module for ComfyUI WebSocket protocol negotiation.
|
|
|
|
This module handles capability negotiation between frontend and backend,
|
|
allowing graceful protocol evolution while maintaining backward compatibility.
|
|
"""
from typing import Any, Dict

from comfy.cli_args import args
|
# Default server capabilities
|
|
SERVER_FEATURE_FLAGS: Dict[str, Any] = {
|
|
"supports_preview_metadata": True,
|
|
"max_upload_size": args.max_upload_size * 1024 * 1024, # Convert MB to bytes
|
|
}
|
|
|
|
|
|
def get_connection_feature(
|
|
sockets_metadata: Dict[str, Dict[str, Any]],
|
|
sid: str,
|
|
feature_name: str,
|
|
default: Any = False
|
|
) -> Any:
|
|
"""
|
|
Get a feature flag value for a specific connection.
|
|
|
|
Args:
|
|
sockets_metadata: Dictionary of socket metadata
|
|
sid: Session ID of the connection
|
|
feature_name: Name of the feature to check
|
|
default: Default value if feature not found
|
|
|
|
Returns:
|
|
Feature value or default if not found
|
|
"""
|
|
if sid not in sockets_metadata:
|
|
return default
|
|
|
|
return sockets_metadata[sid].get("feature_flags", {}).get(feature_name, default)
|
|
|
|
|
|
def supports_feature(
|
|
sockets_metadata: Dict[str, Dict[str, Any]],
|
|
sid: str,
|
|
feature_name: str
|
|
) -> bool:
|
|
"""
|
|
Check if a connection supports a specific feature.
|
|
|
|
Args:
|
|
sockets_metadata: Dictionary of socket metadata
|
|
sid: Session ID of the connection
|
|
feature_name: Name of the feature to check
|
|
|
|
Returns:
|
|
Boolean indicating if feature is supported
|
|
"""
|
|
return get_connection_feature(sockets_metadata, sid, feature_name, False) is True
|
|
|
|
|
|
def get_server_features() -> Dict[str, Any]:
|
|
"""
|
|
Get the server's feature flags.
|
|
|
|
Returns:
|
|
Dictionary of server feature flags
|
|
"""
|
|
return SERVER_FEATURE_FLAGS.copy()
|