Show a better error when the workflow OOMs. (#8574)
commit e9e9a031a8 (parent d7430c529a)
execution.py · 11 changed lines (7 additions, 4 deletions)
@@ -429,17 +429,20 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, promp
         logging.error(f"!!! Exception during processing !!! {ex}")
         logging.error(traceback.format_exc())
+        tips = ""
 
+        if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
+            tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
+            logging.error("Got an OOM, unloading all loaded models.")
+            comfy.model_management.unload_all_models()
+
         error_details = {
             "node_id": real_node_id,
-            "exception_message": str(ex),
+            "exception_message": "{}\n{}".format(ex, tips),
             "exception_type": exception_type,
             "traceback": traceback.format_tb(tb),
             "current_inputs": input_data_formatted
         }
-        if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
-            logging.error("Got an OOM, unloading all loaded models.")
-            comfy.model_management.unload_all_models()
 
         return (ExecutionResult.FAILURE, error_details, ex)
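For illustration, a minimal self-contained sketch of the pattern this commit introduces: catch the exception, append a human-readable tip when it is an out-of-memory error, and surface the combined message in the structured error details. The names run_node and oom_node are hypothetical, and MemoryError stands in for comfy.model_management.OOM_EXCEPTION (which in ComfyUI typically aliases the GPU backend's out-of-memory exception), so the example runs without a GPU.

# Minimal sketch of the error-reporting pattern above; not ComfyUI's actual code.
# MemoryError stands in for comfy.model_management.OOM_EXCEPTION, and run_node /
# oom_node are hypothetical names used only for this example.
import logging
import traceback

OOM_EXCEPTION = MemoryError  # assumption: stand-in for the real OOM exception type


def run_node(node_fn, node_id, **inputs):
    try:
        return {"ok": True, "output": node_fn(**inputs)}
    except Exception as ex:
        logging.error(f"!!! Exception during processing !!! {ex}")
        logging.error(traceback.format_exc())
        tips = ""

        if isinstance(ex, OOM_EXCEPTION):
            # Append a human-readable hint; ComfyUI also unloads all models here.
            tips = "This error means you ran out of memory on your GPU."
            logging.error("Got an OOM, unloading all loaded models.")

        return {
            "ok": False,
            "node_id": node_id,
            "exception_message": "{}\n{}".format(ex, tips),
            "exception_type": type(ex).__name__,
        }


if __name__ == "__main__":
    def oom_node():
        raise MemoryError("CUDA out of memory (simulated)")

    print(run_node(oom_node, node_id="5"))

Because the tip is folded into exception_message, front ends that already display that field show the OOM guidance without any change to the error protocol.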