From 33e71e0e7925442bf1c261c8d921f2c541606529 Mon Sep 17 00:00:00 2001
From: ethan
Date: Fri, 24 Jan 2025 01:37:44 -0800
Subject: [PATCH] update openvino backend

---
 comfy/cli_args.py         | 1 +
 comfy/model_management.py | 2 +-
 comfy/model_patcher.py    | 9 +++++++++
 comfy/sampler_helpers.py  | 7 ++++++-
 4 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 812798bf..36926581 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -83,6 +83,7 @@ fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text
 parser.add_argument("--force-channels-last", action="store_true", help="Force channels last format when inferencing the models.")
 
 parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
+parser.add_argument("--openvino", type=str, nargs="?", metavar="OPENVINO_DEVICE", const="GPU", default=None, help="Run OpenVINO inference engine on the specified device.")
 
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
diff --git a/comfy/model_management.py b/comfy/model_management.py
index f6dfc18b..29aa60d6 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -93,7 +93,7 @@ try:
 except:
     npu_available = False
 
-if args.cpu:
+if args.cpu or args.openvino:
     cpu_state = CPUState.CPU
 
 def is_intel_xpu():
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 0501f7b3..813e7be1 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -469,6 +469,15 @@ class ModelPatcher:
                     current_patches = self.patches.get(key, [])
                     current_patches.append((strength_patch, patches[k], strength_model, offset, function))
                     self.patches[key] = current_patches
+                else:
+                    new_key = key.replace("diffusion_model", "diffusion_model._orig_mod")
+                    if new_key in model_sd:
+                        p.add(k)
+                        if key in self.patches:
+                            self.patches.pop(key)
+                        current_patches = self.patches.get(new_key, [])
+                        current_patches.append((strength_patch, patches[k], strength_model, offset, function))
+                        self.patches[new_key] = current_patches
 
             self.patches_uuid = uuid.uuid4()
         return list(p)
diff --git a/comfy/sampler_helpers.py b/comfy/sampler_helpers.py
index 92ec7ca7..dc7e07aa 100644
--- a/comfy/sampler_helpers.py
+++ b/comfy/sampler_helpers.py
@@ -1,4 +1,5 @@
 from __future__ import annotations
+from comfy.cli_args import args
 import uuid
 import comfy.model_management
 import comfy.conds
@@ -114,7 +115,11 @@ def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None
     minimum_memory_required = model.memory_required([noise_shape[0]] + list(noise_shape[1:])) + inference_memory
     comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required, minimum_memory_required=minimum_memory_required)
     real_model = model.model
-
+    if args.openvino and real_model.diffusion_model.__class__.__name__ == "UNetModel":
+        import openvino.torch
+        import torch
+        print("Unet is being compiled using OpenVINO")
+        real_model.diffusion_model = torch.compile(real_model.diffusion_model, backend="openvino", options={"device": args.openvino, "model_caching": False, "cache_dir": "./model_cache"})
     return real_model, conds, models
 
 def cleanup_models(conds, models):