update openvino backend

Commit 33e71e0e79 (parent ce557cfb88), from a mirror of https://github.com/comfyanonymous/ComfyUI.git.

This commit adds an --openvino CLI flag, forces the CPU device state when OpenVINO is selected, remaps ModelPatcher patch keys to account for torch.compile's _orig_mod wrapper, and compiles the UNet with the OpenVINO torch.compile backend in prepare_sampling.
comfy/cli_args.py
@@ -83,6 +83,7 @@ fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text
 parser.add_argument("--force-channels-last", action="store_true", help="Force channels last format when inferencing the models.")
 
 parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
+parser.add_argument("--openvino", type=str, default="GPU", help="Run OpenVINO inference engine on the specified device.")
 
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
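A minimal sketch of how the new flag behaves, using a bare argparse parser as a stand-in for comfy.cli_args (not ComfyUI's actual parser). Worth noting: because default="GPU", args.openvino is always a non-empty string, so the OpenVINO path is effectively on by default and the flag's value names the OpenVINO device.

    # Stand-in sketch of the new flag; not ComfyUI's real parser.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--openvino", type=str, default="GPU",
                        help="Run OpenVINO inference engine on the specified device.")

    args = parser.parse_args(["--openvino", "CPU"])
    print(args.openvino)  # "CPU" -- an OpenVINO device string such as "CPU", "GPU", "NPU"
    # With no flag given, args.openvino == "GPU", which is still truthy.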
comfy/model_management.py
@@ -93,7 +93,7 @@ try:
 except:
     npu_available = False
 
-if args.cpu:
+if args.cpu or args.openvino:
     cpu_state = CPUState.CPU
 
 def is_intel_xpu():
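The apparent intent is that selecting OpenVINO routes ComfyUI's device state to CPU: PyTorch-side tensors stay in host memory while the compiled graph targets the device named by --openvino. A minimal sketch of the gating, with a stand-in CPUState enum (the real one lives in comfy.model_management):

    # Stand-in sketch; the names mirror the diff but this is not ComfyUI's module.
    import enum

    class CPUState(enum.Enum):
        GPU = 0
        CPU = 1
        MPS = 2

    def pick_cpu_state(cpu_flag: bool, openvino_device: str | None) -> CPUState:
        # OpenVINO runs the compiled graph on its own device, so the
        # PyTorch side is treated as CPU-only.
        if cpu_flag or openvino_device:
            return CPUState.CPU
        return CPUState.GPU

    print(pick_cpu_state(False, "GPU"))  # CPUState.CPU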
comfy/model_patcher.py
@@ -469,6 +469,15 @@ class ModelPatcher:
                 current_patches = self.patches.get(key, [])
                 current_patches.append((strength_patch, patches[k], strength_model, offset, function))
                 self.patches[key] = current_patches
+            else:
+                new_key=key.replace("diffusion_model","diffusion_model._orig_mod")
+                if new_key in model_sd:
+                    p.add(k)
+                    if key in self.patches:
+                        self.patches.pop(key)
+                    current_patches = self.patches.get(new_key, [])
+                    current_patches.append((strength_patch, patches[k], strength_model, offset, function))
+                    self.patches[new_key] = current_patches
 
         self.patches_uuid = uuid.uuid4()
         return list(p)
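The new else branch handles models whose diffusion_model has already been wrapped by torch.compile: the wrapper (torch._dynamo.OptimizedModule) stores the original module as _orig_mod, so state-dict keys gain that extra component and patch keys like "diffusion_model.input_blocks..." must be remapped to "diffusion_model._orig_mod.input_blocks...". A quick way to see the renaming (PyTorch 2.x behavior):

    # Demonstrates the _orig_mod key prefix introduced by torch.compile's wrapper.
    import torch

    net = torch.nn.Linear(4, 4)
    compiled = torch.compile(net)

    print(list(net.state_dict().keys()))       # ['weight', 'bias']
    print(list(compiled.state_dict().keys()))  # ['_orig_mod.weight', '_orig_mod.bias']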
comfy/sampler_helpers.py
@@ -1,4 +1,5 @@
 from __future__ import annotations
+from comfy.cli_args import args
 import uuid
 import comfy.model_management
 import comfy.conds
@@ -114,7 +115,11 @@ def prepare_sampling(model: ModelPatcher, noise_shape, conds, model_options=None
     minimum_memory_required = model.memory_required([noise_shape[0]] + list(noise_shape[1:])) + inference_memory
     comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required, minimum_memory_required=minimum_memory_required)
     real_model = model.model
-
+    if args.openvino and real_model.diffusion_model.__class__.__name__=="UNetModel":
+        import openvino.torch
+        import torch
+        print("Unet is being compiled using OpenVINO")
+        real_model.diffusion_model = torch.compile(real_model.diffusion_model, backend="openvino", options = {"device" : args.openvino, "model_caching" : False, "cache_dir": "./model_cache"})
     return real_model, conds, models
 
 def cleanup_models(conds, models):
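For reference, this follows OpenVINO's documented torch.compile integration: importing openvino.torch registers the "openvino" backend, and options selects the device and caching behavior. A self-contained sketch of the same pattern (requires the openvino package; option values mirror the diff, except model_caching is enabled here so cache_dir takes effect):

    # Standalone sketch of the compile pattern used in prepare_sampling above.
    import openvino.torch  # noqa: F401 -- side-effect import: registers the "openvino" backend
    import torch

    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
    compiled = torch.compile(
        model,
        backend="openvino",
        options={"device": "CPU", "model_caching": True, "cache_dir": "./model_cache"},
    )

    with torch.no_grad():
        out = compiled(torch.randn(1, 8))  # OpenVINO compiles the graph on the first call
    print(out.shape)  # torch.Size([1, 8])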