From bc9eb9dfdb571f0ca0248691f1f5850e32ec3221 Mon Sep 17 00:00:00 2001
From: ethan
Date: Wed, 5 Mar 2025 18:28:47 -0800
Subject: [PATCH] fix memory leak when re-patching torch.compile / OpenVINO models

---
 comfy/model_patcher.py              |  9 ++++++++-
 comfy_extras/nodes_torch_compile.py | 11 +++++++++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 259d66d9..9a8f7fe7 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -499,12 +499,19 @@ class ModelPatcher:
                     if len(k) > 2:
                         function = k[2]
                 org_key=key.replace("diffusion_model", "diffusion_model._orig_mod")
-                if key in model_sd or org_key in model_sd:
+                if key in model_sd:
                     p.add(k)
                     current_patches = self.patches.get(key, [])
                     current_patches.append((strength_patch, patches[k], strength_model, offset, function))
                     self.patches[key] = current_patches
                     self.patches[org_key] = current_patches
+                elif org_key in model_sd:
+                    if key in self.patches:
+                        self.patches.pop(key)
+                    p.add(k)
+                    current_patches = self.patches.get(org_key, [])
+                    current_patches.append((strength_patch, patches[k], strength_model, offset, function))
+                    self.patches[org_key] = current_patches
 
             self.patches_uuid = uuid.uuid4()
             return list(p)
diff --git a/comfy_extras/nodes_torch_compile.py b/comfy_extras/nodes_torch_compile.py
index cad3b142..d217f504 100644
--- a/comfy_extras/nodes_torch_compile.py
+++ b/comfy_extras/nodes_torch_compile.py
@@ -19,7 +19,7 @@ class TorchCompileModel:
                 "backend": (["inductor", "cudagraphs", "openvino"],),
             },
             "optional": {
-                "openvino device": (available_devices,),
+                "openvino_device": (available_devices,),
             },
         }
 
@@ -30,6 +30,7 @@
     EXPERIMENTAL = True
 
     def patch(self, model, backend, openvino_device):
+        print(model.__class__.__name__)
        if backend == "openvino":
            options = {"device": openvino_device}
            try:
@@ -39,6 +40,12 @@
                    "Could not import openvino python package. "
                    "Please install it with `pip install openvino`."
                )
+            import openvino.frontend.pytorch.torchdynamo.execute as ov_ex
+
+            torch._dynamo.reset()
+            ov_ex.compiled_cache.clear()
+            ov_ex.req_cache.clear()
+            ov_ex.partitioned_modules.clear()
        else:
            options = None
        m = model.clone()
@@ -55,4 +62,4 @@
 
 NODE_CLASS_MAPPINGS = {
     "TorchCompileModel": TorchCompileModel,
-}
\ No newline at end of file
+}
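
Note for reviewers: the model_patcher.py hunk relies on torch.compile() wrapping the
diffusion model in an OptimizedModule, whose state-dict keys pick up an "_orig_mod."
prefix; storing the same patch list under both the plain key and the prefixed key
appears to be what previously left stale entries behind. The sketch below only
illustrates that prefix behaviour; the toy Sequential module is not ComfyUI code and
assumes PyTorch 2.x.

# Illustrative sketch (assumes PyTorch 2.x; toy module, not part of this patch).
# torch.compile() returns an OptimizedModule wrapper, so its state-dict keys gain
# an "_orig_mod." prefix -- the reason add_patches remaps "diffusion_model.*" keys
# to "diffusion_model._orig_mod.*" before looking them up in model_sd.
import torch

net = torch.nn.Sequential(torch.nn.Linear(4, 4))
compiled = torch.compile(net)

print(list(net.state_dict().keys()))       # ['0.weight', '0.bias']
print(list(compiled.state_dict().keys()))  # ['_orig_mod.0.weight', '_orig_mod.0.bias']

On the node side, resetting torch._dynamo and clearing OpenVINO's torchdynamo caches
(compiled_cache, req_cache, partitioned_modules) before compiling again means graphs
from a previous run are not kept alive when the node is re-executed.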