Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-07-27 09:57:17 +08:00)
Small cleanup of comments

parent 59891b0f96
commit 35016983fb
@@ -214,9 +214,8 @@ class ModelPatcher:
         self.hook_backup: dict[str, tuple[torch.Tensor, torch.device]] = {}
         self.cached_hook_patches: dict[comfy.hooks.HookGroup, dict[str, torch.Tensor]] = {}
         self.current_hooks: Optional[comfy.hooks.HookGroup] = None
-        self.forced_hooks: Optional[comfy.hooks.HookGroup] = None # NOTE: only used for CLIP
+        self.forced_hooks: Optional[comfy.hooks.HookGroup] = None # NOTE: only used for CLIP at this time
         self.is_clip = False
-        # TODO: hook_mode should be entirely removed; behavior should be determined by remaining VRAM/memory
         self.hook_mode = comfy.hooks.EnumHookMode.MaxSpeed

         if not hasattr(self.model, 'model_loaded_weight_memory'):
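The removed TODO points at deriving hook_mode behavior from remaining VRAM/memory rather than from a fixed enum. A minimal sketch of that idea, under stated assumptions: pick_backup_device and free_budget_bytes are illustrative names, not ComfyUI API.

import torch

def pick_backup_device(weight: torch.Tensor, free_budget_bytes: int,
                       offload_device: torch.device) -> torch.device:
    # Bytes a backup copy of this weight would occupy on its current device.
    needed = weight.numel() * weight.element_size()
    # MaxSpeed-like: keep the backup next to the weight while the budget
    # allows; otherwise fall back to the offload device (MinMemory-like).
    return weight.device if needed <= free_budget_bytes else offload_device

For example, a 1024x1024 fp16 weight needs 2 MiB for its backup; with only a 1 MiB budget remaining, the backup would land on offload_device instead of staying on the GPU.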
@@ -1034,7 +1033,7 @@ class ModelPatcher:
                 if used:
                     target_device = weight.device
             self.hook_backup[key] = (weight.to(device=target_device, copy=True), weight.device)
-        # TODO: properly handle lowvram situations for cached hook patches
+        # TODO: properly handle LowVramPatch, if it ends up an issue
         out_weight = comfy.lora.calculate_weight(combined_patches[key],
                                                  comfy.model_management.cast_to_device(weight, weight.device, torch.float32, copy=True),
                                                  key, original_weights=original_weights)
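For context, the hunk above is a backup-then-patch pattern: copy the original weight into hook_backup keyed by parameter name, then compute the patched weight in float32 on a copy. A minimal, self-contained sketch of that pattern, assuming a hypothetical TinyPatcher (not ComfyUI's ModelPatcher; the "+ delta" stands in for comfy.lora.calculate_weight):

import torch

class TinyPatcher:
    def __init__(self, model: torch.nn.Module):
        self.model = model
        # key -> (backup copy, original device), mirroring the
        # dict[str, tuple[torch.Tensor, torch.device]] annotation above.
        self.hook_backup: dict[str, tuple[torch.Tensor, torch.device]] = {}

    def patch_weight(self, key: str, delta: torch.Tensor):
        weight = self.model.state_dict()[key]
        if key not in self.hook_backup:
            # Back up before mutating so the weight can be restored later.
            self.hook_backup[key] = (weight.to(device=weight.device, copy=True), weight.device)
        # As in the diff: do the patch math in float32 on a copy, then write
        # the result back in the weight's original dtype.
        out = weight.to(dtype=torch.float32, copy=True) + delta.to(torch.float32)
        weight.copy_(out.to(weight.dtype))

    def unpatch_all(self):
        sd = self.model.state_dict()
        for key, (backup, device) in self.hook_backup.items():
            sd[key].copy_(backup.to(device))
        self.hook_backup.clear()

model = torch.nn.Linear(4, 4)
patcher = TinyPatcher(model)
patcher.patch_weight("weight", torch.full((4, 4), 0.1))
patcher.unpatch_all()  # weights back to their original values

Backing the weight up on weight.device (as the MaxSpeed path does) trades VRAM for a fast restore; keeping the original device alongside the copy lets a restore put the tensor back in place even when the backup itself was offloaded.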
|