Use better dtype for the lowvram lora system.

This commit is contained in:
comfyanonymous 2024-08-19 15:35:25 -04:00
parent be0726c1ed
commit 6138f92084

View File

@ -96,7 +96,7 @@ class LowVramPatch:
        self.key = key
        self.model_patcher = model_patcher

    def __call__(self, weight):
-        return self.model_patcher.calculate_weight(self.model_patcher.patches[self.key], weight, self.key)
+        return self.model_patcher.calculate_weight(self.model_patcher.patches[self.key], weight, self.key, intermediate_dtype=weight.dtype)

class ModelPatcher: