Fix loras not working on mixed fp8. (#10899)

commit bdb10a583f (parent 0e24dbb19f)
Author: comfyanonymous
Date: 2025-11-25 21:07:58 -08:00
Committed by: GitHub
4 changed files with 37 additions and 9 deletions


@@ -132,7 +132,7 @@ class LowVramPatch:
     def __call__(self, weight):
         intermediate_dtype = weight.dtype
         if self.convert_func is not None:
-            weight = self.convert_func(weight.to(dtype=torch.float32, copy=True), inplace=True)
+            weight = self.convert_func(weight, inplace=False)
         if intermediate_dtype not in [torch.float32, torch.float16, torch.bfloat16]: #intermediate_dtype has to be one that is supported in math ops
             intermediate_dtype = torch.float32
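
The change stops forcing a float32 copy before the dtype conversion: the stored weight is now handed to convert_func as-is, and inplace=False tells the converter to return a new tensor rather than mutate its argument. Below is a minimal sketch of that contract, assuming a hypothetical dequant_convert helper with a made-up per-tensor scale; it is not ComfyUI's actual convert_func, only an illustration of why the inplace flag matters here.

import torch

# Hypothetical convert_func (illustrative only): turns a stored low-precision
# weight into a float32 tensor, optionally reusing the caller's tensor.
def dequant_convert(weight, scale=2.0, inplace=False):
    if inplace:
        # May clobber the caller's tensor; only safe when given a private copy.
        return weight.mul_(scale)
    # inplace=False: leave the caller's tensor untouched, return a converted copy.
    return weight.to(dtype=torch.float32) * scale

stored = torch.randn(4, 4, dtype=torch.bfloat16)    # stand-in for a model weight
patched_input = dequant_convert(stored, inplace=False)
assert stored.dtype == torch.bfloat16        # original weight is unchanged
assert patched_input.dtype == torch.float32  # math-friendly copy for the LoRA math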