Fix loras not working on mixed fp8. (#10899)

Author: comfyanonymous (committed by GitHub)
Date:   2025-11-25 21:07:58 -08:00
Commit: bdb10a583f (parent 0e24dbb19f)
4 changed files with 37 additions and 9 deletions


@@ -194,6 +194,7 @@ class LoRAAdapter(WeightAdapterBase):
                 lora_diff = torch.mm(
                     mat1.flatten(start_dim=1), mat2.flatten(start_dim=1)
                 ).reshape(weight.shape)
+                del mat1, mat2
                 if dora_scale is not None:
                     weight = weight_decompose(
                         dora_scale,
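
For context, here is a minimal runnable sketch of the computation this hunk touches, assuming mat1 and mat2 are the usual LoRA up/down factors; the function name and tensor shapes below are hypothetical, chosen only so the dimensions line up, and this is an illustration rather than the repository's actual implementation:

import torch

def compute_lora_diff(weight, mat1, mat2):
    # Flatten both low-rank factors to 2-D, multiply them into the
    # full-rank update, and reshape it back to the target weight's
    # shape, mirroring the hunk above. flatten(start_dim=1) handles
    # conv-style factors whose trailing dims are spatial.
    lora_diff = torch.mm(
        mat1.flatten(start_dim=1), mat2.flatten(start_dim=1)
    ).reshape(weight.shape)
    # The line this commit adds: in the original function these are
    # the last references to the factors, so deleting them lets the
    # memory be reclaimed before any later decompose/cast step.
    del mat1, mat2
    return lora_diff

# Hypothetical shapes: a conv weight patched by a rank-4 LoRA.
weight = torch.zeros(64, 32, 3, 3)
mat1 = torch.randn(64, 4)        # up factor: (out_channels, rank)
mat2 = torch.randn(4, 32, 3, 3)  # down factor: (rank, in_channels, k, k)
print(compute_lora_diff(weight, mat1, mat2).shape)  # torch.Size([64, 32, 3, 3])

Note that the early del is a memory-footprint improvement within this function; the mixed-fp8 fix named in the commit title spans the other changed files, which are not shown in this excerpt.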