Use single apply_rope function across models (#10547)

Author: contentis
Date: 2025-11-05 02:10:11 +01:00
Committed by: GitHub
Parent: 265adad858
Commit: 4cd881866b
5 changed files with 59 additions and 80 deletions


@@ -232,6 +232,7 @@ class WanAttentionBlock(nn.Module):
         # assert e[0].dtype == torch.float32
         # self-attention
+        x = x.contiguous() # otherwise implicit in LayerNorm
         y = self.self_attn(
             torch.addcmul(repeat_e(e[0], x), self.norm1(x), 1 + repeat_e(e[1], x)),
             freqs, transformer_options=transformer_options)
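
Context for the hunk above: torch.addcmul(a, b, c) computes a + b * c, so the modulated input to self-attention is e[0] + norm1(x) * (1 + e[1]), i.e. an adaLN-style shift/scale, and freqs carries the rotary-embedding table that the consolidated apply_rope consumes. The sketch below is only a generic illustration of what such a shared rotary helper typically looks like, not a copy of this repository's function: the names rope/apply_rope, the (seq, dim/2, 2, 2) rotation-matrix layout, and the tensor shapes are assumptions for illustration.

import torch


def rope(pos: torch.Tensor, dim: int, theta: float = 10000.0) -> torch.Tensor:
    # Build per-position 2x2 rotation matrices for each frequency band.
    # pos: (..., seq) positions -> returns (..., seq, dim // 2, 2, 2).
    assert dim % 2 == 0
    scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    omega = 1.0 / (theta ** scale)
    angles = torch.einsum("...n,d->...nd", pos.to(torch.float64), omega)
    out = torch.stack(
        [torch.cos(angles), -torch.sin(angles), torch.sin(angles), torch.cos(angles)],
        dim=-1,
    )
    return out.reshape(*out.shape[:-1], 2, 2).float()


def apply_rope(xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor):
    # Treat the last dimension of q/k as (real, imag) pairs and rotate each pair
    # by the precomputed rotation matrices in freqs_cis.
    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)


# Hypothetical usage with assumed shapes (batch, heads, seq, head_dim):
q = torch.randn(1, 8, 16, 64)
k = torch.randn(1, 8, 16, 64)
pos = torch.arange(16)[None, :]                  # (batch, seq)
freqs = rope(pos, 64).unsqueeze(1)               # (1, 1, 16, 32, 2, 2), broadcast over heads
q_rot, k_rot = apply_rope(q, k, freqs)

Sharing one helper like this across models is what makes a call-site requirement such as contiguous inputs explicit, which is why the diff adds x = x.contiguous() instead of relying on LayerNorm to produce a contiguous tensor as a side effect.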