
Simplify and improve some vae attention code.

pull/696/head
comfyanonymous committed 2 years ago · parent commit 797c4e8d3b
1 changed file with 2 additions and 14 deletions

comfy/ldm/modules/diffusionmodules/model.py
@@ -331,25 +331,13 @@ class MemoryEfficientAttnBlockPytorch(nn.Module):
         # compute attention
         B, C, H, W = q.shape
-        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
-
         q, k, v = map(
-            lambda t: t.unsqueeze(3)
-            .reshape(B, t.shape[1], 1, C)
-            .permute(0, 2, 1, 3)
-            .reshape(B * 1, t.shape[1], C)
-            .contiguous(),
+            lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
             (q, k, v),
         )
         out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)

-        out = (
-            out.unsqueeze(0)
-            .reshape(B, 1, out.shape[1], C)
-            .permute(0, 2, 1, 3)
-            .reshape(B, out.shape[1], C)
-        )
-        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
+        out = out.transpose(2, 3).reshape(B, C, H, W)
         out = self.proj_out(out)
         return x+out
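
The change collapses the einops rearrange plus the unsqueeze/reshape/permute round-trips into a single view/transpose on each side of scaled_dot_product_attention. Below is a minimal standalone sketch (not part of the commit) that compares the two paths on random tensors; the example sizes and the helper names old_path and new_path are hypothetical, used only for this comparison.

import torch
from einops import rearrange

# Hypothetical example sizes; in the VAE attention block these come from q.shape.
B, C, H, W = 2, 8, 16, 16
q, k, v = (torch.randn(B, C, H, W) for _ in range(3))

def old_path(q, k, v):
    # Old code: rearrange to (B, H*W, C), then an unsqueeze/reshape/permute
    # round-trip into a single-head layout before and after attention.
    q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(B, t.shape[1], 1, C)
        .permute(0, 2, 1, 3)
        .reshape(B * 1, t.shape[1], C)
        .contiguous(),
        (q, k, v),
    )
    out = torch.nn.functional.scaled_dot_product_attention(
        q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    out = (
        out.unsqueeze(0)
        .reshape(B, 1, out.shape[1], C)
        .permute(0, 2, 1, 3)
        .reshape(B, out.shape[1], C)
    )
    return rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)

def new_path(q, k, v):
    # New code: one view to (B, 1, C, H*W), transpose to (B, 1, H*W, C),
    # attention, then transpose back and reshape to (B, C, H, W).
    q, k, v = map(
        lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(),
        (q, k, v),
    )
    out = torch.nn.functional.scaled_dot_product_attention(
        q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    return out.transpose(2, 3).reshape(B, C, H, W)

a, b = old_path(q, k, v), new_path(q, k, v)
print(torch.allclose(a, b, atol=1e-5), (a - b).abs().max().item())

The two outputs should agree up to floating-point noise; within this block the new path also drops the einops rearrange calls.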
