
Remove unnecessary code.

comfyanonymous committed 6 months ago
commit 98f828fad9 (pull/2078/merge)
Changed files:
  1. comfy/ldm/modules/attention.py (11 changed lines)
  2. comfy/ldm/modules/diffusionmodules/model.py (1 changed line)

comfy/ldm/modules/attention.py

@@ -318,11 +318,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
             return attention_pytorch(q, k, v, heads, mask)
     q, k, v = map(
-        lambda t: t.unsqueeze(3)
-        .reshape(b, -1, heads, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b * heads, -1, dim_head)
-        .contiguous(),
+        lambda t: t.reshape(b, -1, heads, dim_head),
         (q, k, v),
     )
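The deleted lines folded each head into the batch dimension, giving [b * heads, seq, dim_head] tensors; the replacement keeps the 4-D [b, seq, heads, dim_head] layout that xformers.ops.memory_efficient_attention also accepts, so the permute/contiguous round trip does no useful work. A minimal pure-PyTorch sketch (no xformers needed; the sizes b, seq, heads, dim_head are illustrative) checking that the two paths describe the same data:

    import torch

    b, seq, heads, dim_head = 2, 16, 8, 64
    t = torch.randn(b, seq, heads * dim_head)  # stand-in for q, k, or v

    # Old path: flatten each head into the batch dimension.
    old = (
        t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous()
    )

    # New path: a single reshape to the 4-D layout xformers consumes directly.
    new = t.reshape(b, -1, heads, dim_head)

    # The old 3-D tensor is just a permuted view of the new 4-D one.
    assert torch.equal(old, new.permute(0, 2, 1, 3).reshape(b * heads, seq, dim_head))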
@@ -335,10 +331,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
     out = (
-        out.unsqueeze(0)
-        .reshape(b, heads, -1, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b, -1, heads * dim_head)
+        out.reshape(b, -1, heads * dim_head)
     )
     return out
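The output side mirrors the input change: with 3-D inputs xformers returns [b * heads, seq, dim_head], which the deleted lines had to un-flatten, while with 4-D inputs it returns [b, seq, heads, dim_head], so a single reshape merges the head dimension back (the return-shape behavior is an assumption to verify against your installed xformers version). A sketch of the equivalence using stand-in tensors:

    import torch

    b, seq, heads, dim_head = 2, 16, 8, 64
    out_4d = torch.randn(b, seq, heads, dim_head)  # shape returned for 4-D inputs
    out_3d = out_4d.permute(0, 2, 1, 3).reshape(b * heads, seq, dim_head)  # 3-D equivalent

    # Old un-flatten of the 3-D output vs. the new single reshape of the 4-D output.
    old = (
        out_3d.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    new = out_4d.reshape(b, -1, heads * dim_head)
    assert torch.equal(old, new)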

comfy/ldm/modules/diffusionmodules/model.py

@@ -3,7 +3,6 @@ import math
 import torch
 import torch.nn as nn
 import numpy as np
-from einops import rearrange
 from typing import Optional, Any
 import logging
