@@ -402,8 +402,6 @@ class MemoryEfficientCrossAttention(nn.Module):
     # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
     def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=comfy.ops):
         super().__init__()
-        print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
-              f"{heads} heads.")
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
 
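For context, a minimal standalone sketch of the dimension bookkeeping this constructor performs. The class name CrossAttentionDims is hypothetical and the plain nn.Linear projections stand in for the module's operations.Linear layers; this is an illustration of the pattern, not ComfyUI's actual module.

import torch.nn as nn

def default(val, d):
    # Fall back to d when val is None, as used above for context_dim.
    return val if val is not None else d

class CrossAttentionDims(nn.Module):  # hypothetical name, for illustration only
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64):
        super().__init__()
        inner_dim = dim_head * heads                   # e.g. 8 heads * 64 = 512
        context_dim = default(context_dim, query_dim)  # None means self-attention
        # Queries come from the query stream; keys/values from the context stream.
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)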