|
import math
from inspect import isfunction

import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange

from audioldm.latent_diffusion.util import checkpoint
|
|
|
|
|
def exists(val): |
|
return val is not None |
|
|
|
|
|
def uniq(arr): |
|
return {el: True for el in arr}.keys() |
|
|
|
|
|
def default(val, d): |
|
if exists(val): |
|
return val |
|
return d() if isfunction(d) else d |
|
|
|
|
|
def max_neg_value(t): |
|
return -torch.finfo(t.dtype).max |
|
|
|
|
|
def init_(tensor): |
|
dim = tensor.shape[-1] |
|
std = 1 / math.sqrt(dim) |
|
tensor.uniform_(-std, std) |
|
return tensor |
|
|
|
|
|
|
|
class GEGLU(nn.Module): |
|
def __init__(self, dim_in, dim_out): |
|
super().__init__() |
|
self.proj = nn.Linear(dim_in, dim_out * 2) |
|
|
|
def forward(self, x): |
|
x, gate = self.proj(x).chunk(2, dim=-1) |
|
return x * F.gelu(gate) |
|
|
|
|
|
class FeedForward(nn.Module): |
|
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): |
|
super().__init__() |
|
inner_dim = int(dim * mult) |
|
dim_out = default(dim_out, dim) |
|
project_in = ( |
|
nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) |
|
if not glu |
|
else GEGLU(dim, inner_dim) |
|
) |
|
|
|
self.net = nn.Sequential( |
|
project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) |
|
) |
|
|
|
def forward(self, x): |
|
return self.net(x) |
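

# A minimal shape-check sketch (not part of the original module): it illustrates how
# `FeedForward` expands the feature dimension by `mult` internally and projects back to
# `dim`, with the gated GEGLU variant enabled via `glu=True`. The sizes are illustrative.
def _feed_forward_example():
    ff = FeedForward(dim=64, mult=4, glu=True, dropout=0.0)
    x = torch.randn(2, 10, 64)  # (batch, tokens, features)
    out = ff(x)                 # GEGLU projects to 2 * 256, gates down to 256, then back to 64
    assert out.shape == (2, 10, 64)
    return out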
|
|
|
|
|
def zero_module(module): |
|
""" |
|
Zero out the parameters of a module and return it. |
|
""" |
|
for p in module.parameters(): |
|
p.detach().zero_() |
|
return module |
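
# For example (illustrative): `zero_module(nn.Conv2d(4, 4, 1))` returns the same conv with
# all weights and biases set to zero; `SpatialTransformer` below uses this for its output
# projection so the block initially acts as an identity through its residual connection.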
|
|
|
|
|
def Normalize(in_channels): |
|
return torch.nn.GroupNorm( |
|
num_groups=32, num_channels=in_channels, eps=1e-6, affine=True |
|
) |
|
|
|
|
|
class LinearAttention(nn.Module): |
|
def __init__(self, dim, heads=4, dim_head=32): |
|
super().__init__() |
|
self.heads = heads |
|
hidden_dim = dim_head * heads |
|
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) |
|
self.to_out = nn.Conv2d(hidden_dim, dim, 1) |
|
|
|
def forward(self, x): |
|
b, c, h, w = x.shape |
|
qkv = self.to_qkv(x) |
|
q, k, v = rearrange( |
|
qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3 |
|
) |
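        # Linear attention: softmax-normalise the keys over spatial positions, aggregate a
        # per-head key/value context once, then read it out for every query, keeping the
        # cost linear in the number of positions instead of quadratic.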
|
k = k.softmax(dim=-1) |
|
context = torch.einsum("bhdn,bhen->bhde", k, v) |
|
out = torch.einsum("bhde,bhdn->bhen", context, q) |
|
out = rearrange( |
|
out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w |
|
) |
|
return self.to_out(out) |
|
|
|
|
|
class SpatialSelfAttention(nn.Module): |
|
def __init__(self, in_channels): |
|
super().__init__() |
|
self.in_channels = in_channels |
|
|
|
self.norm = Normalize(in_channels) |
|
self.q = torch.nn.Conv2d( |
|
in_channels, in_channels, kernel_size=1, stride=1, padding=0 |
|
) |
|
self.k = torch.nn.Conv2d( |
|
in_channels, in_channels, kernel_size=1, stride=1, padding=0 |
|
) |
|
self.v = torch.nn.Conv2d( |
|
in_channels, in_channels, kernel_size=1, stride=1, padding=0 |
|
) |
|
self.proj_out = torch.nn.Conv2d( |
|
in_channels, in_channels, kernel_size=1, stride=1, padding=0 |
|
) |
|
|
|
def forward(self, x): |
|
h_ = x |
|
h_ = self.norm(h_) |
|
q = self.q(h_) |
|
k = self.k(h_) |
|
v = self.v(h_) |
|
|
|
|
|
b, c, h, w = q.shape |
|
q = rearrange(q, "b c h w -> b (h w) c") |
|
k = rearrange(k, "b c h w -> b c (h w)") |
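        # w_[b, i, j] = sum_c q[b, i, c] * k[b, c, j]: attention weight of query position i
        # over key position j, scaled by 1/sqrt(c) before the softmax over j.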
|
w_ = torch.einsum("bij,bjk->bik", q, k) |
|
|
|
w_ = w_ * (int(c) ** (-0.5)) |
|
w_ = torch.nn.functional.softmax(w_, dim=2) |
|
|
|
|
|
v = rearrange(v, "b c h w -> b c (h w)") |
|
w_ = rearrange(w_, "b i j -> b j i") |
|
h_ = torch.einsum("bij,bjk->bik", v, w_) |
|
h_ = rearrange(h_, "b c (h w) -> b c h w", h=h) |
|
h_ = self.proj_out(h_) |
|
|
|
return x + h_ |
|
|
|
|
|
class CrossAttention(nn.Module): |
|
""" |
|
### Cross Attention Layer |
|
This falls-back to self-attention when conditional embeddings are not specified. |
|
""" |
|
|
|
|
|
use_flash_attention: bool = False |
|
def __init__( |
|
self, |
|
query_dim, |
|
context_dim=None, |
|
heads=8, |
|
dim_head=64, |
|
dropout=0.0, |
|
is_inplace: bool = True, |
|
    ):
        """
        :param query_dim: is the input embedding size
        :param context_dim: is the size of the conditional embeddings (defaults to `query_dim`)
        :param heads: is the number of attention heads
        :param dim_head: is the size of an attention head
        :param dropout: is the dropout rate applied after the output projection
        :param is_inplace: specifies whether to perform the attention softmax computation inplace
            to save memory
        """
|
super().__init__() |
|
|
|
self.is_inplace = is_inplace |
|
self.n_heads = heads |
|
self.d_head = dim_head |
|
|
|
|
|
self.scale = dim_head**-0.5 |
|
|
|
|
|
if context_dim is None: |
|
context_dim = query_dim |
|
|
|
|
|
d_attn = dim_head * heads |
|
self.to_q = nn.Linear(query_dim, d_attn, bias=False) |
|
self.to_k = nn.Linear(context_dim, d_attn, bias=False) |
|
self.to_v = nn.Linear(context_dim, d_attn, bias=False) |
|
|
|
|
|
self.to_out = nn.Sequential(nn.Linear(d_attn, query_dim), nn.Dropout(dropout)) |
|
|
|
|
|
|
|
|
|
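        # Optionally use the `flash_attn` package when it is installed; otherwise fall back
        # to the standard implementation in `normal_attention`.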
try: |
|
|
|
|
|
|
|
from flash_attn.flash_attention import FlashAttention |
|
|
|
self.flash = FlashAttention() |
|
|
|
self.flash.softmax_scale = self.scale |
|
|
|
except ImportError: |
|
self.flash = None |
|
|
|
def forward(self, x, context=None, mask=None): |
|
""" |
|
:param x: are the input embeddings of shape `[batch_size, height * width, d_model]` |
|
:param cond: is the conditional embeddings of shape `[batch_size, n_cond, d_cond]` |
|
""" |
|
|
|
|
|
has_cond = context is not None |
|
if not has_cond: |
|
context = x |
|
|
|
|
|
q = self.to_q(x) |
|
k = self.to_k(context) |
|
v = self.to_v(context) |
|
|
|
|
|
if ( |
|
CrossAttention.use_flash_attention |
|
and self.flash is not None |
|
and not has_cond |
|
and self.d_head <= 128 |
|
): |
|
return self.flash_attention(q, k, v) |
|
|
|
else: |
|
return self.normal_attention(q, k, v) |
|
|
|
def flash_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): |
|
""" |
|
#### Flash Attention |
|
:param q: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` |
|
:param k: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` |
|
:param v: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` |
|
""" |
|
|
|
|
|
batch_size, seq_len, _ = q.shape |
|
|
|
|
|
|
|
qkv = torch.stack((q, k, v), dim=2) |
|
|
|
qkv = qkv.view(batch_size, seq_len, 3, self.n_heads, self.d_head) |
|
|
|
|
|
|
|
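        # The flash attention kernel used here supports head sizes of 32, 64 and 128, so pad
        # the head dimension up to the next supported size (the padding is sliced off below).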
if self.d_head <= 32: |
|
pad = 32 - self.d_head |
|
elif self.d_head <= 64: |
|
pad = 64 - self.d_head |
|
elif self.d_head <= 128: |
|
pad = 128 - self.d_head |
|
else: |
|
raise ValueError(f"Head size ${self.d_head} too large for Flash Attention") |
|
|
|
|
|
if pad: |
|
qkv = torch.cat( |
|
(qkv, qkv.new_zeros(batch_size, seq_len, 3, self.n_heads, pad)), dim=-1 |
|
) |
|
|
|
|
|
|
|
|
|
|
|
out, _ = self.flash(qkv.type(torch.float16)) |
|
|
|
out = out[:, :, :, : self.d_head].float() |
|
|
|
out = out.reshape(batch_size, seq_len, self.n_heads * self.d_head) |
|
|
|
|
|
return self.to_out(out) |
|
|
|
def normal_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): |
|
""" |
|
#### Normal Attention |
|
|
|
:param q: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` |
|
:param k: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` |
|
:param v: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]` |
|
""" |
|
|
|
|
|
q = q.view(*q.shape[:2], self.n_heads, -1) |
|
k = k.view(*k.shape[:2], self.n_heads, -1) |
|
v = v.view(*v.shape[:2], self.n_heads, -1) |
|
|
|
|
|
attn = torch.einsum("bihd,bjhd->bhij", q, k) * self.scale |
|
|
|
|
|
|
|
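        # Compute the softmax in-place, one batch half at a time, to reduce peak memory usage.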
if self.is_inplace: |
|
half = attn.shape[0] // 2 |
|
attn[half:] = attn[half:].softmax(dim=-1) |
|
attn[:half] = attn[:half].softmax(dim=-1) |
|
else: |
|
attn = attn.softmax(dim=-1) |
|
|
|
|
|
|
|
|
|
|
|
out = torch.einsum("bhij,bjhd->bihd", attn, v) |
|
|
|
out = out.reshape(*out.shape[:2], -1) |
|
|
|
return self.to_out(out) |
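

# A minimal usage sketch (not part of the original module): it shows the two modes of
# `CrossAttention`, self-attention when no context is given and cross-attention to a
# conditioning sequence when one is. All sizes are illustrative assumptions.
def _cross_attention_example():
    attn = CrossAttention(query_dim=64, context_dim=32, heads=4, dim_head=16)
    x = torch.randn(2, 100, 64)         # (batch, query tokens, query_dim)
    cond = torch.randn(2, 7, 32)        # (batch, conditioning tokens, context_dim)
    self_out = attn(x)                  # context=None -> self-attention over x
    cross_out = attn(x, context=cond)   # cross-attention to the conditioning sequence
    assert self_out.shape == cross_out.shape == (2, 100, 64)
    return self_out, cross_out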
|
|
class BasicTransformerBlock(nn.Module): |
|
def __init__( |
|
self, |
|
dim, |
|
n_heads, |
|
d_head, |
|
dropout=0.0, |
|
context_dim=None, |
|
gated_ff=True, |
|
checkpoint=True, |
|
): |
|
super().__init__() |
|
self.attn1 = CrossAttention( |
|
query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout |
|
) |
|
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) |
|
self.attn2 = CrossAttention( |
|
query_dim=dim, |
|
context_dim=context_dim, |
|
heads=n_heads, |
|
dim_head=d_head, |
|
dropout=dropout, |
|
) |
|
self.norm1 = nn.LayerNorm(dim) |
|
self.norm2 = nn.LayerNorm(dim) |
|
self.norm3 = nn.LayerNorm(dim) |
|
self.checkpoint = checkpoint |
|
|
|
def forward(self, x, context=None): |
|
if context is None: |
|
return checkpoint(self._forward, (x,), self.parameters(), self.checkpoint) |
|
else: |
|
return checkpoint( |
|
self._forward, (x, context), self.parameters(), self.checkpoint |
|
) |
|
|
|
def _forward(self, x, context=None): |
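        # Pre-norm residual sub-layers: self-attention, cross-attention to `context`
        # (which degenerates to self-attention when `context` is None), then feed-forward.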
|
x = self.attn1(self.norm1(x)) + x |
|
x = self.attn2(self.norm2(x), context=context) + x |
|
x = self.ff(self.norm3(x)) + x |
|
return x |
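

# A minimal usage sketch (not part of the original module): a single block applied to a
# flattened sequence of shape (batch, tokens, dim) with cross-attention to a conditioning
# sequence. Sizes are illustrative; `checkpoint=False` keeps the sketch free of the
# gradient-checkpointing wrapper.
def _basic_transformer_block_example():
    block = BasicTransformerBlock(
        dim=64, n_heads=4, d_head=16, context_dim=32, checkpoint=False
    )
    x = torch.randn(2, 100, 64)
    cond = torch.randn(2, 7, 32)
    out = block(x, context=cond)
    assert out.shape == x.shape
    return out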
|
|
|
|
|
class SpatialTransformer(nn.Module): |
|
""" |
|
Transformer block for image-like data. |
|
First, project the input (aka embedding) |
|
and reshape to b, t, d. |
|
Then apply standard transformer action. |
|
Finally, reshape to image |
|
""" |
|
|
|
def __init__( |
|
self, |
|
in_channels, |
|
n_heads, |
|
d_head, |
|
depth=1, |
|
dropout=0.0, |
|
context_dim=None, |
|
no_context=False, |
|
): |
|
super().__init__() |
|
|
|
if no_context: |
|
context_dim = None |
|
|
|
self.in_channels = in_channels |
|
inner_dim = n_heads * d_head |
|
self.norm = Normalize(in_channels) |
|
|
|
self.proj_in = nn.Conv2d( |
|
in_channels, inner_dim, kernel_size=1, stride=1, padding=0 |
|
) |
|
|
|
self.transformer_blocks = nn.ModuleList( |
|
[ |
|
BasicTransformerBlock( |
|
inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim |
|
) |
|
for d in range(depth) |
|
] |
|
) |
|
|
|
self.proj_out = zero_module( |
|
nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) |
|
) |
|
|
|
def forward(self, x, context=None): |
|
|
|
b, c, h, w = x.shape |
|
x_in = x |
|
x = self.norm(x) |
|
x = self.proj_in(x) |
|
x = rearrange(x, "b c h w -> b (h w) c") |
|
for block in self.transformer_blocks: |
|
x = block(x, context=context) |
|
x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) |
|
x = self.proj_out(x) |
|
return x + x_in |
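

# A minimal end-to-end sketch (not part of the original module): image-like features of
# shape (batch, channels, height, width) are normalised, projected, flattened to
# (batch, h * w, inner_dim), passed through the transformer blocks with cross-attention
# to a conditioning sequence, and reshaped back, with a residual connection to the input.
# The sizes below are illustrative assumptions.
def _spatial_transformer_example():
    model = SpatialTransformer(in_channels=64, n_heads=4, d_head=16, depth=1, context_dim=32)
    x = torch.randn(2, 64, 8, 8)     # (batch, channels, height, width)
    cond = torch.randn(2, 7, 32)     # (batch, conditioning tokens, context_dim)
    out = model(x, context=cond)
    assert out.shape == x.shape      # the image-like shape is preserved
    return out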
|
|