# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# GLIDE: https://github.com/openai/glide-text2im
# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------
import math

import torch
import torch.nn as nn
from timm.models.vision_transformer import Mlp, Attention as Attention_
from einops import rearrange, repeat
import xformers.ops

from .utils import add_decomposed_rel_pos


def modulate(x, shift, scale):
    # adaLN modulation: shift/scale are per-sample vectors (B, C); the token axis is inserted here.
    return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)


def t2i_modulate(x, shift, scale):
    # T2I variant: shift/scale already carry a token axis, e.g. (B, 1, C).
    return x * (1 + scale) + shift
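

# Shape sketch (illustrative, not part of the original file): `modulate` takes
# per-sample conditioning vectors and inserts the token axis itself, while
# `t2i_modulate` expects the token axis to already be present:
#
#   x = torch.randn(4, 256, 1152)            # (B, N, C) tokens
#   shift = scale = torch.randn(4, 1152)     # (B, C)
#   modulate(x, shift, scale)                # broadcasts to (B, N, C)
#
#   shift = scale = torch.randn(4, 1, 1152)  # (B, 1, C)
#   t2i_modulate(x, shift, scale)            # broadcasts to (B, N, C)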


class MultiHeadCrossAttention(nn.Module):
    def __init__(self, d_model, num_heads, attn_drop=0., proj_drop=0., **block_kwargs):
        super(MultiHeadCrossAttention, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"

        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads

        self.q_linear = nn.Linear(d_model, d_model)
        self.kv_linear = nn.Linear(d_model, d_model * 2)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(d_model, d_model)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, cond, mask=None):
        # query: image tokens; key/value: condition tokens;
        # mask: list of per-sample condition lengths (handles padded captions)
        B, N, C = x.shape

        # Flatten the batch into one packed sequence so xformers can use a
        # block-diagonal mask instead of padding.
        q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim)
        kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim)
        k, v = kv.unbind(2)

        assert mask is not None
        # Each sample's N image tokens attend only to its own mask[i] condition tokens.
        attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask)
        x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)

        x = x.view(B, -1, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
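

# Usage sketch (illustrative assumption, not from the original file): with the packed
# layout above, the caller concatenates the unpadded caption tokens of all samples
# along the length dimension and passes their true lengths as `mask`:
#
#   attn = MultiHeadCrossAttention(d_model=1152, num_heads=16)
#   x = torch.randn(2, 256, 1152)          # two samples, 256 image tokens each
#   cond = torch.randn(1, 45 + 32, 1152)   # both captions packed along dim 1
#   out = attn(x, cond, mask=[45, 32])     # out: (2, 256, 1152)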


class WindowAttention(Attention_):
    """Multi-head Attention block with relative position embeddings."""

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=True,
        use_rel_pos=False,
        rel_pos_zero_init=True,
        input_size=None,
        **block_kwargs,
    ):
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            input_size (tuple[int, int] or None): Input resolution for calculating the relative
                positional parameter size. Required if use_rel_pos is True.
        """
        super().__init__(dim, num_heads=num_heads, qkv_bias=qkv_bias, **block_kwargs)

        self.use_rel_pos = use_rel_pos
        if self.use_rel_pos:
            assert input_size is not None, "input_size is required when use_rel_pos=True"
            # initialize relative positional embeddings
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, self.head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, self.head_dim))

            if not rel_pos_zero_init:
                nn.init.trunc_normal_(self.rel_pos_h, std=0.02)
                nn.init.trunc_normal_(self.rel_pos_w, std=0.02)

    def forward(self, x, mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = qkv.unbind(2)
        if use_fp32_attention := getattr(self, 'fp32_attention', False):
            q, k, v = q.float(), k.float(), v.float()

        attn_bias = None
        if mask is not None:
            # Additive bias: 0 where attention is allowed, -inf where masked out.
            attn_bias = torch.zeros([B * self.num_heads, q.shape[1], k.shape[1]], dtype=q.dtype, device=q.device)
            attn_bias.masked_fill_(mask.squeeze(1).repeat(self.num_heads, 1, 1) == 0, float('-inf'))
        x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias)

        x = x.view(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
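

# Mask sketch (illustrative assumption): `mask` is taken as (B, 1, N, N) with nonzero
# entries marking visible pairs; it is squeezed, tiled per head, and converted into
# an additive -inf bias before the memory-efficient attention call:
#
#   wa = WindowAttention(dim=1152, num_heads=16)
#   x = torch.randn(2, 256, 1152)
#   mask = torch.ones(2, 1, 256, 256)   # full visibility
#   out = wa(x, mask=mask)              # out: (2, 256, 1152)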


#################################################################################
#   AMP attention with fp32 softmax to fix loss NaN problem during training     #
#################################################################################
class Attention(Attention_):
    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)   # make torchscript happy (cannot use tensor as tuple)
        use_fp32_attention = getattr(self, 'fp32_attention', False)
        if use_fp32_attention:
            q, k = q.float(), k.float()
        with torch.cuda.amp.autocast(enabled=not use_fp32_attention):
            attn = (q @ k.transpose(-2, -1)) * self.scale
            attn = attn.softmax(dim=-1)

        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class FinalLayer(nn.Module):
    """
    The final layer of PixArt.
    """

    def __init__(self, hidden_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        x = modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x


class T2IFinalLayer(nn.Module):
    """
    The final layer of PixArt (text-to-image variant): adaLN parameters come from a
    learned scale-shift table plus the timestep embedding.
    """

    def __init__(self, hidden_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size[0] * patch_size[1] * out_channels, bias=True)
        self.scale_shift_table = nn.Parameter(torch.randn(2, hidden_size) / hidden_size ** 0.5)
        self.out_channels = out_channels

    def forward(self, x, t):
        shift, scale = (self.scale_shift_table[None] + t[:, None]).chunk(2, dim=1)
        x = t2i_modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x
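

# Shape sketch (illustrative): `t` is the pooled timestep embedding of shape (B, C);
# broadcasting against the learned (2, C) table yields shift/scale of shape (B, 1, C):
#
#   layer = T2IFinalLayer(hidden_size=1152, patch_size=(2, 2), out_channels=8)
#   x = torch.randn(4, 256, 1152)
#   t = torch.randn(4, 1152)
#   out = layer(x, t)   # (4, 256, 2 * 2 * 8) = (4, 256, 32)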


class MaskFinalLayer(nn.Module):
    """
    The final layer of PixArt for mask prediction.
    """

    def __init__(self, final_hidden_size, c_emb_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(final_hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(final_hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(c_emb_size, 2 * final_hidden_size, bias=True)
        )

    def forward(self, x, t):
        shift, scale = self.adaLN_modulation(t).chunk(2, dim=1)
        x = modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x


class DecoderLayer(nn.Module):
    """
    Projects hidden states to the decoder hidden size, with adaLN modulation.
    """

    def __init__(self, hidden_size, decoder_hidden_size):
        super().__init__()
        self.norm_decoder = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, decoder_hidden_size, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )

    def forward(self, x, t):
        shift, scale = self.adaLN_modulation(t).chunk(2, dim=1)
        x = modulate(self.norm_decoder(x), shift, scale)
        x = self.linear(x)
        return x


#################################################################################
#   Embedding Layers for Timesteps and Class Labels                             #
#################################################################################
class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.
        :param t: a 1-D Tensor of N indices, one per batch element.
                  These may be fractional.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=t.device) / half)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(self.dtype)
        return self.mlp(t_freq)

    @property
    def dtype(self):
        # Return the dtype of the model parameters.
        return next(self.parameters()).dtype
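

# Sinusoidal embedding sketch (illustrative): each scalar t maps to cos/sin features
# over geometrically spaced frequencies, then through the MLP:
#
#   emb = TimestepEmbedder(hidden_size=1152)
#   t = torch.tensor([0., 250., 999.])
#   emb.timestep_embedding(t, 256).shape   # (3, 256) raw sinusoidal features
#   emb(t).shape                           # (3, 1152)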


class SizeEmbedder(TimestepEmbedder):
    """
    Embeds scalar image-size information (e.g. height/width or aspect ratio) into
    vector representations, reusing the sinusoidal timestep embedding.
    """

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__(hidden_size=hidden_size, frequency_embedding_size=frequency_embedding_size)
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size
        self.outdim = hidden_size

    def forward(self, s, bs):
        if s.ndim == 1:
            s = s[:, None]
        assert s.ndim == 2
        if s.shape[0] != bs:
            s = s.repeat(bs // s.shape[0], 1)
            assert s.shape[0] == bs
        b, dims = s.shape[0], s.shape[1]
        # Embed each scalar independently, then concatenate along the feature dim.
        s = rearrange(s, "b d -> (b d)")
        s_freq = self.timestep_embedding(s, self.frequency_embedding_size).to(self.dtype)
        s_emb = self.mlp(s_freq)
        s_emb = rearrange(s_emb, "(b d) d2 -> b (d d2)", b=b, d=dims, d2=self.outdim)
        return s_emb

    @property
    def dtype(self):
        # Return the dtype of the model parameters.
        return next(self.parameters()).dtype
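

# Usage sketch (illustrative): embedding a shared (height, width) pair for a batch of 4;
# each scalar gets its own hidden_size embedding, so the output is (bs, 2 * hidden_size):
#
#   size_emb = SizeEmbedder(hidden_size=1152)
#   hw = torch.tensor([[512., 512.]])   # one (h, w) row, repeated to the batch size
#   size_emb(hw, bs=4).shape            # (4, 2304)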


class LabelEmbedder(nn.Module):
    """
    Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
    """

    def __init__(self, num_classes, hidden_size, dropout_prob):
        super().__init__()
        use_cfg_embedding = dropout_prob > 0
        self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
        self.num_classes = num_classes
        self.dropout_prob = dropout_prob

    def token_drop(self, labels, force_drop_ids=None):
        """
        Drops labels to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
        else:
            drop_ids = force_drop_ids == 1
        labels = torch.where(drop_ids, self.num_classes, labels)
        return labels

    def forward(self, labels, train, force_drop_ids=None):
        use_dropout = self.dropout_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            labels = self.token_drop(labels, force_drop_ids)
        return self.embedding_table(labels)
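

# CFG sketch (illustrative): with dropout_prob > 0 an extra "null" row is appended to
# the table at index num_classes, and dropped labels are routed to it during training:
#
#   le = LabelEmbedder(num_classes=1000, hidden_size=1152, dropout_prob=0.1)
#   labels = torch.randint(0, 1000, (8,))
#   y = le(labels, train=True)   # ~10% of rows use the null embedding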


class CaptionEmbedder(nn.Module):
    """
    Embeds caption features into vector representations. Also handles caption dropout
    for classifier-free guidance.
    """

    def __init__(self, in_channels, hidden_size, uncond_prob, act_layer=lambda: nn.GELU(approximate='tanh'), token_num=120):
        super().__init__()
        # timm's Mlp instantiates act_layer, so it must be a factory, not an instance.
        self.y_proj = Mlp(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size,
                          act_layer=act_layer, drop=0)
        self.register_buffer("y_embedding", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))
        self.uncond_prob = uncond_prob

    def token_drop(self, caption, force_drop_ids=None):
        """
        Drops captions to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
        else:
            drop_ids = force_drop_ids == 1
        caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption)
        return caption

    def forward(self, caption, train, force_drop_ids=None):
        if train:
            assert caption.shape[2:] == self.y_embedding.shape
        use_dropout = self.uncond_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            caption = self.token_drop(caption, force_drop_ids)
        caption = self.y_proj(caption)
        return caption
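

# Shape sketch (illustrative): caption features arrive as (B, 1, token_num, in_channels);
# dropped samples are replaced wholesale by the learned null-caption embedding:
#
#   ce = CaptionEmbedder(in_channels=4096, hidden_size=1152, uncond_prob=0.1)
#   y = torch.randn(4, 1, 120, 4096)
#   ce(y, train=True).shape   # (4, 1, 120, 1152)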


class CaptionEmbedderDoubleBr(nn.Module):
    """
    Double-branch caption embedder: embeds a pooled global caption vector alongside the
    per-token caption features. Also handles caption dropout for classifier-free guidance.
    """

    def __init__(self, in_channels, hidden_size, uncond_prob, act_layer=lambda: nn.GELU(approximate='tanh'), token_num=120):
        super().__init__()
        # timm's Mlp instantiates act_layer, so it must be a factory, not an instance.
        self.proj = Mlp(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size,
                        act_layer=act_layer, drop=0)
        self.embedding = nn.Parameter(torch.randn(1, in_channels) / 10 ** 0.5)
        self.y_embedding = nn.Parameter(torch.randn(token_num, in_channels) / 10 ** 0.5)
        self.uncond_prob = uncond_prob

    def token_drop(self, global_caption, caption, force_drop_ids=None):
        """
        Drops captions to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            drop_ids = torch.rand(global_caption.shape[0], device=global_caption.device) < self.uncond_prob
        else:
            drop_ids = force_drop_ids == 1
        global_caption = torch.where(drop_ids[:, None], self.embedding, global_caption)
        caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption)
        return global_caption, caption

    def forward(self, caption, train, force_drop_ids=None):
        assert caption.shape[2:] == self.y_embedding.shape
        global_caption = caption.mean(dim=2).squeeze()
        use_dropout = self.uncond_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            global_caption, caption = self.token_drop(global_caption, caption, force_drop_ids)
        y_embed = self.proj(global_caption)
        return y_embed, caption