from typing import Literal, Optional, Tuple
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..modules.utils import convert_module_to, manual_cast, str_to_dtype
from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
from ..modules.attention import RotaryPositionEmbedder


class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t: a 1-D Tensor of N indices, one per batch element.
                These may be fractional.
            dim: the dimension of the output.
            max_period: controls the minimum frequency of the embeddings.

        Returns:
            an (N, D) Tensor of positional embeddings.
        """
        half = dim // 2
        freqs = torch.exp(
            -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb
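
# A minimal usage sketch (hypothetical values, for illustration only):
# emb = TimestepEmbedder(hidden_size=1024)
# t = torch.tensor([0.0, 250.5, 999.0])   # one (possibly fractional) timestep per batch element
# out = emb(t)                            # -> (3, 1024): 256-dim sinusoidal features mapped by the MLP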


class SparseStructureFlowModel(nn.Module):
    """
    Transformer-based flow model operating on a dense (resolution ** 3) grid of
    latent features, conditioned on a scalar timestep and cross-attention tokens.
    """
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        pe_mode: Literal["ape", "rope"] = "ape",
        rope_freq: Tuple[float, float] = (1.0, 10000.0),
        dtype: str = 'float32',
        use_checkpoint: bool = False,
        share_mod: bool = False,
        initialization: str = 'vanilla',
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
        **kwargs
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.pe_mode = pe_mode
        self.use_checkpoint = use_checkpoint
        self.share_mod = share_mod
        self.initialization = initialization
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        self.dtype = str_to_dtype(dtype)

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution] * 3], indexing='ij')
            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
            pos_emb = pos_embedder(coords)
            self.register_buffer("pos_emb", pos_emb)
        elif pe_mode == "rope":
            pos_embedder = RotaryPositionEmbedder(self.model_channels // self.num_heads, 3)
            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution] * 3], indexing='ij')
            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
            rope_phases = pos_embedder(coords)
            self.register_buffer("rope_phases", rope_phases)

        if pe_mode != "rope":
            self.rope_phases = None
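
        # For example, with resolution=16 and pe_mode="ape" (values chosen only for
        # illustration): `coords` enumerates all 16 ** 3 = 4096 voxel positions, so
        # `pos_emb` is a (4096, model_channels) buffer added to the token sequence in `forward`.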

        self.input_layer = nn.Linear(in_channels, model_channels)

        self.blocks = nn.ModuleList([
            ModulatedTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                rope_freq=rope_freq,
                share_mod=share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_layer = nn.Linear(model_channels, out_channels)

        self.initialize_weights()
        self.convert_to(self.dtype)

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to(self, dtype: torch.dtype) -> None:
        """
        Convert the torso of the model to the specified dtype.
        """
        self.dtype = dtype
        self.blocks.apply(partial(convert_module_to, dtype=dtype))

    def initialize_weights(self) -> None:
        if self.initialization == 'vanilla':
            # Xavier-uniform init for all linear layers.
            def _basic_init(module):
                if isinstance(module, nn.Linear):
                    torch.nn.init.xavier_uniform_(module.weight)
                    if module.bias is not None:
                        nn.init.constant_(module.bias, 0)
            self.apply(_basic_init)

            # Normal init for the timestep embedding MLP.
            nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
            nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

            # Zero-out the adaLN modulation layers.
            if self.share_mod:
                nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
            else:
                for block in self.blocks:
                    nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                    nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

            # Zero-out the output projection.
            nn.init.constant_(self.out_layer.weight, 0)
            nn.init.constant_(self.out_layer.bias, 0)

        elif self.initialization == 'scaled':
            # Width-scaled normal init for all linear layers.
            def _basic_init(module):
                if isinstance(module, nn.Linear):
                    torch.nn.init.normal_(module.weight, std=np.sqrt(2.0 / (5.0 * self.model_channels)))
                    if module.bias is not None:
                        nn.init.constant_(module.bias, 0)
            self.apply(_basic_init)

            # Scale down the residual-branch output projections with depth.
            def _scaled_init(module):
                if isinstance(module, nn.Linear):
                    torch.nn.init.normal_(module.weight, std=1.0 / np.sqrt(5 * self.num_blocks * self.model_channels))
                    if module.bias is not None:
                        nn.init.constant_(module.bias, 0)
            for block in self.blocks:
                block.self_attn.to_out.apply(_scaled_init)
                block.cross_attn.to_out.apply(_scaled_init)
                block.mlp.mlp[2].apply(_scaled_init)

            nn.init.normal_(self.input_layer.weight, std=1.0 / np.sqrt(self.in_channels))
            nn.init.zeros_(self.input_layer.bias)

            nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
            nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

            # Zero-out the adaLN modulation layers.
            if self.share_mod:
                nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
            else:
                for block in self.blocks:
                    nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                    nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

            # Zero-out the output projection.
            nn.init.constant_(self.out_layer.weight, 0)
            nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
            f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"

        # Flatten the (res, res, res) grid into a token sequence of length res ** 3.
        h = x.view(*x.shape[:2], -1).permute(0, 2, 1).contiguous()

        h = self.input_layer(h)
        if self.pe_mode == "ape":
            h = h + self.pos_emb[None]
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = manual_cast(t_emb, self.dtype)
        h = manual_cast(h, self.dtype)
        cond = manual_cast(cond, self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond, self.rope_phases)
        h = manual_cast(h, x.dtype)
        h = F.layer_norm(h, h.shape[-1:])
        h = self.out_layer(h)

        # Restore the (N, out_channels, res, res, res) grid layout.
        h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution] * 3).contiguous()

        return h
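
# A minimal end-to-end sketch (hypothetical shapes and hyperparameters, chosen only
# for illustration; they are not taken from any released configuration):
# model = SparseStructureFlowModel(
#     resolution=16, in_channels=8, model_channels=768, cond_channels=1024,
#     out_channels=8, num_blocks=12,
# )
# x = torch.randn(2, 8, 16, 16, 16)   # latent grid: (N, in_channels, res, res, res)
# t = torch.rand(2) * 1000            # one scalar timestep per batch element
# cond = torch.randn(2, 77, 1024)     # cross-attention conditioning tokens (N, L_cond, cond_channels)
# v = model(x, t, cond)               # -> (2, 8, 16, 16, 16)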