# modeling_stdit2.py: STDiT2 model definition for OpenSora-STDiT-v2-stage2
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding
from transformers import PreTrainedModel

from .configuration_stdit2 import STDiT2Config
from .layers import (
    STDiT2Block,
    CaptionEmbedder,
    PatchEmbed3D,
    T2IFinalLayer,
    TimestepEmbedder,
    SizeEmbedder,
    PositionEmbedding2D,
)
from .utils import (
    get_2d_sincos_pos_embed,
    approx_gelu,
)

# The sequence-parallel helpers are not bundled with this standalone checkpoint; they are
# expected to live in the Open-Sora training codebase (opensora.acceleration.*) and are only
# needed when config.enable_sequence_parallelism is True, so import them opportunistically.
try:
    from opensora.acceleration.communications import (
        gather_forward_split_backward,
        split_forward_gather_backward,
    )
    from opensora.acceleration.parallel_states import get_sequence_parallel_group
except ImportError:
    gather_forward_split_backward = split_forward_gather_backward = get_sequence_parallel_group = None


class STDiT2(PreTrainedModel):
config_class = STDiT2Config
def __init__(
self,
config: STDiT2Config
):
super().__init__(config)
self.pred_sigma = config.pred_sigma
self.in_channels = config.in_channels
self.out_channels = config.in_channels * 2 if config.pred_sigma else config.in_channels
self.hidden_size = config.hidden_size
self.num_heads = config.num_heads
self.no_temporal_pos_emb = config.no_temporal_pos_emb
self.depth = config.depth
self.mlp_ratio = config.mlp_ratio
self.enable_flash_attn = config.enable_flash_attn
self.enable_layernorm_kernel = config.enable_layernorm_kernel
self.enable_sequence_parallelism = config.enable_sequence_parallelism
# support dynamic input
self.patch_size = config.patch_size
self.input_size = config.input_size
self.input_sq_size = config.input_sq_size
self.pos_embed = PositionEmbedding2D(config.hidden_size)
self.x_embedder = PatchEmbed3D(config.patch_size, config.in_channels, config.hidden_size)
self.t_embedder = TimestepEmbedder(config.hidden_size)
self.t_block = nn.Sequential(nn.SiLU(), nn.Linear(config.hidden_size, 6 * config.hidden_size, bias=True))
self.t_block_temp = nn.Sequential(nn.SiLU(), nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=True)) # new
self.y_embedder = CaptionEmbedder(
in_channels=config.caption_channels,
hidden_size=config.hidden_size,
uncond_prob=config.class_dropout_prob,
act_layer=approx_gelu,
token_num=config.model_max_length,
)
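        # stochastic depth: drop-path rate increases linearly from 0 to config.drop_path across blocks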
drop_path = [x.item() for x in torch.linspace(0, config.drop_path, config.depth)]
self.rope = RotaryEmbedding(dim=self.hidden_size // self.num_heads) # new
self.blocks = nn.ModuleList(
[
STDiT2Block(
self.hidden_size,
self.num_heads,
mlp_ratio=self.mlp_ratio,
drop_path=drop_path[i],
enable_flash_attn=self.enable_flash_attn,
enable_layernorm_kernel=self.enable_layernorm_kernel,
enable_sequence_parallelism=self.enable_sequence_parallelism,
rope=self.rope.rotate_queries_or_keys,
qk_norm=config.qk_norm,
)
for i in range(self.depth)
]
)
self.final_layer = T2IFinalLayer(config.hidden_size, np.prod(self.patch_size), self.out_channels)
# multi_res
assert self.hidden_size % 3 == 0, "hidden_size must be divisible by 3"
self.csize_embedder = SizeEmbedder(self.hidden_size // 3)
self.ar_embedder = SizeEmbedder(self.hidden_size // 3)
self.fl_embedder = SizeEmbedder(self.hidden_size) # new
self.fps_embedder = SizeEmbedder(self.hidden_size) # new
# init model
self.initialize_weights()
self.initialize_temporal()
if config.freeze is not None:
assert config.freeze in ["not_temporal", "text"]
if config.freeze == "not_temporal":
self.freeze_not_temporal()
elif config.freeze == "text":
self.freeze_text()
        # sequence parallel related configs
        if self.enable_sequence_parallelism:
            assert get_sequence_parallel_group is not None, (
                "enable_sequence_parallelism requires the Open-Sora acceleration helpers "
                "(opensora.acceleration) to be importable"
            )
            self.sp_rank = dist.get_rank(get_sequence_parallel_group())
        else:
            self.sp_rank = None
def get_dynamic_size(self, x):
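        """Return the patch-grid size (T, H, W) of x, i.e. the number of temporal and
        spatial tokens after padding each axis up to a multiple of the patch size."""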
_, _, T, H, W = x.size()
if T % self.patch_size[0] != 0:
T += self.patch_size[0] - T % self.patch_size[0]
if H % self.patch_size[1] != 0:
H += self.patch_size[1] - H % self.patch_size[1]
if W % self.patch_size[2] != 0:
W += self.patch_size[2] - W % self.patch_size[2]
T = T // self.patch_size[0]
H = H // self.patch_size[1]
W = W // self.patch_size[2]
return (T, H, W)
def forward(
self, x, timestep, y, mask=None, x_mask=None, num_frames=None, height=None, width=None, ar=None, fps=None
):
"""
Forward pass of STDiT.
Args:
x (torch.Tensor): latent representation of video; of shape [B, C, T, H, W]
timestep (torch.Tensor): diffusion time steps; of shape [B]
y (torch.Tensor): representation of prompts; of shape [B, 1, N_token, C]
mask (torch.Tensor): mask for selecting prompt tokens; of shape [B, N_token]
Returns:
x (torch.Tensor): output latent representation; of shape [B, C, T, H, W]
"""
B = x.shape[0]
x = x.to(self.final_layer.linear.weight.dtype)
timestep = timestep.to(self.final_layer.linear.weight.dtype)
y = y.to(self.final_layer.linear.weight.dtype)
# === process data info ===
# 1. get dynamic size
hw = torch.cat([height[:, None], width[:, None]], dim=1)
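        # reference resolution of the batch (geometric mean of the first sample's H and W),
        # used below to rescale the 2D sin-cos position embedding relative to input_sq_size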
rs = (height[0].item() * width[0].item()) ** 0.5
csize = self.csize_embedder(hw, B)
# 2. get aspect ratio
ar = ar.unsqueeze(1)
ar = self.ar_embedder(ar, B)
data_info = torch.cat([csize, ar], dim=1)
# 3. get number of frames
fl = num_frames.unsqueeze(1)
fps = fps.unsqueeze(1)
fl = self.fl_embedder(fl, B)
fl = fl + self.fps_embedder(fps, B)
# === get dynamic shape size ===
_, _, Tx, Hx, Wx = x.size()
T, H, W = self.get_dynamic_size(x)
S = H * W
scale = rs / self.input_sq_size
base_size = round(S**0.5)
pos_emb = self.pos_embed(x, H, W, scale=scale, base_size=base_size)
# embedding
x = self.x_embedder(x) # [B, N, C]
x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
x = x + pos_emb
x = rearrange(x, "B T S C -> B (T S) C")
# shard over the sequence dim if sp is enabled
if self.enable_sequence_parallelism:
x = split_forward_gather_backward(x, get_sequence_parallel_group(), dim=1, grad_scale="down")
# prepare adaIN
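        # t_spc modulates the spatial branches (timestep + size/aspect-ratio embedding);
        # t_tmp modulates the temporal branches (timestep + frame-count/fps embedding)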
t = self.t_embedder(timestep, dtype=x.dtype) # [B, C]
t_spc = t + data_info # [B, C]
t_tmp = t + fl # [B, C]
t_spc_mlp = self.t_block(t_spc) # [B, 6*C]
t_tmp_mlp = self.t_block_temp(t_tmp) # [B, 3*C]
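        # when x_mask marks frames that are kept un-noised, also prepare the modulation
        # vectors at timestep 0 so those frames are processed as if they were not noised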
if x_mask is not None:
t0_timestep = torch.zeros_like(timestep)
t0 = self.t_embedder(t0_timestep, dtype=x.dtype)
t0_spc = t0 + data_info
t0_tmp = t0 + fl
t0_spc_mlp = self.t_block(t0_spc)
t0_tmp_mlp = self.t_block_temp(t0_tmp)
else:
t0_spc = None
t0_tmp = None
t0_spc_mlp = None
t0_tmp_mlp = None
# prepare y
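        # prompt tokens from all samples are packed into a single [1, sum(y_lens), C] sequence;
        # y_lens records each sample's token count so cross-attention can handle variable lengths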
y = self.y_embedder(y, self.training) # [B, 1, N_token, C]
if mask is not None:
if mask.shape[0] != y.shape[0]:
mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
mask = mask.squeeze(1).squeeze(1)
y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])
y_lens = mask.sum(dim=1).tolist()
else:
y_lens = [y.shape[2]] * y.shape[0]
y = y.squeeze(1).view(1, -1, x.shape[-1])
# blocks
        for block in self.blocks:
x = block(
x,
y,
t_spc_mlp,
t_tmp_mlp,
y_lens,
x_mask,
t0_spc_mlp,
t0_tmp_mlp,
T,
S,
)
if self.enable_sequence_parallelism:
x = gather_forward_split_backward(x, get_sequence_parallel_group(), dim=1, grad_scale="up")
# x.shape: [B, N, C]
# final process
x = self.final_layer(x, t, x_mask, t0_spc, T, S) # [B, N, C=T_p * H_p * W_p * C_out]
x = self.unpatchify(x, T, H, W, Tx, Hx, Wx) # [B, C_out, T, H, W]
# cast to float32 for better accuracy
x = x.to(torch.float32)
return x
def unpatchify(self, x, N_t, N_h, N_w, R_t, R_h, R_w):
"""
Args:
x (torch.Tensor): of shape [B, N, C]
Return:
x (torch.Tensor): of shape [B, C_out, T, H, W]
"""
# N_t, N_h, N_w = [self.input_size[i] // self.patch_size[i] for i in range(3)]
T_p, H_p, W_p = self.patch_size
x = rearrange(
x,
"B (N_t N_h N_w) (T_p H_p W_p C_out) -> B C_out (N_t T_p) (N_h H_p) (N_w W_p)",
N_t=N_t,
N_h=N_h,
N_w=N_w,
T_p=T_p,
H_p=H_p,
W_p=W_p,
C_out=self.out_channels,
)
# unpad
x = x[:, :, :R_t, :R_h, :R_w]
return x
def unpatchify_old(self, x):
c = self.out_channels
t, h, w = [self.input_size[i] // self.patch_size[i] for i in range(3)]
pt, ph, pw = self.patch_size
x = x.reshape(shape=(x.shape[0], t, h, w, pt, ph, pw, c))
x = rearrange(x, "n t h w r p q c -> n c t r h p w q")
imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw))
return imgs
def get_spatial_pos_embed(self, H, W, scale=1.0, base_size=None):
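        """Build a frozen [1, H*W, hidden_size] 2D sin-cos positional embedding for an H x W patch grid."""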
pos_embed = get_2d_sincos_pos_embed(
self.hidden_size,
(H, W),
scale=scale,
base_size=base_size,
)
pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False)
return pos_embed
def freeze_not_temporal(self):
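        # freeze all parameters except those of the temporal attention layers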
for n, p in self.named_parameters():
if "attn_temp" not in n:
p.requires_grad = False
def freeze_text(self):
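        # freeze the cross-attention (text conditioning) parameters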
for n, p in self.named_parameters():
if "cross_attn" in n:
p.requires_grad = False
def initialize_temporal(self):
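        # zero-init the temporal attention output projection so the temporal layers
        # start as an identity mapping through the residual connection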
for block in self.blocks:
nn.init.constant_(block.attn_temp.proj.weight, 0)
nn.init.constant_(block.attn_temp.proj.bias, 0)
def initialize_weights(self):
# Initialize transformer layers:
def _basic_init(module):
if isinstance(module, nn.Linear):
torch.nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
self.apply(_basic_init)
        # Initialize patch_embed like nn.Linear (instead of nn.Conv3d):
w = self.x_embedder.proj.weight.data
nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
# Initialize timestep embedding MLP:
nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
nn.init.normal_(self.t_block[1].weight, std=0.02)
nn.init.normal_(self.t_block_temp[1].weight, std=0.02)
# Initialize caption embedding MLP:
nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)
nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)
        # Zero-out the cross-attention output projection in the STDiT2 blocks (PixArt-style init):
for block in self.blocks:
nn.init.constant_(block.cross_attn.proj.weight, 0)
nn.init.constant_(block.cross_attn.proj.bias, 0)
# Zero-out output layers:
nn.init.constant_(self.final_layer.linear.weight, 0)
nn.init.constant_(self.final_layer.linear.bias, 0)
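

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the released checkpoint code). It assumes
    # STDiT2Config() exposes the defaults defined in configuration_stdit2.py; for real
    # inference the weights should be loaded with STDiT2.from_pretrained(...) instead.
    # Optional acceleration kernels are switched off so the sketch runs on plain PyTorch.
    config = STDiT2Config()
    config.enable_flash_attn = False
    config.enable_layernorm_kernel = False
    config.enable_sequence_parallelism = False
    model = STDiT2(config).eval()

    B, T, H, W = 1, 8, 16, 16  # arbitrary latent shape; the model supports dynamic sizes
    x = torch.randn(B, config.in_channels, T, H, W)
    timestep = torch.randint(0, 1000, (B,))
    y = torch.randn(B, 1, config.model_max_length, config.caption_channels)
    data_info = dict(
        num_frames=torch.full((B,), float(T)),
        height=torch.full((B,), float(H * 8)),  # pixel-space size, assuming an 8x VAE downsample
        width=torch.full((B,), float(W * 8)),
        ar=torch.full((B,), 1.0),  # aspect ratio height / width
        fps=torch.full((B,), 24.0),
    )
    with torch.no_grad():
        out = model(x, timestep, y, **data_info)
    print(out.shape)  # [B, out_channels, T, H, W]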