Update modules/diffusion_transformer.py
modules/diffusion_transformer.py  +240 -237  CHANGED
@@ -1,237 +1,240 @@
import torch
from torch import nn
import math

from modules.gpt_fast.model import ModelArgs, Transformer
# from modules.torchscript_modules.gpt_fast_model import ModelArgs, Transformer
from modules.wavenet import WN
from modules.commons import sequence_mask

from torch.nn.utils import weight_norm

def modulate(x, shift, scale):
    return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)


#################################################################################
#               Embedding Layers for Timesteps and Class Labels                 #
#################################################################################

class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size
        self.max_period = 10000
        self.scale = 1000

        half = frequency_embedding_size // 2
        freqs = torch.exp(
            -math.log(self.max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        )
        self.register_buffer("freqs", freqs)

    def timestep_embedding(self, t):
        """
        Create sinusoidal timestep embeddings.
        :param t: a 1-D Tensor of N indices, one per batch element.
                  These may be fractional.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py

        args = self.scale * t[:, None].float() * self.freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if self.frequency_embedding_size % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t)
        t_emb = self.mlp(t_freq)
        return t_emb


class StyleEmbedder(nn.Module):
    """
    Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
    """
    def __init__(self, input_size, hidden_size, dropout_prob):
        super().__init__()
        use_cfg_embedding = dropout_prob > 0
        self.embedding_table = nn.Embedding(int(use_cfg_embedding), hidden_size)
        self.style_in = weight_norm(nn.Linear(input_size, hidden_size, bias=True))
        self.input_size = input_size
        self.dropout_prob = dropout_prob

    def forward(self, labels, train, force_drop_ids=None):
        use_dropout = self.dropout_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            labels = self.token_drop(labels, force_drop_ids)
        else:
            labels = self.style_in(labels)
        embeddings = labels
        return embeddings

class FinalLayer(nn.Module):
    """
    The final layer of DiT.
    """
    def __init__(self, hidden_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = weight_norm(nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True))
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        x = modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x

class DiT(torch.nn.Module):
    def __init__(
        self,
        args
    ):
        super(DiT, self).__init__()
        self.time_as_token = args.DiT.time_as_token if hasattr(args.DiT, 'time_as_token') else False
        self.style_as_token = args.DiT.style_as_token if hasattr(args.DiT, 'style_as_token') else False
        self.uvit_skip_connection = args.DiT.uvit_skip_connection if hasattr(args.DiT, 'uvit_skip_connection') else False
        model_args = ModelArgs(
            block_size=16384,  # args.DiT.block_size,
            n_layer=args.DiT.depth,
            n_head=args.DiT.num_heads,
            dim=args.DiT.hidden_dim,
            head_dim=args.DiT.hidden_dim // args.DiT.num_heads,
            vocab_size=1024,
            uvit_skip_connection=self.uvit_skip_connection,
        )
        self.transformer = Transformer(model_args)
        self.in_channels = args.DiT.in_channels
        self.out_channels = args.DiT.in_channels
        self.num_heads = args.DiT.num_heads

        self.x_embedder = weight_norm(nn.Linear(args.DiT.in_channels, args.DiT.hidden_dim, bias=True))

        self.content_type = args.DiT.content_type  # 'discrete' or 'continuous'
        self.content_codebook_size = args.DiT.content_codebook_size  # for discrete content
        self.content_dim = args.DiT.content_dim  # for continuous content
        self.cond_embedder = nn.Embedding(args.DiT.content_codebook_size, args.DiT.hidden_dim)  # discrete content
        self.cond_projection = nn.Linear(args.DiT.content_dim, args.DiT.hidden_dim, bias=True)  # continuous content

        self.is_causal = args.DiT.is_causal

        self.n_f0_bins = args.DiT.n_f0_bins
        self.f0_bins = torch.arange(2, 1024, 1024 // args.DiT.n_f0_bins)
        self.f0_embedder = nn.Embedding(args.DiT.n_f0_bins, args.DiT.hidden_dim)
        self.f0_condition = args.DiT.f0_condition

        self.t_embedder = TimestepEmbedder(args.DiT.hidden_dim)
        self.t_embedder2 = TimestepEmbedder(args.wavenet.hidden_dim)
        # self.style_embedder1 = weight_norm(nn.Linear(1024, args.DiT.hidden_dim, bias=True))
        # self.style_embedder2 = weight_norm(nn.Linear(1024, args.style_encoder.dim, bias=True))

        input_pos = torch.arange(16384)
        self.register_buffer("input_pos", input_pos)

        self.conv1 = nn.Linear(args.DiT.hidden_dim, args.wavenet.hidden_dim)
        self.conv2 = nn.Conv1d(args.wavenet.hidden_dim, args.DiT.in_channels, 1)
        self.final_layer_type = args.DiT.final_layer_type  # mlp or wavenet
        if self.final_layer_type == 'wavenet':
            self.wavenet = WN(hidden_channels=args.wavenet.hidden_dim,
                              kernel_size=args.wavenet.kernel_size,
                              dilation_rate=args.wavenet.dilation_rate,
                              n_layers=args.wavenet.num_layers,
                              gin_channels=args.wavenet.hidden_dim,
                              p_dropout=args.wavenet.p_dropout,
                              causal=False)
            self.final_layer = FinalLayer(args.wavenet.hidden_dim, 1, args.wavenet.hidden_dim)
        else:
            self.final_mlp = nn.Sequential(
                nn.Linear(args.DiT.hidden_dim, args.DiT.hidden_dim),
                nn.SiLU(),
                nn.Linear(args.DiT.hidden_dim, args.DiT.in_channels),
            )
        self.transformer_style_condition = args.DiT.style_condition
        self.wavenet_style_condition = args.wavenet.style_condition
        assert args.DiT.style_condition == args.wavenet.style_condition

        self.class_dropout_prob = args.DiT.class_dropout_prob
        self.content_mask_embedder = nn.Embedding(1, args.DiT.hidden_dim)
        self.res_projection = nn.Linear(args.DiT.hidden_dim, args.wavenet.hidden_dim)  # residual connection from transformer output to final output
        self.long_skip_connection = args.DiT.long_skip_connection
        self.skip_linear = nn.Linear(args.DiT.hidden_dim + args.DiT.in_channels, args.DiT.hidden_dim)

        self.cond_x_merge_linear = nn.Linear(args.DiT.hidden_dim + args.DiT.in_channels * 2 +
                                             args.style_encoder.dim * self.transformer_style_condition * (not self.style_as_token),
                                             args.DiT.hidden_dim)
        if self.style_as_token:
            self.style_in = nn.Linear(args.style_encoder.dim, args.DiT.hidden_dim)

    def setup_caches(self, max_batch_size, max_seq_length):
        self.transformer.setup_caches(max_batch_size, max_seq_length, use_kv_cache=False)

    def forward(self, x, prompt_x, x_lens, t, style, cond, f0=None, mask_content=False):
        class_dropout = False
        if self.training and torch.rand(1) < self.class_dropout_prob:
            class_dropout = True
        if not self.training and mask_content:
            class_dropout = True
        # cond_in_module = self.cond_embedder if self.content_type == 'discrete' else self.cond_projection
        cond_in_module = self.cond_projection

        B, _, T = x.size()

        t1 = self.t_embedder(t)  # (N, D)

        cond = cond_in_module(cond)
        if self.f0_condition and f0 is not None:
            quantized_f0 = torch.bucketize(f0, self.f0_bins.to(f0.device))  # (N, T)
            cond = cond + self.f0_embedder(quantized_f0)

        x = x.transpose(1, 2)
        prompt_x = prompt_x.transpose(1, 2)

        x_in = torch.cat([x, prompt_x, cond], dim=-1)
        if self.transformer_style_condition and not self.style_as_token:
            x_in = torch.cat([x_in, style[:, None, :].repeat(1, T, 1)], dim=-1)
        if class_dropout:
            x_in[..., self.in_channels:] = x_in[..., self.in_channels:] * 0
        x_in = self.cond_x_merge_linear(x_in)  # (N, T, D)

        if self.style_as_token:
            style = self.style_in(style)
            style = torch.zeros_like(style) if class_dropout else style
            x_in = torch.cat([style.unsqueeze(1), x_in], dim=1)
        if self.time_as_token:
            x_in = torch.cat([t1.unsqueeze(1), x_in], dim=1)
        x_mask = sequence_mask(x_lens + self.style_as_token + self.time_as_token).to(x.device).unsqueeze(1)
        input_pos = self.input_pos[:x_in.size(1)]  # (T,)
        x_mask_expanded = x_mask[:, None, :].repeat(1, 1, x_in.size(1), 1) if not self.is_causal else None
        x_res = self.transformer(x_in, None if self.time_as_token else t1.unsqueeze(1), input_pos, x_mask_expanded)
        x_res = x_res[:, 1:] if self.time_as_token else x_res
        x_res = x_res[:, 1:] if self.style_as_token else x_res
        if self.long_skip_connection:
            x_res = self.skip_linear(torch.cat([x_res, x], dim=-1))
        if self.final_layer_type == 'wavenet':
            x = self.conv1(x_res)
            x = x.transpose(1, 2)
            t2 = self.t_embedder2(t)
            x = self.wavenet(x, x_mask, g=t2.unsqueeze(2)).transpose(1, 2) + self.res_projection(
                x_res)  # long residual connection
            x = self.final_layer(x, t1).transpose(1, 2)
            x = self.conv2(x)
        else:
            x = self.final_mlp(x_res)
            x = x.transpose(1, 2)
        return x
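
For orientation, below is a minimal usage sketch (not part of the commit) showing how the updated DiT might be instantiated and called. It assumes the repo's own modules (modules.gpt_fast, modules.wavenet, modules.commons) are importable and that the config object exposes the attribute names the constructor reads above; every concrete value in the config (hidden_dim=512, in_channels=80, and so on) is an illustrative assumption, not the project's actual configuration.

from types import SimpleNamespace

import torch

from modules.diffusion_transformer import DiT

# Hypothetical config: field names mirror what DiT.__init__ reads; values are made up.
cfg = SimpleNamespace(
    DiT=SimpleNamespace(
        hidden_dim=512, num_heads=8, depth=12, in_channels=80,
        content_type='continuous', content_codebook_size=1024, content_dim=512,
        is_causal=False, n_f0_bins=512, f0_condition=False,
        final_layer_type='mlp', style_condition=True,
        class_dropout_prob=0.1, long_skip_connection=False,
    ),
    wavenet=SimpleNamespace(
        hidden_dim=512, kernel_size=5, dilation_rate=1, num_layers=8,
        p_dropout=0.2, style_condition=True,  # must match DiT.style_condition (asserted above)
    ),
    style_encoder=SimpleNamespace(dim=192),
)

model = DiT(cfg).eval()
model.setup_caches(max_batch_size=1, max_seq_length=1024)  # prepares the gpt_fast transformer caches/masks

B, C, T = 1, cfg.DiT.in_channels, 200
x = torch.randn(B, C, T)                       # noisy target frames, (N, in_channels, T)
prompt_x = torch.randn(B, C, T)                # prompt/reference frames, same layout as x
x_lens = torch.tensor([T])                     # valid length of each batch item
t = torch.rand(B)                              # diffusion timestep per item
style = torch.randn(B, cfg.style_encoder.dim)  # global style vector
cond = torch.randn(B, T, cfg.DiT.content_dim)  # continuous content features

with torch.no_grad():
    out = model(x, prompt_x, x_lens, t, style, cond)
print(out.shape)  # expected (N, in_channels, T)

With final_layer_type='mlp' the output comes from the final MLP branch; switching the (assumed) config to 'wavenet' would route the transformer output through the WN post-net and FinalLayer path instead.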