MVRL

Transformers
PyTorch
Inference Endpoints
Srikumar26 committed on
Commit
e28f8ca
1 Parent(s): 247c284

Create model.py

Files changed (1)
  1. model.py +381 -0
model.py ADDED
@@ -0,0 +1,381 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------

from functools import partial

import torch
import torch.nn as nn
import numpy as np
from timm.models.vision_transformer import PatchEmbed, Block

from huggingface_hub import PyTorchModelHubMixin
from timm.models.layers import DropPath
import math
import torch.nn.functional as F

def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb

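# Note: the helpers above build the standard fixed sin-cos encoding. For position m and
# frequency index i (0 <= i < D/2), emb[m, i] = sin(m / 10000**(2i/D)) and
# emb[m, D/2 + i] = cos(m / 10000**(2i/D)); the 2-D version concatenates the 1-D encodings
# of the two grid coordinates, each using D/2 channels. For example,
# get_2d_sincos_pos_embed(768, 14, cls_token=True) returns an array of shape (197, 768).
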
# Transformer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout.
class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.hidden_features = hidden_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x

class Attention(nn.Module):
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            # learnable biases for q and v only; the k bias stays at zero and is
            # assembled together with them in forward()
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
        # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

class NormalCell(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, class_token=False, group=1,
                 tokens_type='transformer', kernel=3, mlp_hidden_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.class_token = class_token
        if tokens_type == 'transformer':
            self.attn = Attention(
                dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        else:
            raise NotImplementedError()

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = mlp_hidden_dim if mlp_hidden_dim is not None else int(dim * mlp_ratio)
        PCM_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        # PCM: parallel convolutional module, run on the reshaped token grid alongside
        # self-attention and added back as a residual in forward()
        self.PCM = nn.Sequential(
            nn.Conv2d(dim, PCM_dim, kernel, 1, kernel//2, 1, group),
            nn.BatchNorm2d(PCM_dim),
            nn.SiLU(inplace=True),
            nn.Conv2d(PCM_dim, dim, kernel, 1, kernel//2, 1, group),
        )

    def forward(self, x):
        b, n, c = x.shape
        if self.class_token:
            n = n - 1
            wh = int(math.sqrt(n))
            convX = self.drop_path(self.PCM(x[:, 1:, :].view(b, wh, wh, c).permute(0, 3, 1, 2).contiguous()).permute(0, 2, 3, 1).contiguous().view(b, n, c))
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x[:, 1:] = x[:, 1:] + convX
        else:
            wh = int(math.sqrt(n))
            x_2d = x.view(b, wh, wh, c).permute(0, 3, 1, 2).contiguous()
            convX = self.drop_path(self.PCM(x_2d).permute(0, 2, 3, 1).contiguous().view(b, n, c))
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + convX
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

class MaskedAutoencoderViTAE(nn.Module, PyTorchModelHubMixin):
    """ Masked Autoencoder with VisionTransformer backbone
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3,
                 embed_dim=768, depth=12, num_heads=12,
                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
                 mlp_ratio=4., norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_pix_loss=False, kernel=3, mlp_hidden_dim=None):
        '''
        @Param kernel: int, controls the kernel size in the PCM
        @Param mlp_hidden_dim: int, the hidden dimension of the FFN; overrides mlp_ratio, default None
        '''
        super().__init__()

        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.blocks = nn.ModuleList([
            NormalCell(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer, kernel=kernel, class_token=True, group=embed_dim // 4, mlp_hidden_dim=mlp_hidden_dim)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------

        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)

        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))

        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.decoder_blocks = nn.ModuleList([
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(decoder_depth)])

        self.decoder_norm = norm_layer(decoder_embed_dim)
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True)  # decoder to per-patch pixels
        # --------------------------------------------------------------------------

        self.norm_pix_loss = norm_pix_loss

        self.initialize_weights()

    def initialize_weights(self):
        # initialization
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))

        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=.02)
        torch.nn.init.normal_(self.mask_token, std=.02)

        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def patchify(self, imgs):
        """
        imgs: (N, 3, H, W)
        x: (N, L, patch_size**2 *3)
        """
        p = self.patch_embed.patch_size[0]
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0

        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3))
        return x

    def unpatchify(self, x):
        """
        x: (N, L, patch_size**2 *3)
        imgs: (N, 3, H, W)
        """
        p = self.patch_embed.patch_size[0]
        h = w = int(x.shape[1]**.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
        return imgs

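    # Shape note: with the defaults (img_size=224, patch_size=16), patchify maps
    # (N, 3, 224, 224) -> (N, 14*14, 16*16*3) = (N, 196, 768), and unpatchify inverts it exactly.
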
    def random_masking(self, x, mask_ratio):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))

        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).expand(-1, -1, D))

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return x_masked, mask, ids_restore

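    # Example: with L = 196 patch tokens and mask_ratio = 0.75, len_keep = int(196 * 0.25) = 49,
    # so 49 visible tokens go into the encoder and the remaining 147 positions are filled with
    # mask tokens in the decoder.
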
    def forward_encoder(self, x, mask_ratio):
        # embed patches
        x = self.patch_embed(x)

        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]

        # masking: keep length * (1 - mask_ratio) tokens
        x, mask, ids_restore = self.random_masking(x, mask_ratio)

        # append cls token
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)

        return x, mask, ids_restore

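    # With the defaults and mask_ratio=0.75, forward_encoder maps (N, 3, 224, 224) to latent tokens
    # of shape (N, 1 + 49, 768), plus mask (N, 196) and ids_restore (N, 196).
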
    def forward_decoder(self, x, ids_restore):
        # embed tokens
        x = self.decoder_embed(x)

        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).expand(-1, -1, x.shape[2]))  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token

        # add pos embed
        x = x + self.decoder_pos_embed

        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)

        # predictor projection
        x = self.decoder_pred(x)

        # remove cls token
        x = x[:, 1:, :]

        return x

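    # forward_decoder restores the full sequence before prediction: with the defaults, latents of
    # shape (N, 50, 768) become (N, 197, 512) inside the decoder and (N, 196, 768) per-patch pixel
    # predictions after decoder_pred (cls token dropped).
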
    def forward_loss(self, imgs, pred, mask):
        """
        imgs: [N, 3, H, W]
        pred: [N, L, p*p*3]
        mask: [N, L], 0 is keep, 1 is remove
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5

        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def forward(self, imgs, mask_ratio=0.75):
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)
        pred = self.forward_decoder(latent, ids_restore)  # [N, L, p*p*3]
        loss = self.forward_loss(imgs, pred, mask)
        return loss, pred, mask
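
A minimal usage sketch for the file above, assuming model.py is importable from the working directory and standing in random tensors for real imagery; the repo id passed to from_pretrained is an assumption, not taken from this commit:

import torch
from model import MaskedAutoencoderViTAE

model = MaskedAutoencoderViTAE()        # defaults: 224x224 input, 16x16 patches, ViT-Base-sized encoder
imgs = torch.randn(2, 3, 224, 224)      # dummy batch in place of real images
loss, pred, mask = model(imgs, mask_ratio=0.75)
print(loss.item(), pred.shape, mask.shape)  # pred: (2, 196, 768), mask: (2, 196)

# Since the class mixes in PyTorchModelHubMixin, published weights could be loaded with, e.g.:
# model = MaskedAutoencoderViTAE.from_pretrained("Srikumar26/MVRL")  # repo id assumed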