Srikumar26 committed on
Commit d6bd18e
1 Parent(s): d41a1fc

Create model.py

Files changed (1):
  model.py (+297, -0)
model.py ADDED
@@ -0,0 +1,297 @@
# --------------------------------------------------------
# References:
# MAE: https://github.com/facebookresearch/mae
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------

from functools import partial
import numpy as np
import torch
import torch.nn as nn

from timm.models.vision_transformer import PatchEmbed, Block
from huggingface_hub import PyTorchModelHubMixin

def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb

def get_1d_sincos_pos_embed_from_grid_torch(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    # torch.arange requires a torch dtype; np.float is not valid here (and was removed from NumPy)
    omega = torch.arange(embed_dim // 2, dtype=torch.float32, device=pos.device)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = torch.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = torch.sin(out)  # (M, D/2)
    emb_cos = torch.cos(out)  # (M, D/2)

    emb = torch.cat([emb_sin, emb_cos], dim=1)  # (M, D)
    return emb.double()

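# Quick usage sketch (illustrative, not part of the original training code):
# for a 224x224 input with 16x16 patches the grid is 14x14, so
# get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)
# returns an array of shape (1 + 14 * 14, 768) = (197, 768).
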
class MaskedAutoencoderViT(nn.Module, PyTorchModelHubMixin):
    """ Masked Autoencoder with VisionTransformer backbone
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3,
                 embed_dim=1024, depth=24, num_heads=16,
                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
                 mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):
        super().__init__()

        self.in_c = in_chans

        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.blocks = nn.ModuleList([
            Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------

        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)

        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))

        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding

        self.decoder_blocks = nn.ModuleList([
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
            for i in range(decoder_depth)])

        self.decoder_norm = norm_layer(decoder_embed_dim)
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True)  # decoder to patch
        # --------------------------------------------------------------------------

        self.norm_pix_loss = norm_pix_loss

        self.initialize_weights()

    def initialize_weights(self):
        # initialization
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))

        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=.02)
        torch.nn.init.normal_(self.mask_token, std=.02)

        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def patchify(self, imgs, p, c):
        """
        imgs: (N, C, H, W)
        p: Patch embed patch size
        c: Num channels
        x: (N, L, patch_size**2 *C)
        """
        # p = self.patch_embed.patch_size[0]
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0

        # c = self.in_c
        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], c, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * c))
        return x

    def unpatchify(self, x, p, c):
        """
        x: (N, L, patch_size**2 *C)
        p: Patch embed patch size
        c: Num channels
        imgs: (N, C, H, W)
        """
        # c = self.in_c
        # p = self.patch_embed.patch_size[0]
        h = w = int(x.shape[1]**.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))
        return imgs

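    # Shape sketch (illustrative): with imgs of shape (N, 3, 224, 224), p=16 and c=3,
    # patchify returns (N, 196, 768) and unpatchify inverts it back to (N, 3, 224, 224).
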
    def random_masking(self, x, mask_ratio):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))

        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return x_masked, mask, ids_restore

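    # Example (illustrative): with L = 196 patches and mask_ratio = 0.75, len_keep = 49,
    # so x_masked is (N, 49, D), mask is (N, 196) with 147 ones, and ids_restore is (N, 196).
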
    def forward_encoder(self, x, mask_ratio):
        # embed patches
        x = self.patch_embed(x)

        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]

        # masking: length -> length * (1 - mask_ratio)
        x, mask, ids_restore = self.random_masking(x, mask_ratio)

        # append cls token
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)

        return x, mask, ids_restore

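    # Shape sketch (illustrative): for a 224x224 input, 16x16 patches and mask_ratio = 0.75,
    # the returned latent is (N, 1 + 49, embed_dim) (cls token plus visible patches),
    # while mask and ids_restore are both (N, 196).
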
    def forward_decoder(self, x, ids_restore):
        # embed tokens
        x = self.decoder_embed(x)

        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token

        # add pos embed
        x = x + self.decoder_pos_embed

        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)

        # predictor projection
        x = self.decoder_pred(x)

        # remove cls token
        x = x[:, 1:, :]

        return x

    def forward_loss(self, imgs, pred, mask):
        """
        imgs: [N, 3, H, W]
        pred: [N, L, p*p*3]
        mask: [N, L], 0 is keep, 1 is remove
        """
        # target = imgs[:, :3, :, :]
        # pred = self.unpatchify(pred, self.patch_embed.patch_size[0], self.in_c)
        # pred = self.patchify(pred[:, :3, :, :], self.patch_embed.patch_size[0], 3)
        # target = self.patchify(target, self.patch_embed.patch_size[0], 3)
        target = self.patchify(imgs, self.patch_embed.patch_size[0], self.in_c)
        if self.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5

        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch

        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def forward(self, imgs, mask_ratio=0.75):
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)
        pred = self.forward_decoder(latent, ids_restore)  # [N, L, p*p*3]
        loss = self.forward_loss(imgs, pred, mask)
        return loss, pred, mask
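

if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch, not part of the original file):
    # the tiny configuration below is an arbitrary choice just to exercise a
    # forward pass and check the returned loss / prediction / mask shapes.
    model = MaskedAutoencoderViT(
        img_size=32, patch_size=8, in_chans=3,
        embed_dim=64, depth=2, num_heads=4,
        decoder_embed_dim=32, decoder_depth=1, decoder_num_heads=4,
    )
    imgs = torch.randn(2, 3, 32, 32)
    loss, pred, mask = model(imgs, mask_ratio=0.75)
    # 32/8 = 4 patches per side -> 16 patches; each prediction is an 8*8*3 = 192-dim vector
    print(loss.item(), pred.shape, mask.shape)  # scalar, (2, 16, 192), (2, 16)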