SunderAli17 committed on
Commit 8f49c43
1 Parent(s): 786a7c4

Create transformer.py

Files changed (1)
  1. eva_clip/transformer.py +792 -0
eva_clip/transformer.py ADDED
@@ -0,0 +1,792 @@
import os
import logging
from collections import OrderedDict
import math
import warnings
from typing import Callable, Optional, Sequence
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
from .utils import to_2tuple

if os.getenv('ENV_TYPE') == 'deepspeed':
    try:
        import deepspeed
        from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
    except ImportError:
        print("Please 'pip install deepspeed'")
        deepspeed = None
        from torch.utils.checkpoint import checkpoint
else:
    from torch.utils.checkpoint import checkpoint

try:
    import xformers.ops as xops
except ImportError:
    xops = None
    print("Please 'pip install xformers'")


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.
    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)


class LayerNormFp32(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: torch.Tensor):
        output = F.layer_norm(
            x.float(),
            self.normalized_shape,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(x)


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm (with cast back to input dtype)."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        return x.to(orig_type)


class QuickGELU(nn.Module):
    # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class LayerScale(nn.Module):
    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        return x.mul_(self.gamma) if self.inplace else x * self.gamma


class PatchDropout(nn.Module):
    """
    https://arxiv.org/abs/2212.00794
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # exclude CLS token
        logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}")

    def forward(self, x):
        if not self.training or self.prob == 0.:
            return x

        if self.exclude_first_token:
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch = x.size()[0]
        num_tokens = x.size()[1]

        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]

        keep_prob = 1 - self.prob
        num_patches_keep = max(1, int(num_tokens * keep_prob))

        rand = torch.randn(batch, num_tokens)
        patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices

        x = x[batch_indices, patch_indices_keep]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        if self.training and os.getenv('RoPE') == '1':
            return x, patch_indices_keep

        return x
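
# Usage sketch (editor's illustrative addition, not part of the upstream EVA-CLIP file):
# PatchDropout expects token sequences shaped (batch, num_tokens, width). In training
# mode it keeps the CLS token (when exclude_first_token=True) plus a random subset of
# the remaining patch tokens; in eval mode, or with prob == 0., it is a no-op.
#
#   drop = PatchDropout(prob=0.5, exclude_first_token=True).train()
#   tokens = torch.randn(2, 197, 768)   # e.g. ViT-B/16 at 224px: 1 CLS + 196 patches
#   kept = drop(tokens)                 # -> (2, 1 + 98, 768); returns (x, kept_indices)
#                                       #    instead when os.environ['RoPE'] == '1'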


def _in_projection_packed(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    w: torch.Tensor,
    b: Optional[torch.Tensor] = None,
):
    """
    https://github.com/pytorch/pytorch/blob/db2a237763eb8693a20788be94f8c192e762baa8/torch/nn/functional.py#L4726
    """
    E = q.size(-1)
    if k is v:
        if q is k:
            # self-attention
            return F.linear(q, w, b).chunk(3, dim=-1)
        else:
            # encoder-decoder attention
            w_q, w_kv = w.split([E, E * 2])
            if b is None:
                b_q = b_kv = None
            else:
                b_q, b_kv = b.split([E, E * 2])
            return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
    else:
        w_q, w_k, w_v = w.chunk(3)
        if b is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = b.chunk(3)
        return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)


class Attention(nn.Module):
    def __init__(
            self,
            dim,
            num_heads=8,
            qkv_bias=True,
            scaled_cosine=False,
            scale_heads=False,
            logit_scale_max=math.log(1. / 0.01),
            attn_drop=0.,
            proj_drop=0.,
            xattn=False,
            rope=False
    ):
        super().__init__()
        self.scaled_cosine = scaled_cosine
        self.scale_heads = scale_heads
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.logit_scale_max = logit_scale_max

        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
        if qkv_bias:
            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
        else:
            self.in_proj_bias = None

        if self.scaled_cosine:
            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        else:
            self.logit_scale = None
        self.attn_drop = nn.Dropout(attn_drop)
        if self.scale_heads:
            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
        else:
            self.head_scale = None
        self.out_proj = nn.Linear(dim, dim)
        self.out_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop
        self.rope = rope

    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
        L, N, C = x.shape
        q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
        if self.xattn:
            q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)

            x = xops.memory_efficient_attention(
                q, k, v,
                p=self.xattn_drop,
                scale=self.scale if self.logit_scale is None else None,
                attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,
            )
        else:
            q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)

            if self.logit_scale is not None:
                attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
                logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
                attn = attn.view(N, self.num_heads, L, L) * logit_scale
                attn = attn.view(-1, L, L)
            else:
                q = q * self.scale
                attn = torch.bmm(q, k.transpose(-1, -2))

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
                    new_attn_mask.masked_fill_(attn_mask, float("-inf"))
                    attn_mask = new_attn_mask
                attn += attn_mask

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = torch.bmm(attn, v)

        if self.head_scale is not None:
            x = x.view(N, self.num_heads, L, C) * self.head_scale
            x = x.view(-1, L, C)
        x = x.transpose(0, 1).reshape(L, N, C)
        x = self.out_proj(x)
        x = self.out_drop(x)
        return x
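
# Usage sketch (editor's illustrative addition): Attention is sequence-first, i.e. it
# takes (seq_len, batch, dim) like nn.MultiheadAttention's default layout. The xattn
# path additionally requires xformers to be installed.
#
#   attn = Attention(dim=768, num_heads=12, xattn=False)
#   x = torch.randn(197, 2, 768)        # (L, N, C)
#   y = attn(x)                         # -> (197, 2, 768)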


class CustomAttention(nn.Module):
    def __init__(
            self,
            dim,
            num_heads=8,
            qkv_bias=True,
            scaled_cosine=True,
            scale_heads=False,
            logit_scale_max=math.log(1. / 0.01),
            attn_drop=0.,
            proj_drop=0.,
            xattn=False
    ):
        super().__init__()
        self.scaled_cosine = scaled_cosine
        self.scale_heads = scale_heads
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.logit_scale_max = logit_scale_max

        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
        if qkv_bias:
            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
        else:
            self.in_proj_bias = None

        if self.scaled_cosine:
            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        else:
            self.logit_scale = None
        self.attn_drop = nn.Dropout(attn_drop)
        if self.scale_heads:
            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
        else:
            self.head_scale = None
        self.out_proj = nn.Linear(dim, dim)
        self.out_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias)
        N_q, B_q, C_q = q.shape
        N_k, B_k, C_k = k.shape
        N_v, B_v, C_v = v.shape
        if self.xattn:
            # B, N, C -> B, N, num_heads, C
            q = q.permute(1, 0, 2).reshape(B_q, N_q, self.num_heads, -1)
            k = k.permute(1, 0, 2).reshape(B_k, N_k, self.num_heads, -1)
            v = v.permute(1, 0, 2).reshape(B_v, N_v, self.num_heads, -1)

            x = xops.memory_efficient_attention(
                q, k, v,
                p=self.xattn_drop,
                scale=self.scale if self.logit_scale is None else None,
                attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None
            )
        else:
            # B*H, L, C
            q = q.contiguous().view(N_q, B_q * self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(N_k, B_k * self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(N_v, B_v * self.num_heads, -1).transpose(0, 1)

            if self.logit_scale is not None:
                # B*H, N_q, N_k
                attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
                logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
                attn = attn.view(B_q, self.num_heads, N_q, N_k) * logit_scale
                attn = attn.view(-1, N_q, N_k)
            else:
                q = q * self.scale
                attn = torch.bmm(q, k.transpose(-1, -2))

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
                    new_attn_mask.masked_fill_(attn_mask, float("-inf"))
                    attn_mask = new_attn_mask
                attn += attn_mask

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = torch.bmm(attn, v)

        if self.head_scale is not None:
            x = x.view(B_q, self.num_heads, N_q, C_q) * self.head_scale
            x = x.view(-1, N_q, C_q)
        x = x.transpose(0, 1).reshape(N_q, B_q, C_q)
        x = self.out_proj(x)
        x = self.out_drop(x)
        return x


class CustomResidualAttentionBlock(nn.Module):
    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            scale_cosine_attn: bool = False,
            scale_heads: bool = False,
            scale_attn: bool = False,
            scale_fc: bool = False,
            cross_attn: bool = False,
            xattn: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        self.ln_1_k = norm_layer(d_model) if cross_attn else self.ln_1
        self.ln_1_v = norm_layer(d_model) if cross_attn else self.ln_1
        self.attn = CustomAttention(
            d_model, n_head,
            qkv_bias=True,
            attn_drop=0.,
            proj_drop=0.,
            scaled_cosine=scale_cosine_attn,
            scale_heads=scale_heads,
            xattn=xattn
        )

        self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, mlp_width)),
            ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(mlp_width, d_model))
        ]))

        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        q = q + self.ls_1(self.ln_attn(self.attn(self.ln_1(q), self.ln_1_k(k), self.ln_1_v(v), attn_mask=attn_mask)))
        q = q + self.ls_2(self.mlp(self.ln_2(q)))
        return q


class CustomTransformer(nn.Module):
    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            scale_cosine_attn: bool = True,
            scale_heads: bool = False,
            scale_attn: bool = False,
            scale_fc: bool = False,
            cross_attn: bool = False,
            xattn: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.grad_checkpointing = False
        self.xattn = xattn

        self.resblocks = nn.ModuleList([
            CustomResidualAttentionBlock(
                width,
                heads,
                mlp_ratio,
                ls_init_value=ls_init_value,
                act_layer=act_layer,
                norm_layer=norm_layer,
                scale_cosine_attn=scale_cosine_attn,
                scale_heads=scale_heads,
                scale_attn=scale_attn,
                scale_fc=scale_fc,
                cross_attn=cross_attn,
                xattn=xattn)
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def forward(self, q: torch.Tensor, k: torch.Tensor = None, v: torch.Tensor = None, attn_mask: Optional[torch.Tensor] = None):
        if k is None and v is None:
            k = v = q
        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                q = checkpoint(r, q, k, v, attn_mask)
            else:
                q = r(q, k, v, attn_mask=attn_mask)
        return q
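
# Usage sketch (editor's illustrative addition): CustomTransformer runs self-attention
# when only q is given and cross-attention when separate key/value sequences are passed;
# all tensors are sequence-first, (seq_len, batch, width).
#
#   xf = CustomTransformer(width=512, layers=2, heads=8, cross_attn=True)
#   q = torch.randn(16, 2, 512)         # query sequence
#   kv = torch.randn(77, 2, 512)        # key/value sequence from another source
#   out = xf(q, kv, kv)                 # -> (16, 2, 512)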


class ResidualAttentionBlock(nn.Module):
    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        if xattn:
            self.attn = Attention(d_model, n_head, xattn=True)
        else:
            self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, mlp_width)),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(mlp_width, d_model))
        ]))

        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
        self.xattn = xattn

    def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
        if self.xattn:
            return self.attn(x, attn_mask=attn_mask)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        x = x + self.ls_1(self.attention(self.ln_1(x), attn_mask=attn_mask))
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x


class Transformer(nn.Module):
    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.grad_checkpointing = False

        self.resblocks = nn.ModuleList([
            ResidualAttentionBlock(
                width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(r, x, attn_mask)
            else:
                x = r(x, attn_mask=attn_mask)
        return x
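
# Usage sketch (editor's illustrative addition): Transformer and ResidualAttentionBlock
# are likewise sequence-first, (seq_len, batch, width); an optional additive attn_mask
# of shape (seq_len, seq_len) can be passed, as TextTransformer does below.
#
#   blocks = Transformer(width=512, layers=2, heads=8)
#   x = torch.randn(77, 2, 512)
#   x = blocks(x)                       # -> (77, 2, 512)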


class VisionTransformer(nn.Module):
    def __init__(
            self,
            image_size: int,
            patch_size: int,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float,
            ls_init_value: float = None,
            patch_dropout: float = 0.,
            global_average_pool: bool = False,
            output_dim: int = 512,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
    ):
        super().__init__()
        self.image_size = to_2tuple(image_size)
        self.patch_size = to_2tuple(patch_size)
        self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))

        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
        self.ln_pre = norm_layer(width)

        self.transformer = Transformer(
            width,
            layers,
            heads,
            mlp_ratio,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
            xattn=xattn
        )

        self.global_average_pool = global_average_pool
        self.ln_post = norm_layer(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        for param in self.parameters():
            param.requires_grad = False

        if unlocked_groups != 0:
            groups = [
                [
                    self.conv1,
                    self.class_embedding,
                    self.positional_embedding,
                    self.ln_pre,
                ],
                *self.transformer.resblocks[:-1],
                [
                    self.transformer.resblocks[-1],
                    self.ln_post,
                ],
                self.proj,
            ]

            def _unlock(x):
                if isinstance(x, Sequence):
                    for g in x:
                        _unlock(g)
                else:
                    if isinstance(x, torch.nn.Parameter):
                        x.requires_grad = True
                    else:
                        for p in x.parameters():
                            p.requires_grad = True

            _unlock(groups[-unlocked_groups:])

    def get_num_layers(self):
        return self.transformer.layers

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'positional_embedding', 'class_embedding'}

    def forward(self, x: torch.Tensor, return_all_features: bool = False):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat(
            [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
             x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)

        # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
        x = self.patch_dropout(x)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        if not return_all_features:
            if self.global_average_pool:
                x = x.mean(dim=1)  # x = x[:, 1:, :].mean(dim=1)
            else:
                x = x[:, 0]

            x = self.ln_post(x)

            if self.proj is not None:
                x = x @ self.proj

        return x
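
# Usage sketch (editor's illustrative addition): VisionTransformer maps a batch of images
# to pooled embeddings of size output_dim (CLS token by default, mean pooling when
# global_average_pool=True); return_all_features=True skips pooling, ln_post and proj.
#
#   vit = VisionTransformer(image_size=224, patch_size=16, width=768, layers=12,
#                           heads=12, mlp_ratio=4.0, output_dim=512)
#   images = torch.randn(2, 3, 224, 224)
#   feats = vit(images)                               # -> (2, 512)
#   tokens = vit(images, return_all_features=True)    # -> (2, 197, 768)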


class TextTransformer(nn.Module):
    def __init__(
            self,
            context_length: int = 77,
            vocab_size: int = 49408,
            width: int = 512,
            heads: int = 8,
            layers: int = 12,
            ls_init_value: float = None,
            output_dim: int = 512,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
            attn_mask: bool = True
    ):
        super().__init__()
        self.context_length = context_length
        self.vocab_size = vocab_size
        self.width = width
        self.output_dim = output_dim

        self.token_embedding = nn.Embedding(vocab_size, width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
        self.transformer = Transformer(
            width=width,
            layers=layers,
            heads=heads,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
            xattn=xattn
        )

        self.xattn = xattn
        self.ln_final = norm_layer(width)
        self.text_projection = nn.Parameter(torch.empty(width, output_dim))

        if attn_mask:
            self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
        else:
            self.attn_mask = None

        self.init_parameters()

    def init_parameters(self):
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        # return {'positional_embedding', 'token_embedding'}
        return {'positional_embedding'}

    def get_num_layers(self):
        return self.transformer.layers

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def forward(self, text, return_all_features: bool = False):
        cast_dtype = self.transformer.get_cast_dtype()
        x = self.token_embedding(text).to(cast_dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.to(cast_dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        # x = self.transformer(x)  # no attention mask is applied
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)

        if not return_all_features:
            # x.shape = [batch_size, n_ctx, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x
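
# Usage sketch (editor's illustrative addition): TextTransformer consumes integer token
# ids padded to context_length and pools the feature at the EOT position (the position
# of the largest token id), projecting it to output_dim.
#
#   text_model = TextTransformer(context_length=77, vocab_size=49408, width=512,
#                                heads=8, layers=12, output_dim=512)
#   token_ids = torch.randint(0, 49408, (2, 77))
#   text_feats = text_model(token_ids)  # -> (2, 512)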