sathvika181204 committed on
Commit
90c4d4b
1 Parent(s): 2c419f2

Upload modeling_intern_vit.py with huggingface_hub

Files changed (1)
  1. modeling_intern_vit.py +429 -0
modeling_intern_vit.py ADDED
@@ -0,0 +1,429 @@
# --------------------------------------------------------
# InternVL
# Copyright (c) 2024 OpenGVLab
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    from flash_attn.bert_padding import pad_input, unpad_input
    from flash_attn.flash_attn_interface import \
        flash_attn_varlen_qkvpacked_func
    has_flash_attn = True
except Exception:
    print('FlashAttention2 is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_varlen_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_varlen_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_varlen_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None
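
# Note on the FlashAttention wrapper above (illustrative, not part of the upstream
# interface): flash_attn_varlen_qkvpacked_func consumes QKV packed as
# (total_tokens, 3, num_heads, head_dim) together with cu_seqlens, the int32
# cumulative sequence lengths. For two sequences of lengths 3 and 5 this would be
# cu_seqlens = torch.tensor([0, 3, 8], dtype=torch.int32), so sequence i occupies
# rows cu_seqlens[i]:cu_seqlens[i + 1] of the packed tensor. The dense
# (B, S, 3, H, D) path above builds exactly this layout before dispatching to the
# kernel and reshapes the result back afterwards.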


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
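
# InternRMSNorm above computes y = weight * x / sqrt(mean(x**2, dim=-1) + eps)
# in float32 before casting back to the input dtype; unlike LayerNorm it has no
# mean subtraction and no bias term.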


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}
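
# NORM2FN maps config.norm_type ('rms_norm' or 'layer_norm') to the norm class
# used by the encoder layers below. When apex is importable, FusedRMSNorm
# transparently replaces the pure-PyTorch InternRMSNorm defined above.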


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [batch, embed_dim, grid_h, grid_w]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings
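
# Shape walk-through for InternVisionEmbeddings above (illustrative numbers, not
# fixed by this file): with image_size=448 and patch_size=14 the Conv2d produces
# a 32 x 32 grid, i.e. 1024 patch tokens; prepending the class token gives 1025
# positions, matching num_positions = (448 // 14) ** 2 + 1. When the input
# resolution differs from image_size, _get_pos_embed bicubically resizes the
# patch position embeddings to the actual (grid_h, grid_w).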


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            logger.warning('Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x
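
# Note on qk_normalization above: q and k are flattened across heads to
# (B, N, num_heads * head_dim) before RMSNorm, so a single q_norm / k_norm of
# width embed_dim normalizes all heads jointly rather than per head. Both the
# naive and the FlashAttention paths apply the same normalization, keeping the
# two branches numerically consistent up to kernel-level differences.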


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states).to(hidden_states.dtype)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states).to(hidden_states.dtype)) * self.ls2)

        return hidden_states
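
# Each encoder layer above is pre-norm with per-channel layer scale and
# stochastic depth:
#     x = x + DropPath(ls1 * Attn(Norm1(x)))
#     x = x + DropPath(ls2 * MLP(Norm2(x)))
# where ls1 / ls2 are learnable vectors initialized to config.initializer_factor.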


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternVisionEncoderLayer`].

    Args:
        config (`InternVisionConfig`):
            The corresponding vision configuration for the `InternVisionEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = True

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )
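
# InternVisionEncoder enables gradient checkpointing by default
# (self.gradient_checkpointing = True): during training each layer's activations
# are recomputed in the backward pass via torch.utils.checkpoint.checkpoint,
# trading extra compute for lower memory. At inference (self.training is False)
# the layers run normally.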


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Index rather than attribute access so the plain tuple returned when
        # return_dict=False is handled as well.
        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
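

# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the InternVL release).
# It assumes InternVisionConfig in configuration_intern_vit.py accepts the
# standard ViT hyper-parameters passed below and provides sane defaults for the
# remaining fields; adjust names and values if the real config differs. Because
# of the relative import at the top of this file, run it as a module, e.g.
# `python -m <package>.modeling_intern_vit`, or copy it into a separate script.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    config = InternVisionConfig(
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        image_size=224,
        patch_size=14,
        use_flash_attn=False,  # force the naive attention path (no CUDA needed)
    )
    model = InternVisionModel(config).eval()
    pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values)
    # (224 // 14) ** 2 + 1 = 257 tokens of width hidden_size
    print(outputs.last_hidden_state.shape)  # expected: torch.Size([1, 257, 64])
    print(outputs.pooler_output.shape)      # expected: torch.Size([1, 64])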