hugohrban committed
Commit beb141c
1 Parent(s): 2029a8c

Upload ProGenForCausalLM

Files changed (2)
  1. config.json +21 -1
  2. modeling_progen.py +764 -0
config.json CHANGED
@@ -1,8 +1,13 @@
 {
+  "_name_or_path": "checkpoints/progen2-small",
   "activation_function": "gelu_new",
+  "architectures": [
+    "ProGenForCausalLM"
+  ],
   "attn_pdrop": 0.0,
   "auto_map": {
-    "AutoConfig": "configuration_progen.ProGenConfig"
+    "AutoConfig": "configuration_progen.ProGenConfig",
+    "AutoModelForCausalLM": "modeling_progen.ProGenForCausalLM"
   },
   "bos_token_id": 1,
   "embd_pdrop": 0.0,
@@ -19,8 +24,23 @@
   "resid_pdrop": 0.0,
   "rotary_dim": 32,
   "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50,
+      "temperature": 1.0
+    }
+  },
+  "tokenizer_class": "GPT2Tokenizer",
+  "torch_dtype": "float32",
   "transformers_version": "4.40.0",
   "use_cache": true,
+  "vocab_size": 32,
   "vocab_size_emb": 32,
   "vocab_size_lm_head": 32
 }
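
The `architectures` field and the `AutoModelForCausalLM` entry added to `auto_map` are what let the Hub dispatch this custom class through the Auto API. A minimal loading sketch (the repo id `hugohrban/progen2-small` is an assumption here; substitute the actual model id):

    from transformers import AutoModelForCausalLM

    # trust_remote_code=True tells transformers to fetch configuration_progen.py
    # and modeling_progen.py from the repo, following the auto_map above.
    model = AutoModelForCausalLM.from_pretrained(
        "hugohrban/progen2-small", trust_remote_code=True
    )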
modeling_progen.py ADDED
@@ -0,0 +1,764 @@
+# coding=utf-8
+# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Modified forward-pass implementation based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/gptj/modeling_gptj.py
+
+from typing import Tuple
+
+import numpy as np
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+import torch.nn.functional as F
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+)
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import logging
+from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
+from .configuration_progen import ProGenConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+def fixed_pos_embedding(x, seq_dim=1, seq_len=None):
+    dim = x.shape[-1]
+    if seq_len is None:
+        seq_len = x.shape[seq_dim]
+    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
+    sinusoid_inp = (
+        torch.einsum("i , j -> i j", torch.arange(seq_len), inv_freq)
+        .to(x.device)
+        .float()
+    )
+    return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
+
+
+def rotate_every_two(x):
+    x1 = x[:, :, :, ::2]
+    x2 = x[:, :, :, 1::2]
+    x = torch.stack((-x2, x1), axis=-1)
+    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
+
+
+def apply_rotary_pos_emb(x, sincos, offset=0):
+    sin, cos = map(
+        lambda t: t[None, offset : x.shape[1] + offset, None, :].repeat_interleave(
+            2, 3
+        ),
+        sincos,
+    )
+    # einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
+    return (x * cos) + (rotate_every_two(x) * sin)
+
+
+class ProGenAttention(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+
+        max_positions = config.max_position_embeddings
+        self.register_buffer(
+            "bias",
+            torch.tril(
+                torch.ones((max_positions, max_positions), dtype=torch.bool)
+            ).view(1, 1, max_positions, max_positions),
+        )
+        self.register_buffer("masked_bias", torch.tensor(-1e9))
+
+        self.attn_dropout = nn.Dropout(config.attn_pdrop)
+        self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+        self.embed_dim = config.hidden_size
+        self.num_attention_heads = config.num_attention_heads
+        self.head_dim = self.embed_dim // self.num_attention_heads
+        if self.head_dim * self.num_attention_heads != self.embed_dim:
+            raise ValueError(
+                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and `num_attention_heads`: {self.num_attention_heads})."
+            )
+        self.scale_attn = torch.sqrt(
+            torch.tensor(self.head_dim, dtype=torch.float32)
+        ).to(torch.get_default_dtype())
+        self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
+
+        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+        self.rotary_dim = None
+        if config.rotary_dim is not None:
+            self.rotary_dim = config.rotary_dim
+
+    def _split_heads(self, x, n_head, dim_head, mp_num):
+        reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
+        reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
+        return reshaped
+
+    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
+        """
+        Merges attn_head_size dim and num_attn_heads dim into n_positions
+        """
+        if len(tensor.shape) == 5:
+            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
+        elif len(tensor.shape) == 4:
+            tensor = tensor.permute(0, 2, 1, 3).contiguous()
+        else:
+            raise ValueError(
+                f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}"
+            )
+        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
+        return tensor.view(new_shape)
+
+    def _attn(
+        self,
+        query,
+        key,
+        value,
+        attention_mask=None,
+        head_mask=None,
+    ):
+        # compute causal mask from causal mask buffer
+        query_length, key_length = query.size(-2), key.size(-2)
+        causal_mask = self.bias[
+            :, :, key_length - query_length : key_length, :key_length
+        ]
+
+        # Keep the attention weights computation in fp32 to avoid overflow issues
+        query = query.to(torch.float32)
+        key = key.to(torch.float32)
+        # print("q.shape", query.shape)
+        # print("k.shape", key.shape)
+        attn_weights = query @ key.transpose(-1, -2)
+
+        attn_weights = attn_weights / self.scale_attn
+        attn_weights = torch.where(
+            causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype)
+        )
+
+        if attention_mask is not None:
+            # Apply the attention mask
+            attn_weights = attn_weights + attention_mask
+
+        attn_weights = F.softmax(attn_weights, dim=-1)
+        attn_weights = attn_weights.to(value.dtype)
+        attn_weights = self.attn_dropout(attn_weights)
+
+        if head_mask is not None:
+            attn_weights = attn_weights * head_mask
+
+        attn_output = attn_weights @ value
+
+        return attn_output, attn_weights
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask=None,
+        layer_past=None,
+        head_mask=None,
+        use_cache=False,
+        output_attentions=False,
+    ):
+        qkv = self.qkv_proj(hidden_states)
+        # TODO(enijkamp): factor out number of logical TPU-v3/v4 cores or make forward pass agnostic
+        # mp_num = 4
+        mp_num = 8
+        qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
+
+        local_dim = self.head_dim * self.num_attention_heads // mp_num
+        query, value, key = torch.split(qkv_split, local_dim, dim=-1)
+        query = self._split_heads(
+            query, self.num_attention_heads, self.head_dim, mp_num=mp_num
+        )
+        key = self._split_heads(
+            key, self.num_attention_heads, self.head_dim, mp_num=mp_num
+        )
+
+        value = self._split_heads(
+            value, self.num_attention_heads, self.head_dim, mp_num=mp_num
+        )
+        value = value.permute(0, 2, 1, 3)
+
+        seq_len = key.shape[1]
+        offset = 0
+
+        if layer_past is not None:
+            offset = layer_past[0].shape[-2]
+            seq_len += offset
+
+        if self.rotary_dim is not None:
+            k_rot = key[:, :, :, : self.rotary_dim]
+            k_pass = key[:, :, :, self.rotary_dim :]
+
+            q_rot = query[:, :, :, : self.rotary_dim]
+            q_pass = query[:, :, :, self.rotary_dim :]
+
+            sincos = fixed_pos_embedding(k_rot, 1, seq_len=seq_len)
+            k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=offset)
+            q_rot = apply_rotary_pos_emb(q_rot, sincos, offset=offset)
+
+            key = torch.cat([k_rot, k_pass], dim=-1)
+            query = torch.cat([q_rot, q_pass], dim=-1)
+        else:
+            sincos = fixed_pos_embedding(key, 1, seq_len=seq_len)
+            key = apply_rotary_pos_emb(key, sincos, offset=offset)
+            query = apply_rotary_pos_emb(query, sincos, offset=offset)
+
+        key = key.permute(0, 2, 1, 3)
+        query = query.permute(0, 2, 1, 3)
+
+        if layer_past is not None:
+            past_key = layer_past[0]
+            past_value = layer_past[1]
+            key = torch.cat((past_key, key), dim=-2)
+            value = torch.cat((past_value, value), dim=-2)
+
+        if use_cache is True:
+            present = (key, value)
+        else:
+            present = None
+
+        # compute self-attention: softmax((Q @ K.T) / sqrt(dim_head)) @ V
+        attn_output, attn_weights = self._attn(
+            query, key, value, attention_mask, head_mask
+        )
+
+        attn_output = self._merge_heads(
+            attn_output, self.num_attention_heads, self.head_dim
+        )
+
+        attn_output = self.out_proj(attn_output)
+        attn_output = self.resid_dropout(attn_output)
+
+        outputs = (attn_output, present)
+        if output_attentions:
+            outputs += (attn_weights,)
+
+        return outputs  # a, present, (attentions)
+
+
+class ProGenMLP(nn.Module):
+    def __init__(
+        self, intermediate_size, config
+    ):  # in MLP: intermediate_size = 4 * embed_dim
+        super().__init__()
+        embed_dim = config.n_embd
+
+        self.fc_in = nn.Linear(embed_dim, intermediate_size)
+        self.fc_out = nn.Linear(intermediate_size, embed_dim)
+
+        self.act = ACT2FN[config.activation_function]
+        self.dropout = nn.Dropout(config.resid_pdrop)
+
+    def forward(self, hidden_states):
+        hidden_states = self.fc_in(hidden_states)
+        hidden_states = self.act(hidden_states)
+        hidden_states = self.fc_out(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        return hidden_states
+
+
+class ProGenBlock(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
+        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+        self.attn = ProGenAttention(config)
+        self.mlp = ProGenMLP(inner_dim, config)
+
+    def forward(
+        self,
+        hidden_states,
+        layer_past=None,
+        attention_mask=None,
+        head_mask=None,
+        use_cache=False,
+        output_attentions=False,
+    ):
+        residual = hidden_states
+        hidden_states = self.ln_1(hidden_states)
+        attn_outputs = self.attn(
+            hidden_states,
+            layer_past=layer_past,
+            attention_mask=attention_mask,
+            head_mask=head_mask,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+        )
+        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
+        outputs = attn_outputs[1:]
+
+        feed_forward_hidden_states = self.mlp(hidden_states)
+        hidden_states = attn_output + feed_forward_hidden_states + residual
+
+        if use_cache:
+            outputs = (hidden_states,) + outputs
+        else:
+            outputs = (hidden_states,) + outputs[1:]
+
+        return outputs  # hidden_states, present, (attentions)
+
+
+class ProGenPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = ProGenConfig
+    base_model_prefix = "transformer"
+    is_parallelizable = True
+
+    def __init__(self, *inputs, **kwargs):
+        super().__init__(*inputs, **kwargs)
+
+    def _init_weights(self, module):
+        """Initialize the weights."""
+        if isinstance(module, (nn.Linear,)):
+            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+
+class ProGenModel(ProGenPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.vocab_size_emb = config.vocab_size_emb
+        self.embed_dim = config.n_embd
+        self.wte = nn.Embedding(config.vocab_size_emb, self.embed_dim)
+        self.drop = nn.Dropout(config.embd_pdrop)
+        self.h = nn.ModuleList([ProGenBlock(config) for _ in range(config.n_layer)])
+        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+        self.rotary_dim = min(
+            config.rotary_dim, config.n_positions // config.num_attention_heads
+        )
+        self.init_weights()
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+
+    def parallelize(self, device_map=None):
+        # Check validity of device_map
+        self.device_map = (
+            get_device_map(len(self.h), range(torch.cuda.device_count()))
+            if device_map is None
+            else device_map
+        )
+        assert_device_map(self.device_map, len(self.h))
+        self.model_parallel = True
+        self.first_device = (
+            "cpu"
+            if "cpu" in self.device_map.keys()
+            else "cuda:" + str(min(self.device_map.keys()))
+        )
+        self.last_device = "cuda:" + str(max(self.device_map.keys()))
+        self.wte = self.wte.to(self.first_device)
+        # Load onto devices
+        for k, v in self.device_map.items():
+            for block in v:
+                cuda_device = "cuda:" + str(k)
+                self.h[block] = self.h[block].to(cuda_device)
+        # ln_f to last
+        self.ln_f = self.ln_f.to(self.last_device)
+
+    def deparallelize(self):
+        self.model_parallel = False
+        self.device_map = None
+        self.first_device = "cpu"
+        self.last_device = "cpu"
+        self.wte = self.wte.to("cpu")
+        for index in range(len(self.h)):
+            self.h[index] = self.h[index].to("cpu")
+        self.ln_f = self.ln_f.to("cpu")
+        torch.cuda.empty_cache()
+
+    def get_input_embeddings(self):
+        return self.wte
+
+    def set_input_embeddings(self, new_embeddings):
+        self.wte = new_embeddings
+
+    def forward(
+        self,
+        input_ids=None,
+        past_key_values=None,
+        attention_mask=None,
+        token_type_ids=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        use_cache=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+    ):
+        output_attentions = (
+            output_attentions
+            if output_attentions is not None
+            else self.config.output_attentions
+        )
+        output_hidden_states = (
+            output_hidden_states
+            if output_hidden_states is not None
+            else self.config.output_hidden_states
+        )
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+
+        if input_ids is not None and inputs_embeds is not None:
+            raise ValueError(
+                "You cannot specify both input_ids and inputs_embeds at the same time"
+            )
+        elif input_ids is not None:
+            input_shape = input_ids.size()
+            input_ids = input_ids.view(-1, input_shape[-1])
+            batch_size = input_ids.shape[0]
+        elif inputs_embeds is not None:
+            input_shape = inputs_embeds.size()[:-1]
+            batch_size = inputs_embeds.shape[0]
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+        if token_type_ids is not None:
+            token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+        if position_ids is not None:
+            position_ids = position_ids.view(-1, input_shape[-1])
+
+        if past_key_values is None:
+            past_length = 0
+            past_key_values = tuple([None] * len(self.h))
+        else:
+            past_length = past_key_values[0][0].size(-2)
+
+        if position_ids is None:
+            position_ids = torch.arange(
+                past_length,
+                input_shape[-1] + past_length,
+                dtype=torch.long,
+                device=device,
+            )
+            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
+
+        # Attention mask.
+        if attention_mask is not None:
+            assert batch_size > 0, "batch_size has to be defined and > 0"
+            attention_mask = attention_mask.view(batch_size, -1)
+            # We create a 3D attention mask from a 2D tensor mask.
+            # Sizes are [batch_size, 1, 1, to_seq_length]
+            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+            # this attention mask is more simple than the triangular masking of causal attention
+            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+            attention_mask = attention_mask[:, None, None, :]
+
+            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+            # masked positions, this operation will create a tensor which is 0.0 for
+            # positions we want to attend and -10000.0 for masked positions.
+            # Since we are adding it to the raw scores before the softmax, this is
+            # effectively the same as removing these entirely.
+            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+            attention_mask = (1.0 - attention_mask) * -10000.0
+
+        # Prepare head mask if needed
+        # 1.0 in head_mask indicate we keep the head
+        # attention_probs has shape bsz x num_attention_heads x N x N
+        # head_mask has shape n_layer x batch x num_attention_heads x N x N
+        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+        if inputs_embeds is None:
+            inputs_embeds = self.wte(input_ids)
+
+        hidden_states = inputs_embeds
+
+        if token_type_ids is not None:
+            token_type_embeds = self.wte(token_type_ids)
+            hidden_states = hidden_states + token_type_embeds
+
+        hidden_states = self.drop(hidden_states)
+
+        output_shape = input_shape + (hidden_states.size(-1),)
+
+        presents = () if use_cache else None
+        all_self_attentions = () if output_attentions else None
+        all_hidden_states = () if output_hidden_states else None
+        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+            # Model parallel
+            if self.model_parallel:
+                torch.cuda.set_device(hidden_states.device)
+                # Ensure layer_past is on same device as hidden_states (might not be correct)
+                if layer_past is not None:
+                    layer_past = tuple(
+                        past_state.to(hidden_states.device) for past_state in layer_past
+                    )
+                # Ensure that attention_mask is always on the same device as hidden_states
+                if attention_mask is not None:
+                    attention_mask = attention_mask.to(hidden_states.device)
+                if isinstance(head_mask, torch.Tensor):
+                    head_mask = head_mask.to(hidden_states.device)
+            if output_hidden_states:
+                all_hidden_states = all_hidden_states + (hidden_states,)
+
+            if getattr(self.config, "gradient_checkpointing", False) and self.training:
+                if use_cache:
+                    logger.warning(
+                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
+                        "`use_cache=False`..."
+                    )
+                    use_cache = False
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        # None for past_key_value
+                        return module(*inputs, use_cache, output_attentions)
+
+                    return custom_forward
+
+                outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(block),
+                    hidden_states,
+                    None,
+                    attention_mask,
+                    head_mask[i],
+                )
+            else:
+                outputs = block(
+                    hidden_states,
+                    layer_past=layer_past,
+                    attention_mask=attention_mask,
+                    head_mask=head_mask[i],
+                    use_cache=use_cache,
+                    output_attentions=output_attentions,
+                )
+
+            hidden_states = outputs[0]
+            if use_cache is True:
+                presents = presents + (outputs[1],)
+
+            if output_attentions:
+                all_self_attentions = all_self_attentions + (
+                    outputs[2 if use_cache else 1],
+                )
+
+            # Model Parallel: If it's the last layer for that device, put things on the next device
+            if self.model_parallel:
+                for k, v in self.device_map.items():
+                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
+                        hidden_states = hidden_states.to("cuda:" + str(k + 1))
+
+        hidden_states = self.ln_f(hidden_states)
+
+        hidden_states = hidden_states.view(*output_shape)
+        # Add last hidden state
+        if output_hidden_states:
+            all_hidden_states = all_hidden_states + (hidden_states,)
+
+        if not return_dict:
+            return tuple(
+                v
+                for v in [
+                    hidden_states,
+                    presents,
+                    all_hidden_states,
+                    all_self_attentions,
+                ]
+                if v is not None
+            )
+        # print("hidden_states", hidden_states.shape)
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=presents,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attentions,
+        )
+
+
+class ProGenForCausalLM(ProGenPreTrainedModel):
+    _keys_to_ignore_on_load_missing = [
+        r"h\.\d+\.attn\.masked_bias",
+        r"h\.\d+\.attn\.bias",
+        r"lm_head\.weight",
+    ]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.transformer = ProGenModel(config)
+        self.lm_head = nn.Linear(config.n_embd, config.vocab_size_lm_head)
+        self.init_weights()
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+
+    # def parallelize(self, device_map=None):
+    #     self.device_map = (
+    #         get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
+    #         if device_map is None
+    #         else device_map
+    #     )
+    #     assert_device_map(self.device_map, len(self.transformer.h))
+    #     self.transformer.parallelize(self.device_map)
+    #     self.lm_head = self.lm_head.to(self.transformer.first_device)
+    #     self.model_parallel = True
+
+    # def deparallelize(self):
+    #     self.transformer.deparallelize()
+    #     self.transformer = self.transformer.to("cpu")
+    #     self.lm_head = self.lm_head.to("cpu")
+    #     self.model_parallel = False
+    #     torch.cuda.empty_cache()
+
+    # def get_output_embeddings(self):
+    #     return None
+
+    # def set_output_embeddings(self, new_embeddings):
+    #     return
+
+    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
+        token_type_ids = kwargs.get("token_type_ids", None)
+        # only last token for input_ids if past is defined in kwargs
+        if past:
+            input_ids = input_ids[:, -1].unsqueeze(-1)
+            if token_type_ids is not None:
+                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
+
+        attention_mask = kwargs.get("attention_mask", None)
+        # print("attention_mask", attention_mask)
+        position_ids = kwargs.get("position_ids", None)
+
+        if attention_mask is not None and position_ids is None:
+            # create position_ids on the fly for batch generation
+            position_ids = attention_mask.long().cumsum(-1) - 1
+            position_ids.masked_fill_(attention_mask == 0, 1)
+            if past:
+                position_ids = position_ids[:, -1].unsqueeze(-1)
+        else:
+            position_ids = None
+        return {
+            "input_ids": input_ids,
+            "past_key_values": past,
+            "use_cache": kwargs.get("use_cache"),
+            "position_ids": position_ids,
+            "attention_mask": attention_mask,
+            "token_type_ids": token_type_ids,
+        }
+
+    def forward(
+        self,
+        input_ids=None,
+        past_key_values=None,
+        attention_mask=None,
+        token_type_ids=None,
+        position_ids=None,
+        head_mask=None,
+        inputs_embeds=None,
+        labels=None,
+        use_cache=None,
+        output_attentions=None,
+        output_hidden_states=None,
+        return_dict=None,
+    ):
+        r"""
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            ``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
+            ``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
+        """
+        return_dict = (
+            return_dict if return_dict is not None else self.config.use_return_dict
+        )
+        # print("here")
+        # print(attention_mask)
+        transformer_outputs = self.transformer(
+            input_ids,
+            past_key_values=past_key_values,
+            attention_mask=attention_mask,
+            token_type_ids=token_type_ids,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+
+        # Set device for model parallelism
+        if self.model_parallel:
+            torch.cuda.set_device(self.transformer.first_device)
+            hidden_states = hidden_states.to(self.lm_head.weight.device)
+
+        # make sure sampling in fp16 works correctly and
+        # compute loss in fp32 to match with mesh-tf version
+        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
+        lm_logits = self.lm_head(hidden_states).to(torch.float32)
+
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            shift_logits = lm_logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(
+                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
+            )
+
+            loss = loss.to(hidden_states.dtype)
+
+        if not return_dict:
+            output = (lm_logits,) + transformer_outputs[1:]
+            return ((loss,) + output) if loss is not None else output
+
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=lm_logits,
+            past_key_values=transformer_outputs.past_key_values,
+            hidden_states=transformer_outputs.hidden_states,
+            attentions=transformer_outputs.attentions,
+        )
+
+    @staticmethod
+    def _reorder_cache(
+        past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+    ) -> Tuple[Tuple[torch.Tensor]]:
+        """
+        This function is used to re-order the :obj:`past_key_values` cache if
+        :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is
+        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
+        """
+        return tuple(
+            tuple(
+                past_state.index_select(0, beam_idx.to(past_state.device))
+                for past_state in layer_past
+            )
+            for layer_past in past
+        )