Corianas committed on
Commit c83297f
1 Parent(s): 9e03479

Upload model.py

Files changed (1)
  1. model.py +401 -0
model.py ADDED
@@ -0,0 +1,401 @@
"""
Full definition of a GPT Language Model, all of it in this single file.
References:
1) the official GPT-2 TensorFlow implementation released by OpenAI:
https://github.com/openai/gpt-2/blob/master/src/model.py
2) huggingface/transformers PyTorch implementation:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py
"""

import math
import inspect
from dataclasses import dataclass

import torch
import torch.nn as nn
from torch.nn import functional as F

# @torch.jit.script # good to enable when not using torch.compile, disable when using (our default)
def new_gelu(x):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
    Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

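# Illustrative aside (not part of the model): this tanh formula is the same
# approximation PyTorch ships as its 'tanh' GELU, so a quick sanity check would be:
#   x = torch.randn(4)
#   torch.allclose(new_gelu(x), F.gelu(x, approximate='tanh'), atol=1e-6)  # expected True
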
class LayerNorm(nn.Module):
    """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """

    def __init__(self, ndim, bias):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(ndim))
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, input):
        return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)

class CausalSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention makes GPU go brrrrr but support is only in PyTorch nightly and still a bit scary
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and self.dropout == 0.0
        if not self.flash:
            # print("WARNING: using slow attention. Flash Attention atm needs PyTorch nightly and dropout=0.0")
            # causal mask to ensure that attention is only applied to the left in the input sequence
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                        .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        if self.flash:
            # efficient attention using Flash Attention CUDA kernels
            y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout, is_causal=True)
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y

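# Shape sketch (illustrative): with B=2, T=8, n_embd=768, n_head=12, the forward pass
# maps x: (2, 8, 768) -> q, k, v each (2, 12, 8, 64) -> y: (2, 8, 768)
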
class MLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x = self.c_fc(x)
        x = new_gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x

class Block(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x

@dataclass
class GPTConfig:
    block_size: int = 1024
    vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
    n_layer: int = 12
    n_head: int = 12
    n_embd: int = 768
    dropout: float = 0.0
    bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster

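# Example (illustrative assumption, e.g. a small character-level model with a 65-token vocab):
#   config = GPTConfig(block_size=256, vocab_size=65, n_layer=6, n_head=6,
#                      n_embd=384, dropout=0.1, bias=False)
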
class GPT(nn.Module):

    def __init__(self, config):
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),
            wpe = nn.Embedding(config.block_size, config.n_embd),
            drop = nn.Dropout(config.dropout),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = LayerNorm(config.n_embd, bias=config.bias),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # with weight tying when using torch.compile() some warnings get generated:
        # "UserWarning: functional_call was passed multiple values for tied weights.
        # This behavior is deprecated and will be an error in future versions"
        # not 100% sure what this is, so far seems to be harmless. TODO investigate
        self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying

        # init all weights
        self.apply(self._init_weights)
        # apply special scaled init to the residual projections, per GPT-2 paper
        for pn, p in self.named_parameters():
            if pn.endswith('c_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))

        # report number of parameters
        print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def reset_parameters(self):
        # Initialize weights using Glorot initialization
        for param in self.parameters():
            if param.dim() > 1:
                torch.nn.init.xavier_uniform_(param)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)

        # forward the GPT model itself
        tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)

        if targets is not None:
            # if we are given some desired targets also calculate the loss
            logits = self.lm_head(x)
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # inference-time mini-optimization: only forward the lm_head on the very last position
            logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
            loss = None

        return logits, loss

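    # Usage sketch (illustrative): idx is a LongTensor of token ids, targets has the same shape
    #   idx = torch.randint(0, config.vocab_size, (4, 128))
    #   logits, loss = model(idx, targets=idx)  # training: logits (4, 128, vocab_size)
    #   logits, _ = model(idx)                  # inference: logits (4, 1, vocab_size)
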
    def crop_block_size(self, block_size):
        # model surgery to decrease the block size if necessary
        # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
        # but want to use a smaller block size for some smaller, simpler model
        assert block_size <= self.config.block_size
        self.config.block_size = block_size
        self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
        for block in self.transformer.h:
            if hasattr(block.attn, 'bias'): # the causal mask buffer only exists for the non-flash path
                block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]

    @classmethod
    def from_pretrained(cls, model_type, override_args=None):
        assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
        override_args = override_args or {} # default to empty dict
        # only dropout can be overridden, see more notes below
        assert all(k == 'dropout' for k in override_args)
        from transformers import GPT2LMHeadModel
        print("loading weights from pretrained gpt: %s" % model_type)

        # n_layer, n_head and n_embd are determined from model_type
        config_args = {
            'gpt2':        dict(n_layer=12, n_head=12, n_embd=768),  # 124M params
            'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
            'gpt2-large':  dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
            'gpt2-xl':     dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
        }[model_type]
        print("forcing vocab_size=50257, block_size=1024, bias=True")
        config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
        config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
        config_args['bias'] = True # always True for GPT model checkpoints
        # we can override the dropout rate, if desired
        if 'dropout' in override_args:
            print(f"overriding dropout rate to {override_args['dropout']}")
            config_args['dropout'] = override_args['dropout']
        # create a from-scratch initialized minGPT model
        config = GPTConfig(**config_args)
        model = GPT(config)
        sd = model.state_dict()
        sd_keys = sd.keys()
        sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param

        # init a huggingface/transformers model
        model_hf = GPT2LMHeadModel.from_pretrained(model_type)
        sd_hf = model_hf.state_dict()

        # copy while ensuring all of the parameters are aligned and match in names and shapes
        sd_keys_hf = sd_hf.keys()
        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
        sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
        transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
        # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
        # this means that we have to transpose these weights when we import them
        assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
        for k in sd_keys_hf:
            if any(k.endswith(w) for w in transposed):
                # special treatment for the Conv1D weights we need to transpose
                assert sd_hf[k].shape[::-1] == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k].t())
            else:
                # vanilla copy over the other parameters
                assert sd_hf[k].shape == sd[k].shape
                with torch.no_grad():
                    sd[k].copy_(sd_hf[k])

        return model

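    # Example (illustrative): load GPT-2 weights from huggingface with dropout disabled
    #   model = GPT.from_pretrained('gpt2', override_args=dict(dropout=0.0))
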
    def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
        """
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """

        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
        blacklist_weight_modules = (torch.nn.LayerNorm, LayerNorm, torch.nn.Embedding)
        for mn, m in self.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
                # random note: because named_modules and named_parameters are recursive
                # we will see the same tensors p many many times. but doing it this way
                # allows us to know which parent module any tensor p belongs to...
                if pn.endswith('bias'):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)

        # subtle: 'transformer.wte.weight' and 'lm_head.weight' are tied, so they
        # will appear in the no_decay and decay sets respectively after the above.
        # In addition, because named_parameters() doesn't return duplicates, it
        # will only return the first occurrence, keyed by 'transformer.wte.weight', below.
        # so let's manually remove 'lm_head.weight' from the decay set. This will include
        # this tensor into optimization via transformer.wte.weight only, and not decayed.
        decay.remove('lm_head.weight')

        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                                                    % (str(param_dict.keys() - union_params), )

        # create the pytorch optimizer object
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        # new PyTorch nightly has a new 'fused' option for AdamW that is much faster
        use_fused = (device_type == 'cuda') and ('fused' in inspect.signature(torch.optim.AdamW).parameters)
        print(f"using fused AdamW: {use_fused}")
        extra_args = dict(fused=True) if use_fused else dict()
        optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)

        return optimizer

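    # Example call (illustrative; the hyperparameters are assumptions, not prescriptions):
    #   optimizer = model.configure_optimizers(weight_decay=0.1, learning_rate=6e-4,
    #                                          betas=(0.9, 0.95), device_type='cuda')
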
    def estimate_mfu(self, fwdbwd_per_iter, dt):
        """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
        # first estimate the number of flops we do per iteration.
        # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
        N = self.get_num_params()
        cfg = self.config
        L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
        flops_per_token = 6*N + 12*L*H*Q*T
        flops_per_fwdbwd = flops_per_token * T
        flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
        # express our flops throughput as ratio of A100 bfloat16 peak flops
        flops_achieved = flops_per_iter * (1.0/dt) # per second
        flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
        mfu = flops_achieved / flops_promised
        return mfu

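    # Worked example (illustrative): for the default 124M config (N ~ 124e6, L=12, H=12, Q=64, T=1024),
    # flops_per_token = 6*124e6 + 12*12*12*64*1024 ~ 7.4e8 + 1.1e8 ~ 8.6e8 flops per token
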
    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """
        Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
        the sequence max_new_tokens times, feeding the predictions back into the model each time.
        Most likely you'll want to make sure to be in model.eval() mode of operation for this.
        """
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long we must crop it at block_size
            idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
            # forward the model to get the logits for the index in the sequence
            logits, _ = self(idx_cond)
            # pluck the logits at the final step and scale by desired temperature
            logits = logits[:, -1, :] / temperature
            # optionally crop the logits to only the top k options
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            # apply softmax to convert logits to (normalized) probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1)
            # append sampled index to the running sequence and continue
            idx = torch.cat((idx, idx_next), dim=1)

        return idx

    @torch.no_grad() # mirror generate() above so streaming doesn't build an autograd graph
    def generate_streaming(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """
        Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
        the sequence max_new_tokens times, feeding the predictions back into the model each time.
        Yield the generated indices one at a time rather than concatenating them into a single tensor.
        Most likely you'll want to make sure to be in model.eval() mode of operation for this.
        """
        for _ in range(max_new_tokens):
            # if the sequence context is growing too long we must crop it at block_size
            idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
            # forward the model to get the logits for the index in the sequence
            logits, _ = self(idx_cond)
            # pluck the logits at the final step and scale by desired temperature
            logits = logits[:, -1, :] / temperature
            # optionally crop the logits to only the top k options
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            # apply softmax to convert logits to (normalized) probabilities
            probs = F.softmax(logits, dim=-1)
            # sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1)
            # append sampled index to the running sequence and yield the new index
            idx = torch.cat((idx, idx_next), dim=1)
            yield idx_next.item()
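
A minimal end-to-end sketch of how this file is meant to be used (the tiny hyperparameters below are illustrative assumptions, not values shipped with this upload):

    import torch
    from model import GPT, GPTConfig

    config = GPTConfig(block_size=128, vocab_size=50304, n_layer=2, n_head=2, n_embd=128)
    model = GPT(config)
    model.eval()
    prompt = torch.zeros((1, 1), dtype=torch.long)  # batch of 1, a single start token
    out = model.generate(prompt, max_new_tokens=20, temperature=0.8, top_k=50)  # (1, 21) tensor
    for tok in model.generate_streaming(prompt, max_new_tokens=20):
        print(tok)  # one sampled token id at a time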