0xsuid committed
Commit 5ea9a88
1 Parent(s): 8055c91

Model save

last-checkpoint/generation_config.json → generation_config.json RENAMED
File without changes
last-checkpoint/config.json DELETED
@@ -1,74 +0,0 @@
- {
-   "_name_or_path": "EleutherAI/gpt-neo-1.3B",
-   "activation_function": "gelu_new",
-   "architectures": [
-     "GPTNeoForCausalLM"
-   ],
-   "attention_dropout": 0,
-   "attention_layers": [
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local",
-     "global",
-     "local"
-   ],
-   "attention_types": [
-     [
-       [
-         "global",
-         "local"
-       ],
-       12
-     ]
-   ],
-   "bos_token_id": 50256,
-   "embed_dropout": 0,
-   "eos_token_id": 50256,
-   "gradient_checkpointing": false,
-   "hidden_size": 2048,
-   "initializer_range": 0.02,
-   "intermediate_size": null,
-   "layer_norm_epsilon": 1e-05,
-   "max_position_embeddings": 2048,
-   "model_type": "gpt_neo",
-   "num_heads": 16,
-   "num_layers": 24,
-   "resid_dropout": 0,
-   "summary_activation": null,
-   "summary_first_dropout": 0.1,
-   "summary_proj_to_labels": true,
-   "summary_type": "cls_index",
-   "summary_use_proj": true,
-   "task_specific_params": {
-     "text-generation": {
-       "do_sample": true,
-       "max_length": 50,
-       "temperature": 0.9
-     }
-   },
-   "tokenizer_class": "GPT2Tokenizer",
-   "torch_dtype": "float32",
-   "transformers_version": "4.26.0",
-   "use_cache": false,
-   "vocab_size": 50257,
-   "window_size": 256
- }

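For reference, the deleted config.json matches the stock EleutherAI/gpt-neo-1.3B architecture: 24 layers alternating global and local attention (local window of 256 tokens), hidden size 2048, 16 heads, GPT-2 vocabulary. A minimal sketch of rebuilding an equivalent, randomly initialized model with the transformers library (assumed available; not part of this commit):

import torch
from transformers import GPTNeoConfig, GPTNeoForCausalLM

# Same base configuration the deleted file was derived from.
config = GPTNeoConfig.from_pretrained("EleutherAI/gpt-neo-1.3B")
assert config.hidden_size == 2048 and config.num_layers == 24
model = GPTNeoForCausalLM(config)  # fresh weights, not the fine-tuned ones
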
last-checkpoint/global_step1143/mp_rank_00_model_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:b30eb2fb488eaec8a27bb0b26a171963d3112a31c76479ce8dfdd718e9567bf2
- size 5363072554

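The deleted weight and optimizer shards are tracked with Git LFS, so the diff only shows three-line pointer files: the pointer spec version, the sha256 object id of the real payload, and its size in bytes (about 5.4 GB here). A minimal sketch of reading such a pointer (hypothetical helper, assuming Python 3.9+):

import pathlib

def parse_lfs_pointer(path):
    # An LFS pointer is plain text holding "key value" pairs, one per line.
    text = pathlib.Path(path).read_text()
    fields = dict(line.split(" ", 1) for line in text.splitlines())
    oid = fields["oid"].removeprefix("sha256:")
    return oid, int(fields["size"])

oid, size = parse_lfs_pointer("mp_rank_00_model_states.pt")
print(oid[:12], f"{size / 1e9:.2f} GB")  # b30eb2fb488e 5.36 GB
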
last-checkpoint/global_step1143/zero_pp_rank_0_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d3e6445cc536442d208833ad59863f984bc71140490a8cd3e18575006fcdd4e1
- size 3946735038

last-checkpoint/global_step1143/zero_pp_rank_1_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:11f87ca6a8d81d91ce9669ce0a0b132e16e31c589165b9dfc0f95bec607a1ce3
- size 3946736318

last-checkpoint/global_step1143/zero_pp_rank_2_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:076063a2d14362bcc0562f87c7d5497798acb4d6078c32ba220db1ed473f33c8
- size 3946737086

last-checkpoint/global_step1143/zero_pp_rank_3_mp_rank_00_optim_states.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e5e8189f9e1fe3d7e7bb628368a866880f0176b1f62f4c271d4ccf25c9e8e98a
- size 3946736574

last-checkpoint/latest DELETED
@@ -1 +0,0 @@
- global_step1143

last-checkpoint/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d6d85bffffb2fe97ca10f0460ebc0b029a11e6d606ade9c54de58bcb6de72ec8
- size 5363024236

last-checkpoint/rng_state_0.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2f6bc3b332b1d7b34dd8e7d7ed0389c868155059ddb1d908e9ac3feb6672b23c
- size 14583

last-checkpoint/rng_state_1.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8de5e0c7dadcd828a8d62fffc136e170202022509240a895985c7bc45cabbced
- size 14583

last-checkpoint/rng_state_2.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3c420d12d8aa09a561480241f19154d4aedd8a866de54ed145d69f860bae6f94
- size 14583

last-checkpoint/rng_state_3.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ec873fd7c31f869e7956f098c0d1e17d2296924b3c55e4971a059dd097690b6f
- size 14583

last-checkpoint/special_tokens_map.json DELETED
@@ -1,6 +0,0 @@
- {
-   "bos_token": "<|endoftext|>",
-   "eos_token": "<|endoftext|>",
-   "pad_token": "<|endoftext|>",
-   "unk_token": "<|endoftext|>"
- }

last-checkpoint/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/tokenizer_config.json DELETED
@@ -1,34 +0,0 @@
- {
-   "add_bos_token": false,
-   "add_prefix_space": false,
-   "bos_token": {
-     "__type": "AddedToken",
-     "content": "<|endoftext|>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "__type": "AddedToken",
-     "content": "<|endoftext|>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   },
-   "errors": "replace",
-   "model_max_length": 2048,
-   "name_or_path": "EleutherAI/gpt-neo-1.3B",
-   "pad_token": null,
-   "special_tokens_map_file": null,
-   "tokenizer_class": "GPT2Tokenizer",
-   "unk_token": {
-     "__type": "AddedToken",
-     "content": "<|endoftext|>",
-     "lstrip": false,
-     "normalized": true,
-     "rstrip": false,
-     "single_word": false
-   }
- }

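Together, the deleted tokenizer files describe the unmodified GPT-2 byte-level BPE tokenizer with a 2048-token context, with <|endoftext|> reused as bos, eos, unk, and pad token. A minimal sketch of recreating the same setup (transformers assumed available):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
# Mirror special_tokens_map.json, which reuses eos as the padding token.
tokenizer.pad_token = tokenizer.eos_token
print(tokenizer.model_max_length)  # 2048
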
last-checkpoint/trainer_state.json DELETED
@@ -1,1390 +0,0 @@
- {
-   "best_metric": null,
-   "best_model_checkpoint": null,
-   "epoch": 4.996176952484981,
-   "global_step": 1140,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     { "epoch": 0.0, "learning_rate": 0.0, "loss": 5.4178, "step": 1 },
-     { "epoch": 0.02, "learning_rate": 1.294882868674145e-05, "loss": 0.446, "step": 5 },
-     { "epoch": 0.04, "learning_rate": 1.852558565662928e-05, "loss": 0.3922, "step": 10 },
-     { "epoch": 0.07, "learning_rate": 2.1787779359648994e-05, "loss": 0.3732, "step": 15 },
-     { "epoch": 0.09, "learning_rate": 2.41023426265171e-05, "loss": 0.3591, "step": 20 },
-     { "epoch": 0.11, "learning_rate": 2.58976573734829e-05, "loss": 0.3384, "step": 25 },
-     { "epoch": 0.13, "learning_rate": 2.7364536329536817e-05, "loss": 0.3291, "step": 30 },
-     { "epoch": 0.15, "learning_rate": 2.8604764815275082e-05, "loss": 0.326, "step": 35 },
-     { "epoch": 0.17, "learning_rate": 2.9679099596404923e-05, "loss": 0.3234, "step": 40 },
-     { "epoch": 0.2, "learning_rate": 3.0626730032556536e-05, "loss": 0.3089, "step": 45 },
-     { "epoch": 0.22, "learning_rate": 3.147441434337073e-05, "loss": 0.2995, "step": 50 },
-     { "epoch": 0.24, "learning_rate": 3.224123807782732e-05, "loss": 0.2933, "step": 55 },
-     { "epoch": 0.26, "learning_rate": 3.294129329942464e-05, "loss": 0.2777, "step": 60 },
-     { "epoch": 0.28, "learning_rate": 3.358528167653452e-05, "loss": 0.2757, "step": 65 },
-     { "epoch": 0.31, "learning_rate": 3.4181521785162905e-05, "loss": 0.2674, "step": 70 },
-     { "epoch": 0.33, "learning_rate": 3.473660804639045e-05, "loss": 0.258, "step": 75 },
-     { "epoch": 0.35, "learning_rate": 3.525585656629274e-05, "loss": 0.2524, "step": 80 },
-     { "epoch": 0.37, "learning_rate": 3.574361557584177e-05, "loss": 0.2498, "step": 85 },
-     { "epoch": 0.39, "learning_rate": 3.620348700244436e-05, "loss": 0.2412, "step": 90 },
-     { "epoch": 0.42, "learning_rate": 3.6638488054916214e-05, "loss": 0.231, "step": 95 },
-     { "epoch": 0.44, "learning_rate": 3.705117131325856e-05, "loss": 0.227, "step": 100 },
-     { "epoch": 0.46, "learning_rate": 3.7443715488182624e-05, "loss": 0.219, "step": 105 },
-     { "epoch": 0.48, "learning_rate": 3.781799504771514e-05, "loss": 0.2189, "step": 110 },
-     { "epoch": 0.5, "learning_rate": 3.81756343539018e-05, "loss": 0.2148, "step": 115 },
-     { "epoch": 0.52, "learning_rate": 3.851805026931246e-05, "loss": 0.2054, "step": 120 },
-     { "epoch": 0.55, "learning_rate": 3.8846486060224364e-05, "loss": 0.2012, "step": 125 },
-     { "epoch": 0.57, "learning_rate": 3.916203864642234e-05, "loss": 0.1942, "step": 130 },
-     { "epoch": 0.59, "learning_rate": 3.946568070546408e-05, "loss": 0.1882, "step": 135 },
-     { "epoch": 0.61, "learning_rate": 3.975827875505073e-05, "loss": 0.1829, "step": 140 },
-     { "epoch": 0.63, "learning_rate": 4.004060806090172e-05, "loss": 0.1845, "step": 145 },
-     { "epoch": 0.66, "learning_rate": 4.031336501627827e-05, "loss": 0.1678, "step": 150 },
-     { "epoch": 0.68, "learning_rate": 4.0577177490884e-05, "loss": 0.1798, "step": 155 },
-     { "epoch": 0.7, "learning_rate": 4.0832613536180565e-05, "loss": 0.1735, "step": 160 },
-     { "epoch": 0.72, "learning_rate": 4.1080188750734856e-05, "loss": 0.176, "step": 165 },
-     { "epoch": 0.74, "learning_rate": 4.1320372545729594e-05, "loss": 0.1662, "step": 170 },
-     { "epoch": 0.76, "learning_rate": 4.155359350201654e-05, "loss": 0.1565, "step": 175 },
-     { "epoch": 0.79, "learning_rate": 4.178024397233218e-05, "loss": 0.1574, "step": 180 },
-     { "epoch": 0.81, "learning_rate": 4.200068405281827e-05, "loss": 0.1549, "step": 185 },
-     { "epoch": 0.83, "learning_rate": 4.221524502480404e-05, "loss": 0.1573, "step": 190 },
-     { "epoch": 0.85, "learning_rate": 4.242423234944206e-05, "loss": 0.1477, "step": 195 },
-     { "epoch": 0.87, "learning_rate": 4.262792828314637e-05, "loss": 0.1567, "step": 200 },
-     { "epoch": 0.9, "learning_rate": 4.282659417003183e-05, "loss": 0.1423, "step": 205 },
-     { "epoch": 0.92, "learning_rate": 4.302047245807045e-05, "loss": 0.1405, "step": 210 },
-     { "epoch": 0.94, "learning_rate": 4.320978847798302e-05, "loss": 0.1406, "step": 215 },
-     { "epoch": 0.96, "learning_rate": 4.3394752017602966e-05, "loss": 0.1381, "step": 220 },
-     { "epoch": 0.98, "learning_rate": 4.357555871929799e-05, "loss": 0.131, "step": 225 },
-     { "epoch": 1.01, "learning_rate": 4.375239132378962e-05, "loss": 0.1507, "step": 230 },
-     { "epoch": 1.03, "learning_rate": 4.392542078019592e-05, "loss": 0.1199, "step": 235 },
-     { "epoch": 1.05, "learning_rate": 4.4094807239200284e-05, "loss": 0.1257, "step": 240 },
-     { "epoch": 1.07, "learning_rate": 4.426070094380871e-05, "loss": 0.1185, "step": 245 },
-     { "epoch": 1.1, "learning_rate": 4.442324303011218e-05, "loss": 0.115, "step": 250 },
-     { "epoch": 1.12, "learning_rate": 4.458256624874931e-05, "loss": 0.1188, "step": 255 },
-     { "epoch": 1.14, "learning_rate": 4.4738795616310163e-05, "loss": 0.108, "step": 260 },
-     { "epoch": 1.16, "learning_rate": 4.48920490046898e-05, "loss": 0.1125, "step": 265 },
-     { "epoch": 1.18, "learning_rate": 4.50424376753519e-05, "loss": 0.1094, "step": 270 },
-     { "epoch": 1.21, "learning_rate": 4.5190066764568774e-05, "loss": 0.1033, "step": 275 },
-     { "epoch": 1.23, "learning_rate": 4.533503572493855e-05, "loss": 0.1037, "step": 280 },
-     { "epoch": 1.25, "learning_rate": 4.547743872782376e-05, "loss": 0.1038, "step": 285 },
-     { "epoch": 1.27, "learning_rate": 4.5617365030789545e-05, "loss": 0.1003, "step": 290 },
-     { "epoch": 1.29, "learning_rate": 4.575489931363226e-05, "loss": 0.1047, "step": 295 },
-     { "epoch": 1.31, "learning_rate": 4.589012198616609e-05, "loss": 0.0961, "step": 300 },
-     { "epoch": 1.34, "learning_rate": 4.602310947056923e-05, "loss": 0.0959, "step": 305 },
-     { "epoch": 1.36, "learning_rate": 4.615393446077182e-05, "loss": 0.0992, "step": 310 },
-     { "epoch": 1.38, "learning_rate": 4.6282666161090166e-05, "loss": 0.0938, "step": 315 },
-     { "epoch": 1.4, "learning_rate": 4.640937050606839e-05, "loss": 0.0963, "step": 320 },
-     { "epoch": 1.42, "learning_rate": 4.653411036327597e-05, "loss": 0.0943, "step": 325 },
-     { "epoch": 1.45, "learning_rate": 4.665694572062267e-05, "loss": 0.0889, "step": 330 },
-     { "epoch": 1.47, "learning_rate": 4.677793385958802e-05, "loss": 0.0845, "step": 335 },
-     { "epoch": 1.49, "learning_rate": 4.689712951561742e-05, "loss": 0.0864, "step": 340 },
-     { "epoch": 1.51, "learning_rate": 4.701458502680934e-05, "loss": 0.0869, "step": 345 },
-     { "epoch": 1.53, "learning_rate": 4.713035047190436e-05, "loss": 0.086, "step": 350 },
-     { "epoch": 1.55, "learning_rate": 4.7244473798486756e-05, "loss": 0.0863, "step": 355 },
-     { "epoch": 1.58, "learning_rate": 4.735700094222001e-05, "loss": 0.0824, "step": 360 },
-     { "epoch": 1.6, "learning_rate": 4.746797593785841e-05, "loss": 0.0875, "step": 365 },
-     { "epoch": 1.62, "learning_rate": 4.7577441022706095e-05, "loss": 0.0842, "step": 370 },
-     { "epoch": 1.64, "learning_rate": 4.76854367331319e-05, "loss": 0.0789, "step": 375 },
-     { "epoch": 1.66, "learning_rate": 4.7792001994691866e-05, "loss": 0.0791, "step": 380 },
-     { "epoch": 1.69, "learning_rate": 4.7897174206360945e-05, "loss": 0.0828, "step": 385 },
-     { "epoch": 1.71, "learning_rate": 4.800098931932989e-05, "loss": 0.077, "step": 390 },
-     { "epoch": 1.73, "learning_rate": 4.810348191078279e-05, "loss": 0.0776, "step": 395 },
-     { "epoch": 1.75, "learning_rate": 4.82046852530342e-05, "loss": 0.0798, "step": 400 },
-     { "epoch": 1.77, "learning_rate": 4.830463137837162e-05, "loss": 0.0762, "step": 405 },
-     { "epoch": 1.8, "learning_rate": 4.8403351139919656e-05, "loss": 0.0762, "step": 410 },
-     { "epoch": 1.82, "learning_rate": 4.850087426881512e-05, "loss": 0.0799, "step": 415 },
-     { "epoch": 1.84, "learning_rate": 4.859722942795827e-05, "loss": 0.0757, "step": 420 },
-     { "epoch": 1.86, "learning_rate": 4.8692444262583224e-05, "loss": 0.0733, "step": 425 },
-     { "epoch": 1.88, "learning_rate": 4.8786545447870833e-05, "loss": 0.0755, "step": 430 },
-     { "epoch": 1.9, "learning_rate": 4.8879558733809264e-05, "loss": 0.0731, "step": 435 },
-     { "epoch": 1.93, "learning_rate": 4.897150898749078e-05, "loss": 0.0729, "step": 440 },
-     { "epoch": 1.95, "learning_rate": 4.9062420233018894e-05, "loss": 0.0738, "step": 445 },
-     { "epoch": 1.97, "learning_rate": 4.915231568918581e-05, "loss": 0.0734, "step": 450 },
-     { "epoch": 1.99, "learning_rate": 4.924121780506815e-05, "loss": 0.0729, "step": 455 },
-     { "epoch": 2.02, "learning_rate": 4.9346619656948594e-05, "loss": 0.082, "step": 460 },
-     { "epoch": 2.04, "learning_rate": 4.9433411864583826e-05, "loss": 0.0661, "step": 465 },
-     { "epoch": 2.06, "learning_rate": 4.9519277776977253e-05, "loss": 0.0636, "step": 470 },
-     { "epoch": 2.08, "learning_rate": 4.9604236957409594e-05, "loss": 0.0666, "step": 475 },
-     { "epoch": 2.1, "learning_rate": 4.9688308355869887e-05, "loss": 0.0667, "step": 480 },
-     { "epoch": 2.13, "learning_rate": 4.977151033442553e-05, "loss": 0.064, "step": 485 },
-     { "epoch": 2.15, "learning_rate": 4.9853860691293774e-05, "loss": 0.0659, "step": 490 },
-     { "epoch": 2.17, "learning_rate": 4.993537668369384e-05, "loss": 0.0629, "step": 495 },
-     { "epoch": 2.19, "learning_rate": 5e-05, "loss": 0.0665, "step": 500 },
-     { "epoch": 2.21, "learning_rate": 5e-05, "loss": 0.0626, "step": 505 },
-     { "epoch": 2.24, "learning_rate": 5e-05, "loss": 0.0686, "step": 510 },
-     { "epoch": 2.26, "learning_rate": 5e-05, "loss": 0.0652, "step": 515 },
-     { "epoch": 2.28, "learning_rate": 5e-05, "loss": 0.0622, "step": 520 },
-     { "epoch": 2.3, "learning_rate": 5e-05, "loss": 0.0626, "step": 525 },
-     { "epoch": 2.32, "learning_rate": 5e-05, "loss": 0.0658, "step": 530 },
-     { "epoch": 2.35, "learning_rate": 5e-05, "loss": 0.0614, "step": 535 },
-     { "epoch": 2.37, "learning_rate": 5e-05, "loss": 0.0615, "step": 540 },
-     { "epoch": 2.39, "learning_rate": 5e-05, "loss": 0.0626, "step": 545 },
-     { "epoch": 2.41, "learning_rate": 5e-05, "loss": 0.0643, "step": 550 },
-     { "epoch": 2.43, "learning_rate": 5e-05, "loss": 0.0622, "step": 555 },
-     { "epoch": 2.45, "learning_rate": 5e-05, "loss": 0.0645, "step": 560 },
-     { "epoch": 2.48, "learning_rate": 5e-05, "loss": 0.0632, "step": 565 },
-     { "epoch": 2.5, "learning_rate": 5e-05, "loss": 0.0641, "step": 570 },
-     { "epoch": 2.52, "learning_rate": 5e-05, "loss": 0.0607, "step": 575 },
-     { "epoch": 2.54, "learning_rate": 5e-05, "loss": 0.0622, "step": 580 },
-     { "epoch": 2.56, "learning_rate": 5e-05, "loss": 0.0635, "step": 585 },
-     { "epoch": 2.59, "learning_rate": 5e-05, "loss": 0.0619, "step": 590 },
-     { "epoch": 2.61, "learning_rate": 5e-05, "loss": 0.0613, "step": 595 },
-     { "epoch": 2.63, "learning_rate": 5e-05, "loss": 0.0628, "step": 600 },
-     { "epoch": 2.65, "learning_rate": 5e-05, "loss": 0.0613, "step": 605 },
-     { "epoch": 2.67, "learning_rate": 5e-05, "loss": 0.064, "step": 610 },
-     { "epoch": 2.69, "learning_rate": 5e-05, "loss": 0.0654, "step": 615 },
-     { "epoch": 2.72, "learning_rate": 5e-05, "loss": 0.0635, "step": 620 },
-     { "epoch": 2.74, "learning_rate": 5e-05, "loss": 0.0611, "step": 625 },
-     { "epoch": 2.76, "learning_rate": 5e-05, "loss": 0.0605, "step": 630 },
-     { "epoch": 2.78, "learning_rate": 5e-05, "loss": 0.0606, "step": 635 },
-     { "epoch": 2.8, "learning_rate": 5e-05, "loss": 0.0647, "step": 640 },
-     { "epoch": 2.83, "learning_rate": 5e-05, "loss": 0.0625, "step": 645 },
-     { "epoch": 2.85, "learning_rate": 5e-05, "loss": 0.0621, "step": 650 },
-     { "epoch": 2.87, "learning_rate": 5e-05, "loss": 0.0607, "step": 655 },
-     { "epoch": 2.89, "learning_rate": 5e-05, "loss": 0.0581, "step": 660 },
-     { "epoch": 2.91, "learning_rate": 5e-05, "loss": 0.063, "step": 665 },
-     { "epoch": 2.94, "learning_rate": 5e-05, "loss": 0.0582, "step": 670 },
-     { "epoch": 2.96, "learning_rate": 5e-05, "loss": 0.0598, "step": 675 },
-     { "epoch": 2.98, "learning_rate": 5e-05, "loss": 0.0609, "step": 680 },
-     { "epoch": 3.0, "learning_rate": 5e-05, "loss": 0.073, "step": 685 },
-     { "epoch": 3.03, "learning_rate": 5e-05, "loss": 0.055, "step": 690 },
-     { "epoch": 3.05, "learning_rate": 5e-05, "loss": 0.0587, "step": 695 },
-     { "epoch": 3.07, "learning_rate": 5e-05, "loss": 0.055, "step": 700 },
-     { "epoch": 3.09, "learning_rate": 5e-05, "loss": 0.0559, "step": 705 },
-     { "epoch": 3.11, "learning_rate": 5e-05, "loss": 0.0578, "step": 710 },
-     { "epoch": 3.14, "learning_rate": 5e-05, "loss": 0.0541, "step": 715 },
-     { "epoch": 3.16, "learning_rate": 5e-05, "loss": 0.0558, "step": 720 },
-     { "epoch": 3.18, "learning_rate": 5e-05, "loss": 0.0561, "step": 725 },
-     { "epoch": 3.2, "learning_rate": 5e-05, "loss": 0.0559, "step": 730 },
-     { "epoch": 3.22, "learning_rate": 5e-05, "loss": 0.0544, "step": 735 },
-     { "epoch": 3.24, "learning_rate": 5e-05, "loss": 0.0556, "step": 740 },
-     { "epoch": 3.27, "learning_rate": 5e-05, "loss": 0.0566, "step": 745 },
-     { "epoch": 3.29, "learning_rate": 5e-05, "loss": 0.0551, "step": 750 },
-     { "epoch": 3.31, "learning_rate": 5e-05, "loss": 0.0572, "step": 755 },
-     { "epoch": 3.33, "learning_rate": 5e-05, "loss": 0.0551, "step": 760 },
-     { "epoch": 3.35, "learning_rate": 5e-05, "loss": 0.056, "step": 765 },
-     { "epoch": 3.38, "learning_rate": 5e-05, "loss": 0.0547, "step": 770 },
-     { "epoch": 3.4, "learning_rate": 5e-05, "loss": 0.0564, "step": 775 },
-     { "epoch": 3.42, "learning_rate": 5e-05, "loss": 0.0569, "step": 780 },
-     { "epoch": 3.44, "learning_rate": 5e-05, "loss": 0.0559, "step": 785 },
-     { "epoch": 3.46, "learning_rate": 5e-05, "loss": 0.0548, "step": 790 },
-     { "epoch": 3.48, "learning_rate": 5e-05, "loss": 0.0548, "step": 795 },
-     { "epoch": 3.51, "learning_rate": 5e-05, "loss": 0.0537, "step": 800 },
-     { "epoch": 3.53, "learning_rate": 5e-05, "loss": 0.0547, "step": 805 },
-     { "epoch": 3.55, "learning_rate": 5e-05, "loss": 0.0559, "step": 810 },
-     { "epoch": 3.57, "learning_rate": 5e-05, "loss": 0.057, "step": 815 },
-     { "epoch": 3.59, "learning_rate": 5e-05, "loss": 0.0561, "step": 820 },
-     { "epoch": 3.62, "learning_rate": 5e-05, "loss": 0.0546, "step": 825 },
-     { "epoch": 3.64, "learning_rate": 5e-05, "loss": 0.0552, "step": 830 },
-     { "epoch": 3.66, "learning_rate": 5e-05, "loss": 0.0566, "step": 835 },
-     { "epoch": 3.68, "learning_rate": 5e-05, "loss": 0.0531, "step": 840 },
-     { "epoch": 3.7, "learning_rate": 5e-05, "loss": 0.056, "step": 845 },
-     { "epoch": 3.73, "learning_rate": 5e-05, "loss": 0.0557, "step": 850 },
-     { "epoch": 3.75, "learning_rate": 5e-05, "loss": 0.057, "step": 855 },
-     { "epoch": 3.77, "learning_rate": 5e-05, "loss": 0.0539, "step": 860 },
-     { "epoch": 3.79, "learning_rate": 5e-05, "loss": 0.0529, "step": 865 },
-     { "epoch": 3.81, "learning_rate": 5e-05, "loss": 0.0552, "step": 870 },
-     { "epoch": 3.83, "learning_rate": 5e-05, "loss": 0.0547, "step": 875 },
-     { "epoch": 3.86, "learning_rate": 5e-05, "loss": 0.0553, "step": 880 },
-     { "epoch": 3.88, "learning_rate": 5e-05, "loss": 0.0558, "step": 885 },
-     { "epoch": 3.9, "learning_rate": 5e-05, "loss": 0.054, "step": 890 },
-     { "epoch": 3.92, "learning_rate": 5e-05, "loss": 0.0549, "step": 895 },
-     { "epoch": 3.94, "learning_rate": 5e-05, "loss": 0.0544, "step": 900 },
-     { "epoch": 3.97, "learning_rate": 5e-05, "loss": 0.0558, "step": 905 },
-     { "epoch": 3.99, "learning_rate": 5e-05, "loss": 0.0545, "step": 910 },
-     { "epoch": 4.01, "learning_rate": 5e-05, "loss": 0.0604, "step": 915 },
-     { "epoch": 4.03, "learning_rate": 5e-05, "loss": 0.0497, "step": 920 },
-     { "epoch": 4.06, "learning_rate": 5e-05, "loss": 0.049, "step": 925 },
-     { "epoch": 4.08, "learning_rate": 5e-05, "loss": 0.0488, "step": 930 },
-     { "epoch": 4.1, "learning_rate": 5e-05, "loss": 0.0495, "step": 935 },
-     { "epoch": 4.12, "learning_rate": 5e-05, "loss": 0.049, "step": 940 },
-     { "epoch": 4.14, "learning_rate": 5e-05, "loss": 0.0502, "step": 945 },
-     { "epoch": 4.17, "learning_rate": 5e-05, "loss": 0.0493, "step": 950 },
-     { "epoch": 4.19, "learning_rate": 5e-05, "loss": 0.0496, "step": 955 },
-     { "epoch": 4.21, "learning_rate": 5e-05, "loss": 0.0475, "step": 960 },
-     { "epoch": 4.23, "learning_rate": 5e-05, "loss": 0.0486, "step": 965 },
-     { "epoch": 4.25, "learning_rate": 5e-05, "loss": 0.0503, "step": 970 },
-     { "epoch": 4.28, "learning_rate": 5e-05, "loss": 0.0508, "step": 975 },
-     { "epoch": 4.3, "learning_rate": 5e-05, "loss": 0.0501, "step": 980 },
-     { "epoch": 4.32, "learning_rate": 5e-05, "loss": 0.0499, "step": 985 },
-     { "epoch": 4.34, "learning_rate": 5e-05, "loss": 0.0485, "step": 990 },
-     { "epoch": 4.36, "learning_rate": 5e-05, "loss": 0.0494, "step": 995 },
-     { "epoch": 4.38, "learning_rate": 5e-05, "loss": 0.0503, "step": 1000 },
-     { "epoch": 4.41, "learning_rate": 5e-05, "loss": 0.0512, "step": 1005 },
-     { "epoch": 4.43, "learning_rate": 5e-05, "loss": 0.0513, "step": 1010 },
-     { "epoch": 4.45, "learning_rate": 5e-05, "loss": 0.0496, "step": 1015 },
-     { "epoch": 4.47, "learning_rate": 5e-05, "loss": 0.0493, "step": 1020 },
-     { "epoch": 4.49, "learning_rate": 5e-05, "loss": 0.0516, "step": 1025 },
-     { "epoch": 4.52, "learning_rate": 5e-05, "loss": 0.0498, "step": 1030 },
-     { "epoch": 4.54, "learning_rate": 5e-05, "loss": 0.0498, "step": 1035 },
-     { "epoch": 4.56, "learning_rate": 5e-05, "loss": 0.0491, "step": 1040 },
-     { "epoch": 4.58, "learning_rate": 5e-05, "loss": 0.047, "step": 1045 },
-     { "epoch": 4.6, "learning_rate": 5e-05, "loss": 0.0493, "step": 1050 },
-     { "epoch": 4.62, "learning_rate": 5e-05, "loss": 0.0488, "step": 1055 },
-     { "epoch": 4.65, "learning_rate": 5e-05, "loss": 0.0502, "step": 1060 },
-     { "epoch": 4.67, "learning_rate": 5e-05, "loss": 0.0511, "step": 1065 },
-     { "epoch": 4.69, "learning_rate": 5e-05, "loss": 0.0498, "step": 1070 },
-     { "epoch": 4.71, "learning_rate": 5e-05, "loss": 0.0511, "step": 1075 },
-     { "epoch": 4.73, "learning_rate": 5e-05, "loss": 0.0498, "step": 1080 },
-     { "epoch": 4.76, "learning_rate": 5e-05, "loss": 0.0521, "step": 1085 },
-     { "epoch": 4.78, "learning_rate": 5e-05, "loss": 0.0503, "step": 1090 },
-     { "epoch": 4.8, "learning_rate": 5e-05, "loss": 0.0509, "step": 1095 },
-     { "epoch": 4.82, "learning_rate": 5e-05, "loss": 0.0523, "step": 1100 },
-     { "epoch": 4.84, "learning_rate": 5e-05, "loss": 0.0465, "step": 1105 },
-     { "epoch": 4.87, "learning_rate": 5e-05, "loss": 0.0521, "step": 1110 },
-     { "epoch": 4.89, "learning_rate": 5e-05, "loss": 0.0488, "step": 1115 },
-     { "epoch": 4.91, "learning_rate": 5e-05, "loss": 0.0488, "step": 1120 },
-     { "epoch": 4.93, "learning_rate": 5e-05, "loss": 0.0502, "step": 1125 },
-     { "epoch": 4.95, "learning_rate": 5e-05, "loss": 0.048, "step": 1130 },
-     { "epoch": 4.97, "learning_rate": 5e-05, "loss": 0.0497, "step": 1135 },
-     { "epoch": 5.0, "learning_rate": 5e-05, "loss": 0.0484, "step": 1140 }
-   ],
-   "max_steps": 1140,
-   "num_train_epochs": 5,
-   "total_flos": 8.693964764402942e+18,
-   "trial_name": null,
-   "trial_params": null
- }

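The log history above is plain JSON, so the training curve is easy to recover from a local copy of the deleted file. A minimal sketch using only the standard library (the file path is hypothetical):

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry records epoch, learning_rate, loss, and step.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
print(steps[0], losses[0])    # 1 5.4178
print(steps[-1], losses[-1])  # 1140 0.0484
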
last-checkpoint/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:35e1a0fcbf25720ed542bd10424c03d13a02ac8f5eff85f51e6389fe1f87d97f
- size 4603

last-checkpoint/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/zero_to_fp32.py DELETED
@@ -1,482 +0,0 @@
- #!/usr/bin/env python
-
- # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
- # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
- # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
- # application.
- #
- # example: python zero_to_fp32.py . pytorch_model.bin
-
- import argparse
- import torch
- import glob
- import math
- import os
- import re
- from collections import OrderedDict
-
- # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
- # DeepSpeed data structures it has to be available in the current python environment.
- from deepspeed.utils import logger
- from deepspeed.checkpoint.constants import (DS_VERSION,
-                                             OPTIMIZER_STATE_DICT,
-                                             SINGLE_PARTITION_OF_FP32_GROUPS,
-                                             FP32_FLAT_GROUPS,
-                                             ZERO_STAGE,
-                                             PARTITION_COUNT,
-                                             PARAM_SHAPES,
-                                             BUFFER_NAMES)
-
- debug = 0
-
- # load to cpu
- device = torch.device('cpu')
-
-
- def atoi(text):
-     return int(text) if text.isdigit() else text
-
-
- def natural_keys(text):
-     '''
-     alist.sort(key=natural_keys) sorts in human order
-     http://nedbatchelder.com/blog/200712/human_sorting.html
-     (See Toothy's implementation in the comments)
-     '''
-     return [atoi(c) for c in re.split(r'(\d+)', text)]
-
-
- def get_model_state_file(checkpoint_dir, zero_stage):
-     if not os.path.isdir(checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
-
-     # there should be only one file
-     if zero_stage == 2:
-         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
-     elif zero_stage == 3:
-         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
-
-     if not os.path.exists(file):
-         raise FileNotFoundError(f"can't find model states file at '{file}'")
-
-     return file
-
-
- def get_optim_files(checkpoint_dir):
-     # XXX: need to test that this simple glob rule works for multi-node setup too
-     optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
-                                                 "*_optim_states.pt")),
-                          key=natural_keys)
-
-     if len(optim_files) == 0:
-         raise FileNotFoundError(
-             f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
-
-     return optim_files
-
-
- def parse_model_state(file):
-     state_dict = torch.load(file, map_location=device)
-
-     if BUFFER_NAMES not in state_dict:
-         raise ValueError(f"{file} is not a model state checkpoint")
-     buffer_names = state_dict[BUFFER_NAMES]
-     if debug:
-         print("Found buffers:", buffer_names)
-
-     # recover just the buffers while restoring them to fp32 if they were saved in fp16
-     buffers = {
-         k: v.float()
-         for k,
-         v in state_dict["module"].items() if k in buffer_names
-     }
-     param_shapes = state_dict[PARAM_SHAPES]
-
-     ds_version = state_dict.get(DS_VERSION, None)
-
-     return buffers, param_shapes, ds_version
-
-
- def parse_optim_states(files, ds_checkpoint_dir):
-
-     total_files = len(files)
-     state_dicts = []
-     for f in files:
-         state_dicts.append(torch.load(f, map_location=device))
-
-     if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
-         raise ValueError(f"{files[0]} is not a zero checkpoint")
-     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
-     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
-
-     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
-     # parameters can be different from data parallelism for non-expert parameters. So we can just
-     # use the max of the partition_count to get the dp world_size.
-
-     if type(world_size) is list:
-         world_size = max(world_size)
-
-     if world_size != total_files:
-         raise ValueError(
-             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
-             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
-         )
-
-     # the groups are named differently in each stage
-     if zero_stage == 2:
-         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
-     elif zero_stage == 3:
-         fp32_groups_key = FP32_FLAT_GROUPS
-     else:
-         raise ValueError(f"unknown zero stage {zero_stage}")
-
-     if zero_stage == 2:
-         fp32_flat_groups = [
-             state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
-             for i in range(len(state_dicts))
-         ]
-     elif zero_stage == 3:
-         # if there is more than one param group, there will be multiple flattened tensors - one
-         # flattened tensor per group - for simplicity merge them into a single tensor
-         #
-         # XXX: could make the script more memory efficient for when there are multiple groups - it
-         # will require matching the sub-lists of param_shapes for each param group flattened tensor
-
-         fp32_flat_groups = [
-             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
-                       0) for i in range(len(state_dicts))
-         ]
-
-     return zero_stage, world_size, fp32_flat_groups
-
-
- def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
-     """
-     Returns fp32 state_dict reconstructed from ds checkpoint
-
-     Args:
-         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
-
-     """
-     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
-
-     optim_files = get_optim_files(ds_checkpoint_dir)
-     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
-     print(
-         f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
-
-     model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
-     buffers, param_shapes, ds_version = parse_model_state(model_file)
-     print(f'Parsing checkpoint created by deepspeed=={ds_version}')
-
-     if zero_stage == 2:
-         return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
-                                                           param_shapes,
-                                                           fp32_flat_groups,
-                                                           buffers)
-     elif zero_stage == 3:
-         return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
-                                                           param_shapes,
-                                                           fp32_flat_groups,
-                                                           buffers)
-
-
- def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
-                                                param_shapes,
-                                                fp32_flat_groups,
-                                                buffers):
-
-     # Reconstruction protocol:
-     #
-     # XXX: document this
-
-     if debug:
-         for i in range(world_size):
-             for j in range(len(fp32_flat_groups[0])):
-                 print(
-                     f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
-
-     # XXX: memory usage doubles here (zero2)
-     num_param_groups = len(fp32_flat_groups[0])
-     merged_single_partition_of_fp32_groups = []
-     for i in range(num_param_groups):
-         merged_partitions = [sd[i] for sd in fp32_flat_groups]
-         full_single_fp32_vector = torch.cat(merged_partitions, 0)
-         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
-     avail_numel = sum([
-         full_single_fp32_vector.numel()
-         for full_single_fp32_vector in merged_single_partition_of_fp32_groups
-     ])
-
-     if debug:
-         wanted_params = sum([len(shapes) for shapes in param_shapes])
-         wanted_numel = sum(
-             [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
-         # not asserting if there is a mismatch due to possible padding
-         print(f"Have {avail_numel} numels to process.")
-         print(f"Need {wanted_numel} numels in {wanted_params} params.")
-
-     state_dict = OrderedDict()
-
-     # buffers
-     state_dict.update(buffers)
-     if debug:
-         print(f"added {len(buffers)} buffers")
-
-     # params
-     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-     # out-of-core computing solution
-     total_numel = 0
-     total_params = 0
-     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
-         offset = 0
-         avail_numel = full_single_fp32_vector.numel()
-         for name, shape in shapes.items():
-
-             unpartitioned_numel = shape.numel()
-             total_numel += unpartitioned_numel
-             total_params += 1
-
-             if debug:
-                 print(
-                     f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
-                 )
-             state_dict[name] = full_single_fp32_vector.narrow(
-                 0,
-                 offset,
-                 unpartitioned_numel).view(shape)
-             offset += unpartitioned_numel
-
-     # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
-     # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
-     # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
-     # live optimizer object, so we are checking that the numbers are within the right range
-     align_to = 2 * world_size
-
-     def zero2_align(x):
-         return align_to * math.ceil(x / align_to)
-
-     if debug:
-         print(f"original offset={offset}, avail_numel={avail_numel}")
-
-     offset = zero2_align(offset)
-     avail_numel = zero2_align(avail_numel)
-
-     if debug:
-         print(f"aligned offset={offset}, avail_numel={avail_numel}")
-
-     # Sanity check
-     if offset != avail_numel:
-         raise ValueError(
-             f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-     print(
-         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
-     )
-
-     return state_dict
-
-
- def zero3_partitioned_param_info(unpartitioned_numel, world_size):
-     remainder = unpartitioned_numel % world_size
-     padding_numel = (world_size - remainder) if remainder else 0
-     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
-     return partitioned_numel, padding_numel
-
-
- def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
-                                                param_shapes,
-                                                fp32_flat_groups,
-                                                buffers):
-
-     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
-     # param, re-consolidating each param, while dealing with padding if any
-
-     avail_numel = fp32_flat_groups[0].numel() * world_size
-     # merge list of dicts, preserving order
-     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
-
-     if debug:
-         for i in range(world_size):
-             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
-
-     wanted_params = len(param_shapes)
-     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
-     # not asserting if there is a mismatch due to possible padding
-     print(f"Have {avail_numel} numels to process.")
-     print(f"Need {wanted_numel} numels in {wanted_params} params.")
-
-     state_dict = OrderedDict()
-
-     # buffers
-     state_dict.update(buffers)
-     if debug:
-         print(f"added {len(buffers)} buffers")
-
-     # params
-     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
-     # out-of-core computing solution
-     offset = 0
-     total_numel = 0
-     total_params = 0
-     for name, shape in param_shapes.items():
-
-         unpartitioned_numel = shape.numel()
-         total_numel += unpartitioned_numel
-         total_params += 1
-
-         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
-
-         if debug:
-             print(
-                 f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
-             )
-
-         # XXX: memory usage doubles here
-         state_dict[name] = torch.cat(
-             tuple(fp32_flat_groups[i].narrow(0,
-                                              offset,
-                                              partitioned_numel)
-                   for i in range(world_size)),
-             0).narrow(0,
-                       0,
-                       unpartitioned_numel).view(shape)
-         offset += partitioned_numel
-
-     offset *= world_size
-
-     # Sanity check
-     if offset != avail_numel:
-         raise ValueError(
-             f"consumed {offset} numels out of {avail_numel} - something is wrong")
-
-     print(
-         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
-     )
-
-     return state_dict
-
-
- def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
-     """
-     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
-     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
-     via a model hub.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
-
-     Returns:
-         - pytorch ``state_dict``
-
-     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
-     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
-     the checkpoint.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
-         # do the training and checkpoint saving
-         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
-         model = model.cpu() # move to cpu
-         model.load_state_dict(state_dict)
-         # submit to model hub or save the model to share with others
-
-     In this example the ``model`` will no longer be usable in the deepspeed context of the same
-     application. i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
-
-     """
-     if tag is None:
-         latest_path = os.path.join(checkpoint_dir, 'latest')
-         if os.path.isfile(latest_path):
-             with open(latest_path, 'r') as fd:
-                 tag = fd.read().strip()
-         else:
-             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
-
-     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
-
-     if not os.path.isdir(ds_checkpoint_dir):
-         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
-
-     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
-
-
- def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
-     """
-     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
-     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
-
-     Args:
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-     """
-
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-     print(f"Saving fp32 state dict to {output_file}")
-     torch.save(state_dict, output_file)
-
-
- def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
-     """
-     1. Put the provided model to cpu
-     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
-     3. Load it into the provided model
-
-     Args:
-         - ``model``: the model object to update
-         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
-         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
-
-     Returns:
-         - ``model``: modified model
-
-     Make sure you have plenty of CPU memory available before you call this function. If you don't
-     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
-     conveniently placed for you in the checkpoint folder.
-
-     A typical usage might be ::
-
-         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
-         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
-         # submit to model hub or save the model to share with others
-
-     Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
-     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
-     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
-
-     """
-     logger.info(f"Extracting fp32 weights")
-     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
-
-     logger.info(f"Overwriting model with fp32 weights")
-     model = model.cpu()
-     model.load_state_dict(state_dict, strict=False)
-
-     return model
-
-
- if __name__ == "__main__":
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "checkpoint_dir",
-         type=str,
-         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
-     parser.add_argument(
-         "output_file",
-         type=str,
-         help=
-         "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
-     )
-     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
-     args = parser.parse_args()
-
-     debug = args.debug
-
-     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
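Per its own docstring, the deleted helper can still be run offline against a surviving copy of the checkpoint folder this commit removed. A minimal sketch (hypothetical paths; assumes zero_to_fp32.py is importable and DeepSpeed is installed):

# The 'latest' file in last-checkpoint contained "global_step1143", the tag
# folder holding the model and optimizer shards shown above.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Consolidates the four ZeRO optimizer shards into one fp32 pytorch_model.bin.
convert_zero_checkpoint_to_fp32_state_dict("last-checkpoint", "pytorch_model.bin")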