Kristijan committed
Commit 41e0c10
1 Parent(s): 88f240e

Upload 8 files


Adding the whole checkpoint folder (with optimizer, model, config, etc.) for the GPT-2 small-like 12-layer model.

Files changed (8)
  1. config.json +30 -0
  2. optimizer.pt +3 -0
  3. pytorch_model.bin +3 -0
  4. rng_state.pth +3 -0
  5. scaler.pt +3 -0
  6. scheduler.pt +3 -0
  7. trainer_state.json +822 -0
  8. training_args.bin +3 -0
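
The eight files above together form a standard Hugging Face Trainer checkpoint folder. Purely as an illustration (not part of this commit), a minimal Python sketch of how such a folder could be used, assuming it has been downloaded locally as ./checkpoint-31000 (a placeholder path) and that transformers is installed; note that no tokenizer files are included in this upload:

# Minimal sketch, not part of this commit. Assumes the checkpoint folder was
# downloaded locally as ./checkpoint-31000 and that transformers/torch are installed.
from transformers import GPT2LMHeadModel

# config.json + pytorch_model.bin are enough for inference
# (a matching tokenizer has to come from elsewhere).
model = GPT2LMHeadModel.from_pretrained("./checkpoint-31000")
model.eval()

# The remaining files (optimizer.pt, scheduler.pt, scaler.pt, rng_state.pth,
# trainer_state.json, training_args.bin) are what Trainer uses to resume training:
# trainer.train(resume_from_checkpoint="./checkpoint-31000")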
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "transformers_version": "4.6.1",
+   "use_cache": true,
+   "vocab_size": 28439
+ }
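
For reference, this config describes a 12-layer, 12-head, 768-dimensional GPT-2 variant with a 1024-token context window and a 28,439-token vocabulary. A small sketch (an illustration only, not part of the commit; it assumes transformers is installed and that config.json has been downloaded into the working directory) of how the config maps onto a model object:

# Sketch only: build an untrained model from the config above.
# The trained weights themselves live in pytorch_model.bin.
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config.from_json_file("config.json")
model = GPT2LMHeadModel(config)  # randomly initialized

n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")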
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5fc39bca73ec237baf3ac9a798f6440424e2563b4119332e70464a05e0b04fe
+ size 861561495
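
optimizer.pt, like the other binary files in this commit, is stored with Git LFS, so the diff above shows only the pointer file: the LFS spec version, the SHA-256 of the actual blob (oid), and its size in bytes (here roughly 860 MB of optimizer state). A small sketch of checking a downloaded blob against the recorded oid (the local filename is a placeholder; the real file must already have been fetched through LFS):

# Sketch: verify a downloaded LFS object against the sha256 oid in its pointer file.
# "optimizer.pt" is assumed to be the resolved binary, not the 3-line pointer above.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "d5fc39bca73ec237baf3ac9a798f6440424e2563b4119332e70464a05e0b04fe"
assert sha256_of("optimizer.pt") == expected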
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:275fb18cbd8fe5dbb8408286abf41616807dc35d954fda3770642866f22d50cb
+ size 443383419
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb6ce2e55d10782731f1ba5fa76fd3a0aaad0a59a0fa74b67e588a896b35eac1
+ size 14657
scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:923ead5faf30bd8c7efdc44ed28f0b833c8fceb006bd42e6c034f8faf6fd4ec1
+ size 559
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9dc201b0daadb61ba308d04c165a144a35adf1b3f4de97f9d3ad94c0e36d533
+ size 559
trainer_state.json ADDED
@@ -0,0 +1,822 @@
+ {
+   "best_metric": 3.699657678604126,
+   "best_model_checkpoint": "/scratch/ka2773/project/lm-mem/checkpoints/gpt2_40m_12-768-1024_a_02/checkpoint-31000",
+   "epoch": 8.481532147742818,
+   "global_step": 31000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.14,
+       "learning_rate": 6e-05,
+       "loss": 6.6803,
+       "step": 500
+     },
+     {
+       "epoch": 0.14,
+       "eval_loss": 6.017919540405273,
+       "eval_runtime": 9.4636,
+       "eval_samples_per_second": 24.938,
+       "step": 500
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 6e-05,
+       "loss": 6.0117,
+       "step": 1000
+     },
+     {
+       "epoch": 0.27,
+       "eval_loss": 5.752170085906982,
+       "eval_runtime": 9.4619,
+       "eval_samples_per_second": 24.942,
+       "step": 1000
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 6e-05,
+       "loss": 5.8304,
+       "step": 1500
+     },
+     {
+       "epoch": 0.41,
+       "eval_loss": 5.598978519439697,
+       "eval_runtime": 9.4601,
+       "eval_samples_per_second": 24.947,
+       "step": 1500
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 6e-05,
+       "loss": 5.6932,
+       "step": 2000
+     },
+     {
+       "epoch": 0.55,
+       "eval_loss": 5.4606218338012695,
+       "eval_runtime": 9.4514,
+       "eval_samples_per_second": 24.97,
+       "step": 2000
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 6e-05,
+       "loss": 5.5582,
+       "step": 2500
+     },
+     {
+       "epoch": 0.68,
+       "eval_loss": 5.324854373931885,
+       "eval_runtime": 9.4555,
+       "eval_samples_per_second": 24.959,
+       "step": 2500
+     },
+     {
+       "epoch": 0.82,
+       "learning_rate": 6e-05,
+       "loss": 5.4194,
+       "step": 3000
+     },
+     {
+       "epoch": 0.82,
+       "eval_loss": 5.187784194946289,
+       "eval_runtime": 9.4406,
+       "eval_samples_per_second": 24.998,
+       "step": 3000
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 6e-05,
+       "loss": 5.2874,
+       "step": 3500
+     },
+     {
+       "epoch": 0.96,
+       "eval_loss": 5.043916702270508,
+       "eval_runtime": 9.4594,
+       "eval_samples_per_second": 24.949,
+       "step": 3500
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 6e-05,
+       "loss": 5.1502,
+       "step": 4000
+     },
+     {
+       "epoch": 1.09,
+       "eval_loss": 4.9315690994262695,
+       "eval_runtime": 9.458,
+       "eval_samples_per_second": 24.952,
+       "step": 4000
+     },
+     {
+       "epoch": 1.23,
+       "learning_rate": 6e-05,
+       "loss": 5.0351,
+       "step": 4500
+     },
+     {
+       "epoch": 1.23,
+       "eval_loss": 4.828794479370117,
+       "eval_runtime": 9.4563,
+       "eval_samples_per_second": 24.957,
+       "step": 4500
+     },
+     {
+       "epoch": 1.37,
+       "learning_rate": 6e-05,
+       "loss": 4.9495,
+       "step": 5000
+     },
+     {
+       "epoch": 1.37,
+       "eval_loss": 4.735019683837891,
+       "eval_runtime": 9.4648,
+       "eval_samples_per_second": 24.934,
+       "step": 5000
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 6e-05,
+       "loss": 4.8715,
+       "step": 5500
+     },
+     {
+       "epoch": 1.5,
+       "eval_loss": 4.6624579429626465,
+       "eval_runtime": 9.4463,
+       "eval_samples_per_second": 24.983,
+       "step": 5500
+     },
+     {
+       "epoch": 1.64,
+       "learning_rate": 6e-05,
+       "loss": 4.7962,
+       "step": 6000
+     },
+     {
+       "epoch": 1.64,
+       "eval_loss": 4.586391925811768,
+       "eval_runtime": 9.4563,
+       "eval_samples_per_second": 24.957,
+       "step": 6000
+     },
+     {
+       "epoch": 1.78,
+       "learning_rate": 6e-05,
+       "loss": 4.7329,
+       "step": 6500
+     },
+     {
+       "epoch": 1.78,
+       "eval_loss": 4.520113468170166,
+       "eval_runtime": 9.4613,
+       "eval_samples_per_second": 24.944,
+       "step": 6500
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 6e-05,
+       "loss": 4.6692,
+       "step": 7000
+     },
+     {
+       "epoch": 1.92,
+       "eval_loss": 4.469329357147217,
+       "eval_runtime": 9.4531,
+       "eval_samples_per_second": 24.965,
+       "step": 7000
+     },
+     {
+       "epoch": 2.05,
+       "learning_rate": 6e-05,
+       "loss": 4.603,
+       "step": 7500
+     },
+     {
+       "epoch": 2.05,
+       "eval_loss": 4.408419132232666,
+       "eval_runtime": 9.4573,
+       "eval_samples_per_second": 24.954,
+       "step": 7500
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 6e-05,
+       "loss": 4.5274,
+       "step": 8000
+     },
+     {
+       "epoch": 2.19,
+       "eval_loss": 4.358486175537109,
+       "eval_runtime": 9.4556,
+       "eval_samples_per_second": 24.959,
+       "step": 8000
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 6e-05,
+       "loss": 4.482,
+       "step": 8500
+     },
+     {
+       "epoch": 2.33,
+       "eval_loss": 4.316709041595459,
+       "eval_runtime": 9.4572,
+       "eval_samples_per_second": 24.954,
+       "step": 8500
+     },
+     {
+       "epoch": 2.46,
+       "learning_rate": 6e-05,
+       "loss": 4.4431,
+       "step": 9000
+     },
+     {
+       "epoch": 2.46,
+       "eval_loss": 4.261863708496094,
+       "eval_runtime": 9.4644,
+       "eval_samples_per_second": 24.935,
+       "step": 9000
+     },
+     {
+       "epoch": 2.6,
+       "learning_rate": 6e-05,
+       "loss": 4.4003,
+       "step": 9500
+     },
+     {
+       "epoch": 2.6,
+       "eval_loss": 4.2196125984191895,
+       "eval_runtime": 9.4647,
+       "eval_samples_per_second": 24.935,
+       "step": 9500
+     },
+     {
+       "epoch": 2.74,
+       "learning_rate": 6e-05,
+       "loss": 4.3602,
+       "step": 10000
+     },
+     {
+       "epoch": 2.74,
+       "eval_loss": 4.1680402755737305,
+       "eval_runtime": 9.4399,
+       "eval_samples_per_second": 25.0,
+       "step": 10000
+     },
+     {
+       "epoch": 2.87,
+       "learning_rate": 6e-05,
+       "loss": 4.3072,
+       "step": 10500
+     },
+     {
+       "epoch": 2.87,
+       "eval_loss": 4.134276390075684,
+       "eval_runtime": 9.4305,
+       "eval_samples_per_second": 25.025,
+       "step": 10500
+     },
+     {
+       "epoch": 3.01,
+       "learning_rate": 6e-05,
+       "loss": 4.2849,
+       "step": 11000
+     },
+     {
+       "epoch": 3.01,
+       "eval_loss": 4.093378067016602,
+       "eval_runtime": 9.4676,
+       "eval_samples_per_second": 24.927,
+       "step": 11000
+     },
+     {
+       "epoch": 3.15,
+       "learning_rate": 6e-05,
+       "loss": 4.2092,
+       "step": 11500
+     },
+     {
+       "epoch": 3.15,
+       "eval_loss": 4.067226886749268,
+       "eval_runtime": 9.4548,
+       "eval_samples_per_second": 24.961,
+       "step": 11500
+     },
+     {
+       "epoch": 3.28,
+       "learning_rate": 6e-05,
+       "loss": 4.1923,
+       "step": 12000
+     },
+     {
+       "epoch": 3.28,
+       "eval_loss": 4.044227600097656,
+       "eval_runtime": 9.4733,
+       "eval_samples_per_second": 24.912,
+       "step": 12000
+     },
+     {
+       "epoch": 3.42,
+       "learning_rate": 6e-05,
+       "loss": 4.1774,
+       "step": 12500
+     },
+     {
+       "epoch": 3.42,
+       "eval_loss": 4.027223587036133,
+       "eval_runtime": 9.4672,
+       "eval_samples_per_second": 24.928,
+       "step": 12500
+     },
+     {
+       "epoch": 3.56,
+       "learning_rate": 6e-05,
+       "loss": 4.1547,
+       "step": 13000
+     },
+     {
+       "epoch": 3.56,
+       "eval_loss": 4.007537841796875,
+       "eval_runtime": 9.4577,
+       "eval_samples_per_second": 24.953,
+       "step": 13000
+     },
+     {
+       "epoch": 3.69,
+       "learning_rate": 6e-05,
+       "loss": 4.141,
+       "step": 13500
+     },
+     {
+       "epoch": 3.69,
+       "eval_loss": 3.9847309589385986,
+       "eval_runtime": 9.464,
+       "eval_samples_per_second": 24.937,
+       "step": 13500
+     },
+     {
+       "epoch": 3.83,
+       "learning_rate": 6e-05,
+       "loss": 4.1375,
+       "step": 14000
+     },
+     {
+       "epoch": 3.83,
+       "eval_loss": 3.9671761989593506,
+       "eval_runtime": 9.4732,
+       "eval_samples_per_second": 24.912,
+       "step": 14000
+     },
+     {
+       "epoch": 3.97,
+       "learning_rate": 6e-05,
+       "loss": 4.1064,
+       "step": 14500
+     },
+     {
+       "epoch": 3.97,
+       "eval_loss": 3.9399986267089844,
+       "eval_runtime": 9.4647,
+       "eval_samples_per_second": 24.935,
+       "step": 14500
+     },
+     {
+       "epoch": 4.1,
+       "learning_rate": 6e-05,
+       "loss": 4.0485,
+       "step": 15000
+     },
+     {
+       "epoch": 4.1,
+       "eval_loss": 3.932234525680542,
+       "eval_runtime": 9.4672,
+       "eval_samples_per_second": 24.928,
+       "step": 15000
+     },
+     {
+       "epoch": 4.24,
+       "learning_rate": 6e-05,
+       "loss": 4.0335,
+       "step": 15500
+     },
+     {
+       "epoch": 4.24,
+       "eval_loss": 3.9215309619903564,
+       "eval_runtime": 9.4597,
+       "eval_samples_per_second": 24.948,
+       "step": 15500
+     },
+     {
+       "epoch": 4.38,
+       "learning_rate": 6e-05,
+       "loss": 4.0358,
+       "step": 16000
+     },
+     {
+       "epoch": 4.38,
+       "eval_loss": 3.898069381713867,
+       "eval_runtime": 9.4635,
+       "eval_samples_per_second": 24.938,
+       "step": 16000
+     },
+     {
+       "epoch": 4.51,
+       "learning_rate": 6e-05,
+       "loss": 4.0187,
+       "step": 16500
+     },
+     {
+       "epoch": 4.51,
+       "eval_loss": 3.893036127090454,
+       "eval_runtime": 9.4622,
+       "eval_samples_per_second": 24.941,
+       "step": 16500
+     },
+     {
+       "epoch": 4.65,
+       "learning_rate": 6e-05,
+       "loss": 4.0188,
+       "step": 17000
+     },
+     {
+       "epoch": 4.65,
+       "eval_loss": 3.8782527446746826,
+       "eval_runtime": 9.459,
+       "eval_samples_per_second": 24.95,
+       "step": 17000
+     },
+     {
+       "epoch": 4.79,
+       "learning_rate": 6e-05,
+       "loss": 4.0176,
+       "step": 17500
+     },
+     {
+       "epoch": 4.79,
+       "eval_loss": 3.8655033111572266,
+       "eval_runtime": 9.4578,
+       "eval_samples_per_second": 24.953,
+       "step": 17500
+     },
+     {
+       "epoch": 4.92,
+       "learning_rate": 6e-05,
+       "loss": 4.0025,
+       "step": 18000
+     },
+     {
+       "epoch": 4.92,
+       "eval_loss": 3.8568382263183594,
+       "eval_runtime": 9.4581,
+       "eval_samples_per_second": 24.952,
+       "step": 18000
+     },
+     {
+       "epoch": 5.06,
+       "learning_rate": 6e-05,
+       "loss": 3.9554,
+       "step": 18500
+     },
+     {
+       "epoch": 5.06,
+       "eval_loss": 3.849705696105957,
+       "eval_runtime": 9.4471,
+       "eval_samples_per_second": 24.981,
+       "step": 18500
+     },
+     {
+       "epoch": 5.2,
+       "learning_rate": 6e-05,
+       "loss": 3.9252,
+       "step": 19000
+     },
+     {
+       "epoch": 5.2,
+       "eval_loss": 3.838930130004883,
+       "eval_runtime": 9.4605,
+       "eval_samples_per_second": 24.946,
+       "step": 19000
+     },
+     {
+       "epoch": 5.34,
+       "learning_rate": 6e-05,
+       "loss": 3.9239,
+       "step": 19500
+     },
+     {
+       "epoch": 5.34,
+       "eval_loss": 3.8333945274353027,
+       "eval_runtime": 9.462,
+       "eval_samples_per_second": 24.942,
+       "step": 19500
+     },
+     {
+       "epoch": 5.47,
+       "learning_rate": 6e-05,
+       "loss": 3.9354,
+       "step": 20000
+     },
+     {
+       "epoch": 5.47,
+       "eval_loss": 3.8130805492401123,
+       "eval_runtime": 9.4596,
+       "eval_samples_per_second": 24.948,
+       "step": 20000
+     },
+     {
+       "epoch": 5.61,
+       "learning_rate": 6e-05,
+       "loss": 3.9418,
+       "step": 20500
+     },
+     {
+       "epoch": 5.61,
+       "eval_loss": 3.8175787925720215,
+       "eval_runtime": 9.4602,
+       "eval_samples_per_second": 24.947,
+       "step": 20500
+     },
+     {
+       "epoch": 5.75,
+       "learning_rate": 6e-05,
+       "loss": 3.9291,
+       "step": 21000
+     },
+     {
+       "epoch": 5.75,
+       "eval_loss": 3.807772636413574,
+       "eval_runtime": 9.459,
+       "eval_samples_per_second": 24.95,
+       "step": 21000
+     },
+     {
+       "epoch": 5.88,
+       "learning_rate": 6e-05,
+       "loss": 3.9309,
+       "step": 21500
+     },
+     {
+       "epoch": 5.88,
+       "eval_loss": 3.7906742095947266,
+       "eval_runtime": 9.4675,
+       "eval_samples_per_second": 24.927,
+       "step": 21500
+     },
+     {
+       "epoch": 6.02,
+       "learning_rate": 6e-05,
+       "loss": 3.9105,
+       "step": 22000
+     },
+     {
+       "epoch": 6.02,
+       "eval_loss": 3.7875397205352783,
+       "eval_runtime": 9.4606,
+       "eval_samples_per_second": 24.946,
+       "step": 22000
+     },
+     {
+       "epoch": 6.16,
+       "learning_rate": 6e-05,
+       "loss": 3.8424,
+       "step": 22500
+     },
+     {
+       "epoch": 6.16,
+       "eval_loss": 3.782013416290283,
+       "eval_runtime": 9.4639,
+       "eval_samples_per_second": 24.937,
+       "step": 22500
+     },
+     {
+       "epoch": 6.29,
+       "learning_rate": 6e-05,
+       "loss": 3.8579,
+       "step": 23000
+     },
+     {
+       "epoch": 6.29,
+       "eval_loss": 3.781845808029175,
+       "eval_runtime": 9.4621,
+       "eval_samples_per_second": 24.941,
+       "step": 23000
+     },
+     {
+       "epoch": 6.43,
+       "learning_rate": 6e-05,
+       "loss": 3.868,
+       "step": 23500
+     },
+     {
+       "epoch": 6.43,
+       "eval_loss": 3.76729679107666,
+       "eval_runtime": 9.4657,
+       "eval_samples_per_second": 24.932,
+       "step": 23500
+     },
+     {
+       "epoch": 6.57,
+       "learning_rate": 6e-05,
+       "loss": 3.8631,
+       "step": 24000
+     },
+     {
+       "epoch": 6.57,
+       "eval_loss": 3.7602250576019287,
+       "eval_runtime": 9.4649,
+       "eval_samples_per_second": 24.934,
+       "step": 24000
+     },
+     {
+       "epoch": 6.7,
+       "learning_rate": 6e-05,
+       "loss": 3.8635,
+       "step": 24500
+     },
+     {
+       "epoch": 6.7,
+       "eval_loss": 3.7623238563537598,
+       "eval_runtime": 9.456,
+       "eval_samples_per_second": 24.958,
+       "step": 24500
+     },
+     {
+       "epoch": 6.84,
+       "learning_rate": 6e-05,
+       "loss": 3.8632,
+       "step": 25000
+     },
+     {
+       "epoch": 6.84,
+       "eval_loss": 3.7607743740081787,
+       "eval_runtime": 9.4601,
+       "eval_samples_per_second": 24.947,
+       "step": 25000
+     },
+     {
+       "epoch": 6.98,
+       "learning_rate": 6e-05,
+       "loss": 3.873,
+       "step": 25500
+     },
+     {
+       "epoch": 6.98,
+       "eval_loss": 3.749258279800415,
+       "eval_runtime": 9.4598,
+       "eval_samples_per_second": 24.948,
+       "step": 25500
+     },
+     {
+       "epoch": 7.11,
+       "learning_rate": 6e-05,
+       "loss": 3.7911,
+       "step": 26000
+     },
+     {
+       "epoch": 7.11,
+       "eval_loss": 3.744161367416382,
+       "eval_runtime": 9.4582,
+       "eval_samples_per_second": 24.952,
+       "step": 26000
+     },
+     {
+       "epoch": 7.25,
+       "learning_rate": 6e-05,
+       "loss": 3.8039,
+       "step": 26500
+     },
+     {
+       "epoch": 7.25,
+       "eval_loss": 3.739082098007202,
+       "eval_runtime": 9.4581,
+       "eval_samples_per_second": 24.952,
+       "step": 26500
+     },
+     {
+       "epoch": 7.39,
+       "learning_rate": 6e-05,
+       "loss": 3.7996,
+       "step": 27000
+     },
+     {
+       "epoch": 7.39,
+       "eval_loss": 3.738431692123413,
+       "eval_runtime": 9.4714,
+       "eval_samples_per_second": 24.917,
+       "step": 27000
+     },
+     {
+       "epoch": 7.52,
+       "learning_rate": 6e-05,
+       "loss": 3.8157,
+       "step": 27500
+     },
+     {
+       "epoch": 7.52,
+       "eval_loss": 3.7242279052734375,
+       "eval_runtime": 9.4626,
+       "eval_samples_per_second": 24.94,
+       "step": 27500
+     },
+     {
+       "epoch": 7.66,
+       "learning_rate": 6e-05,
+       "loss": 3.8224,
+       "step": 28000
+     },
+     {
+       "epoch": 7.66,
+       "eval_loss": 3.7267138957977295,
+       "eval_runtime": 9.4527,
+       "eval_samples_per_second": 24.966,
+       "step": 28000
+     },
+     {
+       "epoch": 7.8,
+       "learning_rate": 6e-05,
+       "loss": 3.8246,
+       "step": 28500
+     },
+     {
+       "epoch": 7.8,
+       "eval_loss": 3.711819887161255,
+       "eval_runtime": 9.4622,
+       "eval_samples_per_second": 24.941,
+       "step": 28500
+     },
+     {
+       "epoch": 7.93,
+       "learning_rate": 6e-05,
+       "loss": 3.8176,
+       "step": 29000
+     },
+     {
+       "epoch": 7.93,
+       "eval_loss": 3.707026958465576,
+       "eval_runtime": 9.4595,
+       "eval_samples_per_second": 24.948,
+       "step": 29000
+     },
+     {
+       "epoch": 8.07,
+       "learning_rate": 6e-05,
+       "loss": 3.7763,
+       "step": 29500
+     },
+     {
+       "epoch": 8.07,
+       "eval_loss": 3.7134788036346436,
+       "eval_runtime": 9.4603,
+       "eval_samples_per_second": 24.946,
+       "step": 29500
+     },
+     {
+       "epoch": 8.21,
+       "learning_rate": 6e-05,
+       "loss": 3.7557,
+       "step": 30000
+     },
+     {
+       "epoch": 8.21,
+       "eval_loss": 3.7067065238952637,
+       "eval_runtime": 9.4683,
+       "eval_samples_per_second": 24.925,
+       "step": 30000
+     },
+     {
+       "epoch": 8.34,
+       "learning_rate": 6e-05,
+       "loss": 3.7662,
+       "step": 30500
+     },
+     {
+       "epoch": 8.34,
+       "eval_loss": 3.7023823261260986,
+       "eval_runtime": 9.4321,
+       "eval_samples_per_second": 25.021,
+       "step": 30500
+     },
+     {
+       "epoch": 8.48,
+       "learning_rate": 6e-05,
+       "loss": 3.7677,
+       "step": 31000
+     },
+     {
+       "epoch": 8.48,
+       "eval_loss": 3.699657678604126,
+       "eval_runtime": 9.4485,
+       "eval_samples_per_second": 24.978,
+       "step": 31000
+     }
+   ],
+   "max_steps": 36550,
+   "num_train_epochs": 10,
+   "total_flos": 3969647640576000.0,
+   "trial_name": null,
+   "trial_params": null
+ }
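
trainer_state.json above is the Trainer's log history: training loss and evaluation loss every 500 steps at a constant learning rate of 6e-05, with the best eval loss of about 3.70 (perplexity ≈ exp(3.6997) ≈ 40) reached at step 31000 of a planned 36,550, i.e. partway through the ninth of ten epochs. A short sketch (assuming the file has been downloaded locally) of pulling the evaluation curve back out of it:

# Sketch: read the eval-loss curve from trainer_state.json (assumed to be local).
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals[-3:]:
    print(f"step {step:>6}: eval_loss={loss:.4f}  ppl~{math.exp(loss):.1f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])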
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1bdcb04ef96c10bb63e37c24fd535e0062cd403e2f184623b1708e684f818c9
+ size 2479
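
training_args.bin is a pickled TrainingArguments object rather than plain text, so it has to be unpickled to inspect the hyperparameters. A sketch under assumptions (the file fetched locally, and transformers/torch versions compatible with the 4.6.1 release that wrote it):

# Sketch: inspect the pickled TrainingArguments. transformers must be importable,
# since unpickling needs the TrainingArguments class; newer torch releases may also
# require torch.load(..., weights_only=False).
import torch

args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)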