AlekseyKorshuk committed
Commit 1a17623
1 Parent(s): 2fab1cf

huggingartists

README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/queen")
 ```
 
- [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/2gdiva6q/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+ [Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/1v5o4ijc/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Queen's lyrics.
 
- Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/13beiwnf) for full transparency and reproducibility.
+ Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/2apdiv6y) for full transparency and reproducibility.
 
- At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/13beiwnf/artifacts) is logged and versioned.
+ At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/2apdiv6y/artifacts) is logged and versioned.
 
 ## How to use
 
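The updated README is cut off at the "How to use" heading in this hunk; a minimal usage sketch, assuming the fine-tuned checkpoint is published under the same `huggingartists/queen` repo id as the dataset:

```python
from datasets import load_dataset
from transformers import pipeline

# Lyrics dataset referenced in the README hunk above.
dataset = load_dataset("huggingartists/queen")

# Assumption: the fine-tuned GPT-2 checkpoint lives at the same repo id.
generator = pipeline("text-generation", model="huggingartists/queen")
print(generator("I want to break free", max_length=50)[0]["generated_text"])
```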
config.json CHANGED
@@ -35,7 +35,7 @@
   }
  },
  "torch_dtype": "float32",
- "transformers_version": "4.10.2",
+ "transformers_version": "4.11.3",
  "use_cache": true,
  "vocab_size": 50257
 }
evaluation.txt CHANGED
@@ -1 +1 @@
- {"eval_loss": 2.3717236518859863, "eval_runtime": 4.3967, "eval_samples_per_second": 21.607, "eval_steps_per_second": 2.729, "epoch": 2.0}
+ {"eval_loss": 1.775342345237732, "eval_runtime": 3.9684, "eval_samples_per_second": 21.923, "eval_steps_per_second": 2.772, "epoch": 12.0}
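evaluation.txt holds a single JSON object with the final evaluation metrics; this commit moves eval_loss from ~2.372 (epoch 2.0) to ~1.775 (epoch 12.0). A minimal sketch for reading it, assuming the file sits in the current directory:

```python
import json

# evaluation.txt is one JSON object with the final eval metrics.
with open("evaluation.txt") as f:
    metrics = json.load(f)

print(f"eval_loss {metrics['eval_loss']:.4f} at epoch {metrics['epoch']}")
```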
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:502d1eca67475172a0f7e73acfe65ee0982960cae78a2b5e5553c81196e5486f
+ oid sha256:78164ec45551b127583fec5e4a0f6928dd1f4f77a75658bc2052260c3e93c6dc
 size 497764120
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:50eefa38edcc45ea891b86bb4da4f52375500fba7bb5584d8cdb052e5223ca88
- size 995603825
+ oid sha256:c05aaab90d8caa0c02fb32e76686034b0a99f484a4537df99993a9ff56a1cd41
+ size 995604017
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:a5e0293710fde0a4aceadc99d7a89a6f9829daedae2710b8bf9821bc4ca243d0
+ oid sha256:eecd5d268e5f975f932bf34dff98dd7e71a2e51f68b094f2ebc1800a6bd5a0f4
 size 510403817
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:1191b4ca1a5f132d0d48f14b4540315157d0debe4480006a34a7119f47c28f6a
+ oid sha256:2fe9cc5f8604aa97b538fc3054764ff7cb9b030361ac3a1e8a39c4ddce759080
 size 14503
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:18209b36204372bc8bd57462514f70b2cc7d1fa3bea72f64e7301510fe0e371f
+ oid sha256:066420211cafaee663c00dce05f0eac5d610fd3a9b179d73be449c25b47fdfdc
 size 623
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
- "best_metric": 2.3717236518859863,
- "best_model_checkpoint": "output/queen/checkpoint-136",
- "epoch": 2.0,
- "global_step": 136,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -184,11 +184,657 @@
 "eval_samples_per_second": 22.039,
 "eval_steps_per_second": 2.784,
 "step": 136
 }
 ],
- "max_steps": 136,
- "num_train_epochs": 2,
- "total_flos": 141097697280000.0,
 "trial_name": null,
 "trial_params": null
 }
 
 {
+ "best_metric": 1.775342345237732,
+ "best_model_checkpoint": "output/queen/checkpoint-621",
+ "epoch": 9.0,
+ "global_step": 621,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,

 "eval_samples_per_second": 22.039,
 "eval_steps_per_second": 2.784,
 "step": 136
187
+ },
188
+ {
189
+ "epoch": 2.0,
190
+ "eval_loss": 2.222139835357666,
191
+ "eval_runtime": 3.7161,
192
+ "eval_samples_per_second": 23.412,
193
+ "eval_steps_per_second": 2.96,
194
+ "step": 138
195
+ },
196
+ {
197
+ "epoch": 2.03,
198
+ "learning_rate": 0.00013691577939766304,
199
+ "loss": 2.3223,
200
+ "step": 140
201
+ },
202
+ {
203
+ "epoch": 2.1,
204
+ "learning_rate": 0.00013374528334456193,
205
+ "loss": 2.4227,
206
+ "step": 145
207
+ },
208
+ {
209
+ "epoch": 2.17,
210
+ "learning_rate": 0.00012721317115188912,
211
+ "loss": 2.3956,
212
+ "step": 150
213
+ },
214
+ {
215
+ "epoch": 2.25,
216
+ "learning_rate": 0.00011765651179294606,
217
+ "loss": 2.3273,
218
+ "step": 155
219
+ },
220
+ {
221
+ "epoch": 2.32,
222
+ "learning_rate": 0.0001055684464238085,
223
+ "loss": 2.4048,
224
+ "step": 160
225
+ },
226
+ {
227
+ "epoch": 2.39,
228
+ "learning_rate": 9.157274139492967e-05,
229
+ "loss": 2.4168,
230
+ "step": 165
231
+ },
232
+ {
233
+ "epoch": 2.46,
234
+ "learning_rate": 7.639160076306266e-05,
235
+ "loss": 2.4496,
236
+ "step": 170
237
+ },
238
+ {
239
+ "epoch": 2.54,
240
+ "learning_rate": 6.0808399236937385e-05,
241
+ "loss": 2.0669,
242
+ "step": 175
243
+ },
244
+ {
245
+ "epoch": 2.61,
246
+ "learning_rate": 4.5627258605070364e-05,
247
+ "loss": 2.2692,
248
+ "step": 180
249
+ },
250
+ {
251
+ "epoch": 2.68,
252
+ "learning_rate": 3.1631553576191535e-05,
253
+ "loss": 2.1242,
254
+ "step": 185
255
+ },
256
+ {
257
+ "epoch": 2.75,
258
+ "learning_rate": 1.954348820705391e-05,
259
+ "loss": 2.3064,
260
+ "step": 190
261
+ },
262
+ {
263
+ "epoch": 2.83,
264
+ "learning_rate": 9.986828848110892e-06,
265
+ "loss": 2.146,
266
+ "step": 195
267
+ },
268
+ {
269
+ "epoch": 2.9,
270
+ "learning_rate": 3.454716655438096e-06,
271
+ "loss": 2.3787,
272
+ "step": 200
273
+ },
274
+ {
275
+ "epoch": 2.97,
276
+ "learning_rate": 2.8422060233695394e-07,
277
+ "loss": 2.2533,
278
+ "step": 205
279
+ },
280
+ {
281
+ "epoch": 3.0,
282
+ "eval_loss": 2.043508768081665,
283
+ "eval_runtime": 3.8679,
284
+ "eval_samples_per_second": 22.493,
285
+ "eval_steps_per_second": 2.844,
286
+ "step": 207
287
+ },
288
+ {
289
+ "epoch": 3.04,
290
+ "learning_rate": 6.389441019077102e-07,
291
+ "loss": 2.1995,
292
+ "step": 210
293
+ },
294
+ {
295
+ "epoch": 3.12,
296
+ "learning_rate": 4.500582770777967e-06,
297
+ "loss": 2.2239,
298
+ "step": 215
299
+ },
300
+ {
301
+ "epoch": 3.19,
302
+ "learning_rate": 1.166986896886688e-05,
303
+ "loss": 2.1124,
304
+ "step": 220
305
+ },
306
+ {
307
+ "epoch": 3.26,
308
+ "learning_rate": 2.177685437520025e-05,
309
+ "loss": 2.2442,
310
+ "step": 225
311
+ },
312
+ {
313
+ "epoch": 3.33,
314
+ "learning_rate": 3.4300000000000014e-05,
315
+ "loss": 2.0765,
316
+ "step": 230
317
+ },
318
+ {
319
+ "epoch": 3.41,
320
+ "learning_rate": 4.859308855339902e-05,
321
+ "loss": 2.042,
322
+ "step": 235
323
+ },
324
+ {
325
+ "epoch": 3.48,
326
+ "learning_rate": 6.391857044318346e-05,
327
+ "loss": 2.0462,
328
+ "step": 240
329
+ },
330
+ {
331
+ "epoch": 3.55,
332
+ "learning_rate": 7.948562268689874e-05,
333
+ "loss": 1.9776,
334
+ "step": 245
335
+ },
336
+ {
337
+ "epoch": 3.62,
338
+ "learning_rate": 9.449095682862937e-05,
339
+ "loss": 2.1824,
340
+ "step": 250
341
+ },
342
+ {
343
+ "epoch": 3.7,
344
+ "learning_rate": 0.0001081602700970798,
345
+ "loss": 1.996,
346
+ "step": 255
347
+ },
348
+ {
349
+ "epoch": 3.77,
350
+ "learning_rate": 0.00011978820084915117,
351
+ "loss": 2.1329,
352
+ "step": 260
353
+ },
354
+ {
355
+ "epoch": 3.84,
356
+ "learning_rate": 0.00012877472652481797,
357
+ "loss": 2.1854,
358
+ "step": 265
359
+ },
360
+ {
361
+ "epoch": 3.91,
362
+ "learning_rate": 0.00013465612591205902,
363
+ "loss": 1.8201,
364
+ "step": 270
365
+ },
366
+ {
367
+ "epoch": 3.99,
368
+ "learning_rate": 0.00013712890801216552,
369
+ "loss": 1.8969,
370
+ "step": 275
371
+ },
372
+ {
373
+ "epoch": 4.0,
374
+ "eval_loss": 1.9531660079956055,
375
+ "eval_runtime": 3.8382,
376
+ "eval_samples_per_second": 22.667,
377
+ "eval_steps_per_second": 2.866,
378
+ "step": 276
379
+ },
380
+ {
381
+ "epoch": 4.06,
382
+ "learning_rate": 0.0001360654727320747,
383
+ "loss": 1.817,
384
+ "step": 280
385
+ },
386
+ {
387
+ "epoch": 4.13,
388
+ "learning_rate": 0.0001315206952832741,
389
+ "loss": 1.6319,
390
+ "step": 285
391
+ },
392
+ {
393
+ "epoch": 4.2,
394
+ "learning_rate": 0.00012372909452021153,
395
+ "loss": 1.7738,
396
+ "step": 290
397
+ },
398
+ {
399
+ "epoch": 4.28,
400
+ "learning_rate": 0.00011309273133714503,
401
+ "loss": 1.7757,
402
+ "step": 295
403
+ },
404
+ {
405
+ "epoch": 4.35,
406
+ "learning_rate": 0.0001001604615883571,
407
+ "loss": 1.9699,
408
+ "step": 300
409
+ },
410
+ {
411
+ "epoch": 4.42,
412
+ "learning_rate": 8.5599614119113e-05,
413
+ "loss": 1.9681,
414
+ "step": 305
415
+ },
416
+ {
417
+ "epoch": 4.49,
418
+ "learning_rate": 7.016155537292879e-05,
419
+ "loss": 1.9245,
420
+ "step": 310
421
+ },
422
+ {
423
+ "epoch": 4.57,
424
+ "learning_rate": 5.464291750458938e-05,
425
+ "loss": 1.7497,
426
+ "step": 315
427
+ },
428
+ {
429
+ "epoch": 4.64,
430
+ "learning_rate": 3.98444906994169e-05,
431
+ "loss": 1.7975,
432
+ "step": 320
433
+ },
434
+ {
435
+ "epoch": 4.71,
436
+ "learning_rate": 2.6529900930537204e-05,
437
+ "loss": 1.6675,
438
+ "step": 325
439
+ },
440
+ {
441
+ "epoch": 4.78,
442
+ "learning_rate": 1.5386205457676833e-05,
443
+ "loss": 1.9876,
444
+ "step": 330
445
+ },
446
+ {
447
+ "epoch": 4.86,
448
+ "learning_rate": 6.988439412050469e-06,
449
+ "loss": 1.691,
450
+ "step": 335
451
+ },
452
+ {
453
+ "epoch": 4.93,
454
+ "learning_rate": 1.7699429285993718e-06,
455
+ "loss": 1.8689,
456
+ "step": 340
457
+ },
458
+ {
459
+ "epoch": 5.0,
460
+ "learning_rate": 0.0,
461
+ "loss": 1.9327,
462
+ "step": 345
463
+ },
464
+ {
465
+ "epoch": 5.0,
466
+ "eval_loss": 1.8510653972625732,
467
+ "eval_runtime": 3.8275,
468
+ "eval_samples_per_second": 22.73,
469
+ "eval_steps_per_second": 2.874,
470
+ "step": 345
471
+ },
472
+ {
473
+ "epoch": 5.07,
474
+ "learning_rate": 1.7699429285993566e-06,
475
+ "loss": 1.5991,
476
+ "step": 350
477
+ },
478
+ {
479
+ "epoch": 5.14,
480
+ "learning_rate": 6.988439412050431e-06,
481
+ "loss": 1.6155,
482
+ "step": 355
483
+ },
484
+ {
485
+ "epoch": 5.22,
486
+ "learning_rate": 1.538620545767678e-05,
487
+ "loss": 1.6707,
488
+ "step": 360
489
+ },
490
+ {
491
+ "epoch": 5.29,
492
+ "learning_rate": 2.6529900930537035e-05,
493
+ "loss": 1.7262,
494
+ "step": 365
495
+ },
496
+ {
497
+ "epoch": 5.36,
498
+ "learning_rate": 3.984449069941682e-05,
499
+ "loss": 1.7087,
500
+ "step": 370
501
+ },
502
+ {
503
+ "epoch": 5.43,
504
+ "learning_rate": 5.4642917504589295e-05,
505
+ "loss": 1.7766,
506
+ "step": 375
507
+ },
508
+ {
509
+ "epoch": 5.51,
510
+ "learning_rate": 7.016155537292883e-05,
511
+ "loss": 1.5992,
512
+ "step": 380
513
+ },
514
+ {
515
+ "epoch": 5.58,
516
+ "learning_rate": 8.559961411911291e-05,
517
+ "loss": 1.6561,
518
+ "step": 385
519
+ },
520
+ {
521
+ "epoch": 5.65,
522
+ "learning_rate": 0.00010016046158835702,
523
+ "loss": 1.6108,
524
+ "step": 390
525
+ },
526
+ {
527
+ "epoch": 5.72,
528
+ "learning_rate": 0.00011309273133714504,
529
+ "loss": 1.6236,
530
+ "step": 395
531
+ },
532
+ {
533
+ "epoch": 5.8,
534
+ "learning_rate": 0.0001237290945202115,
535
+ "loss": 1.6319,
536
+ "step": 400
537
+ },
538
+ {
539
+ "epoch": 5.87,
540
+ "learning_rate": 0.00013152069528327408,
541
+ "loss": 1.4268,
542
+ "step": 405
543
+ },
544
+ {
545
+ "epoch": 5.94,
546
+ "learning_rate": 0.00013606547273207472,
547
+ "loss": 1.7278,
548
+ "step": 410
549
+ },
550
+ {
551
+ "epoch": 6.0,
552
+ "eval_loss": 1.8254233598709106,
553
+ "eval_runtime": 3.854,
554
+ "eval_samples_per_second": 22.574,
555
+ "eval_steps_per_second": 2.854,
556
+ "step": 414
557
+ },
558
+ {
559
+ "epoch": 6.01,
560
+ "learning_rate": 0.00013712890801216552,
561
+ "loss": 1.8762,
562
+ "step": 415
563
+ },
564
+ {
565
+ "epoch": 6.09,
566
+ "learning_rate": 0.00013465612591205902,
567
+ "loss": 1.4495,
568
+ "step": 420
569
+ },
570
+ {
571
+ "epoch": 6.16,
572
+ "learning_rate": 0.00012877472652481797,
573
+ "loss": 1.5224,
574
+ "step": 425
575
+ },
576
+ {
577
+ "epoch": 6.23,
578
+ "learning_rate": 0.00011978820084915123,
579
+ "loss": 1.5038,
580
+ "step": 430
581
+ },
582
+ {
583
+ "epoch": 6.3,
584
+ "learning_rate": 0.00010816027009708009,
585
+ "loss": 1.4097,
586
+ "step": 435
587
+ },
588
+ {
589
+ "epoch": 6.38,
590
+ "learning_rate": 9.449095682862935e-05,
591
+ "loss": 1.3203,
592
+ "step": 440
593
+ },
594
+ {
595
+ "epoch": 6.45,
596
+ "learning_rate": 7.948562268689883e-05,
597
+ "loss": 1.3634,
598
+ "step": 445
599
+ },
600
+ {
601
+ "epoch": 6.52,
602
+ "learning_rate": 6.39185704431838e-05,
603
+ "loss": 1.4181,
604
+ "step": 450
605
+ },
606
+ {
607
+ "epoch": 6.59,
608
+ "learning_rate": 4.859308855339899e-05,
609
+ "loss": 1.5025,
610
+ "step": 455
611
+ },
612
+ {
613
+ "epoch": 6.67,
614
+ "learning_rate": 3.429999999999998e-05,
615
+ "loss": 1.5881,
616
+ "step": 460
617
+ },
618
+ {
619
+ "epoch": 6.74,
620
+ "learning_rate": 2.1776854375200487e-05,
621
+ "loss": 1.5042,
622
+ "step": 465
623
+ },
624
+ {
625
+ "epoch": 6.81,
626
+ "learning_rate": 1.1669868968866859e-05,
627
+ "loss": 1.4229,
628
+ "step": 470
629
+ },
630
+ {
631
+ "epoch": 6.88,
632
+ "learning_rate": 4.500582770777952e-06,
633
+ "loss": 1.5551,
634
+ "step": 475
635
+ },
636
+ {
637
+ "epoch": 6.96,
638
+ "learning_rate": 6.389441019077407e-07,
639
+ "loss": 1.6253,
640
+ "step": 480
641
+ },
642
+ {
643
+ "epoch": 7.0,
644
+ "eval_loss": 1.787222146987915,
645
+ "eval_runtime": 3.8357,
646
+ "eval_samples_per_second": 22.681,
647
+ "eval_steps_per_second": 2.868,
648
+ "step": 483
649
+ },
650
+ {
651
+ "epoch": 7.03,
652
+ "learning_rate": 2.8422060233696156e-07,
653
+ "loss": 1.5683,
654
+ "step": 485
655
+ },
656
+ {
657
+ "epoch": 7.1,
658
+ "learning_rate": 3.4547166554380356e-06,
659
+ "loss": 1.1749,
660
+ "step": 490
661
+ },
662
+ {
663
+ "epoch": 7.17,
664
+ "learning_rate": 9.986828848110785e-06,
665
+ "loss": 1.2655,
666
+ "step": 495
667
+ },
668
+ {
669
+ "epoch": 7.25,
670
+ "learning_rate": 1.9543488207053935e-05,
671
+ "loss": 1.3456,
672
+ "step": 500
673
+ },
674
+ {
675
+ "epoch": 7.32,
676
+ "learning_rate": 3.163155357619136e-05,
677
+ "loss": 1.2074,
678
+ "step": 505
679
+ },
680
+ {
681
+ "epoch": 7.39,
682
+ "learning_rate": 4.5627258605070174e-05,
683
+ "loss": 1.3068,
684
+ "step": 510
685
+ },
686
+ {
687
+ "epoch": 7.46,
688
+ "learning_rate": 6.080839923693748e-05,
689
+ "loss": 1.1119,
690
+ "step": 515
691
+ },
692
+ {
693
+ "epoch": 7.54,
694
+ "learning_rate": 7.639160076306239e-05,
695
+ "loss": 1.3026,
696
+ "step": 520
697
+ },
698
+ {
699
+ "epoch": 7.61,
700
+ "learning_rate": 9.15727413949297e-05,
701
+ "loss": 1.4164,
702
+ "step": 525
703
+ },
704
+ {
705
+ "epoch": 7.68,
706
+ "learning_rate": 0.00010556844642380854,
707
+ "loss": 1.6188,
708
+ "step": 530
709
+ },
710
+ {
711
+ "epoch": 7.75,
712
+ "learning_rate": 0.00011765651179294598,
713
+ "loss": 1.438,
714
+ "step": 535
715
+ },
716
+ {
717
+ "epoch": 7.83,
718
+ "learning_rate": 0.00012721317115188917,
719
+ "loss": 1.267,
720
+ "step": 540
721
+ },
722
+ {
723
+ "epoch": 7.9,
724
+ "learning_rate": 0.00013374528334456193,
725
+ "loss": 1.2976,
726
+ "step": 545
727
+ },
728
+ {
729
+ "epoch": 7.97,
730
+ "learning_rate": 0.00013691577939766304,
731
+ "loss": 1.4411,
732
+ "step": 550
733
+ },
734
+ {
735
+ "epoch": 8.0,
736
+ "eval_loss": 1.795061707496643,
737
+ "eval_runtime": 3.8467,
738
+ "eval_samples_per_second": 22.617,
739
+ "eval_steps_per_second": 2.86,
740
+ "step": 552
741
+ },
742
+ {
743
+ "epoch": 8.04,
744
+ "learning_rate": 0.0001365610558980923,
745
+ "loss": 1.266,
746
+ "step": 555
747
+ },
748
+ {
749
+ "epoch": 8.12,
750
+ "learning_rate": 0.000132699417229222,
751
+ "loss": 1.2792,
752
+ "step": 560
753
+ },
754
+ {
755
+ "epoch": 8.19,
756
+ "learning_rate": 0.0001255301310311332,
757
+ "loss": 1.2561,
758
+ "step": 565
759
+ },
760
+ {
761
+ "epoch": 8.26,
762
+ "learning_rate": 0.00011542314562479977,
763
+ "loss": 1.2462,
764
+ "step": 570
765
+ },
766
+ {
767
+ "epoch": 8.33,
768
+ "learning_rate": 0.00010289999999999993,
769
+ "loss": 1.2013,
770
+ "step": 575
771
+ },
772
+ {
773
+ "epoch": 8.41,
774
+ "learning_rate": 8.860691144660113e-05,
775
+ "loss": 1.2052,
776
+ "step": 580
777
+ },
778
+ {
779
+ "epoch": 8.48,
780
+ "learning_rate": 7.328142955681657e-05,
781
+ "loss": 1.3885,
782
+ "step": 585
783
+ },
784
+ {
785
+ "epoch": 8.55,
786
+ "learning_rate": 5.771437731310106e-05,
787
+ "loss": 1.0822,
788
+ "step": 590
789
+ },
790
+ {
791
+ "epoch": 8.62,
792
+ "learning_rate": 4.270904317137078e-05,
793
+ "loss": 1.3064,
794
+ "step": 595
795
+ },
796
+ {
797
+ "epoch": 8.7,
798
+ "learning_rate": 2.9039729902920224e-05,
799
+ "loss": 1.2009,
800
+ "step": 600
801
+ },
802
+ {
803
+ "epoch": 8.77,
804
+ "learning_rate": 1.7411799150848703e-05,
805
+ "loss": 1.2057,
806
+ "step": 605
807
+ },
808
+ {
809
+ "epoch": 8.84,
810
+ "learning_rate": 8.425273475182102e-06,
811
+ "loss": 1.1498,
812
+ "step": 610
813
+ },
814
+ {
815
+ "epoch": 8.91,
816
+ "learning_rate": 2.5438740879410024e-06,
817
+ "loss": 1.0197,
818
+ "step": 615
819
+ },
820
+ {
821
+ "epoch": 8.99,
822
+ "learning_rate": 7.109198783448199e-08,
823
+ "loss": 1.1622,
824
+ "step": 620
825
+ },
826
+ {
827
+ "epoch": 9.0,
828
+ "eval_loss": 1.775342345237732,
829
+ "eval_runtime": 3.8327,
830
+ "eval_samples_per_second": 22.699,
831
+ "eval_steps_per_second": 2.87,
832
+ "step": 621
 }
 ],
+ "max_steps": 828,
+ "num_train_epochs": 12,
+ "total_flos": 642778398720000.0,
 "trial_name": null,
 "trial_params": null
 }
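trainer_state.json carries the per-step log_history added above; a minimal sketch, assuming the file is read from the checkpoint directory, for pulling out the eval-loss curve and the best checkpoint:

```python
import json

# log_history mixes training entries (loss, learning_rate) and per-epoch eval entries (eval_loss).
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:>4}: eval_loss {entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "->", state["best_model_checkpoint"])
```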
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:fe033cb76b844ba6d85ac8b88771923eea234dc7f7b0240e79578bf023ae7efa
- size 2671
+ oid sha256:27f7066b415f01cd40fe79d9609a25d25e2e5ba0f11a92fd67adab5753419b6f
+ size 2863