Commit 4872d96 by joelniklaus (1 parent: 465ed4b)

Training in progress, step 600000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:987153c5d8eda1b318dcc75104e011aee4bfc6ddc603be6a5cc3924764aec6a9
+ oid sha256:69b6701bd22ffdc878411bf1dc4db5ba25bd2d3977ba5bfb9c755b5ccc760666
  size 885325017
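
Every checkpoint binary in this commit is tracked with Git LFS, so the repository only stores a small pointer file: each diff below swaps the sha256 oid while the byte size stays the same (here 885325017 for the optimizer state). A minimal sketch, assuming a plain Python environment and using the new optimizer.pt pointer values from this commit (the helper name is ours, not part of the repo), for checking that a downloaded object matches its pointer:

# Verify a downloaded LFS object against the oid and size from its pointer file.
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and sha256 digest match the pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example with the new optimizer.pt pointer from this commit:
# verify_lfs_object(
#     "last-checkpoint/optimizer.pt",
#     "69b6701bd22ffdc878411bf1dc4db5ba25bd2d3977ba5bfb9c755b5ccc760666",
#     885325017,
# )
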
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0950dad6d78902f05457b620561f86874c33496df55b82f98399f507de93416f
+ oid sha256:cd75095c4597acd6406bda79e55f3e47837f8f338b561712587309fcdb721a0b
  size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e3a53b43b6589fd65e7762600b5802e8a0d8b2fef1ed331fa6133de1e786e15
+ oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2b390312ddc1614538c7fd82ca2c4639dfed127a83cb04c40dedde6f67b4e460
+ oid sha256:f595742cd0d96240559aaf1ff72fa8686f62da9f07c5878ab2af30ab1e4f0a07
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 3.111874,
- "global_step": 550000,
+ "epoch": 4.015832,
+ "global_step": 600000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3394,11 +3394,319 @@
  "eval_samples_per_second": 470.363,
  "eval_steps_per_second": 3.763,
  "step": 550000
+ },
+ {
+ "epoch": 3.11,
+ "learning_rate": 4.570627336558915e-05,
+ "loss": 0.65,
+ "step": 551000
+ },
+ {
+ "epoch": 3.11,
+ "learning_rate": 4.554156096030149e-05,
+ "loss": 0.738,
+ "step": 552000
+ },
+ {
+ "epoch": 3.11,
+ "learning_rate": 4.537689731178883e-05,
+ "loss": 0.7532,
+ "step": 553000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.5212284220786494e-05,
+ "loss": 0.6496,
+ "step": 554000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.504772348747687e-05,
+ "loss": 0.6133,
+ "step": 555000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.488321691146975e-05,
+ "loss": 0.6301,
+ "step": 556000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.471876629178273e-05,
+ "loss": 0.7678,
+ "step": 557000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.4554373426821374e-05,
+ "loss": 0.7341,
+ "step": 558000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.439004011435979e-05,
+ "loss": 0.6461,
+ "step": 559000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.4225768151520694e-05,
+ "loss": 0.6158,
+ "step": 560000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.406155933475599e-05,
+ "loss": 0.6459,
+ "step": 561000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.3897415459827e-05,
+ "loss": 0.7568,
+ "step": 562000
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 4.373333832178478e-05,
+ "loss": 0.7228,
+ "step": 563000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.3569329714950704e-05,
+ "loss": 0.6679,
+ "step": 564000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.3405391432896555e-05,
+ "loss": 0.6106,
+ "step": 565000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.324152526842517e-05,
+ "loss": 0.6409,
+ "step": 566000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.307773301355062e-05,
+ "loss": 0.7355,
+ "step": 567000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.291401645947879e-05,
+ "loss": 0.7502,
+ "step": 568000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.275037739658771e-05,
+ "loss": 0.6526,
+ "step": 569000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.2586817614407895e-05,
+ "loss": 0.615,
+ "step": 570000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.2423338901602985e-05,
+ "loss": 0.6179,
+ "step": 571000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.2259943045949934e-05,
+ "loss": 0.7647,
+ "step": 572000
+ },
+ {
+ "epoch": 3.13,
+ "learning_rate": 4.209663183431969e-05,
+ "loss": 0.7322,
+ "step": 573000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.1933407052657456e-05,
+ "loss": 0.6497,
+ "step": 574000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.17702704859633e-05,
+ "loss": 0.6112,
+ "step": 575000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.160722391827262e-05,
+ "loss": 0.6397,
+ "step": 576000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.14442691326365e-05,
+ "loss": 0.754,
+ "step": 577000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.1281407911102425e-05,
+ "loss": 0.7233,
+ "step": 578000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.111864203469457e-05,
+ "loss": 0.6685,
+ "step": 579000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.095597328339452e-05,
+ "loss": 0.6101,
+ "step": 580000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.079340343612165e-05,
+ "loss": 0.6318,
+ "step": 581000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.063093427071376e-05,
+ "loss": 0.735,
+ "step": 582000
+ },
+ {
+ "epoch": 3.14,
+ "learning_rate": 4.046856756390767e-05,
+ "loss": 0.7493,
+ "step": 583000
+ },
+ {
+ "epoch": 3.15,
+ "learning_rate": 4.0306305091319595e-05,
+ "loss": 0.6557,
+ "step": 584000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 4.0144148627425993e-05,
+ "loss": 0.5653,
+ "step": 585000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 3.9982099945543945e-05,
+ "loss": 0.616,
+ "step": 586000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 3.982016081781189e-05,
+ "loss": 0.6738,
+ "step": 587000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 3.965833301517017e-05,
+ "loss": 0.7626,
+ "step": 588000
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 3.949661830734172e-05,
+ "loss": 0.7203,
+ "step": 589000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.933501846281267e-05,
+ "loss": 0.6282,
+ "step": 590000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.917353524881302e-05,
+ "loss": 0.5968,
+ "step": 591000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.901217043129735e-05,
+ "loss": 0.6728,
+ "step": 592000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.8850925774925425e-05,
+ "loss": 0.7535,
+ "step": 593000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.8689803043043e-05,
+ "loss": 0.7408,
+ "step": 594000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.852880399766243e-05,
+ "loss": 0.6196,
+ "step": 595000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.836793039944349e-05,
+ "loss": 0.5852,
+ "step": 596000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.820718400767409e-05,
+ "loss": 0.6785,
+ "step": 597000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.8046566580251e-05,
+ "loss": 0.7568,
+ "step": 598000
+ },
+ {
+ "epoch": 4.01,
+ "learning_rate": 3.788607987366069e-05,
+ "loss": 0.736,
+ "step": 599000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.772572564296005e-05,
+ "loss": 0.5983,
+ "step": 600000
+ },
+ {
+ "epoch": 4.02,
+ "eval_loss": 0.5038217902183533,
+ "eval_runtime": 10.9218,
+ "eval_samples_per_second": 457.799,
+ "eval_steps_per_second": 3.662,
+ "step": 600000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 9.265007135350063e+18,
+ "total_flos": 1.0107289699690217e+19,
  "trial_name": null,
  "trial_params": null
  }
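
The trainer_state.json diff above appends one logging entry per 1,000 optimizer steps (551,000 through 600,000) plus an evaluation entry at step 600,000, and bumps epoch, global_step, and total_flos accordingly. A rough sketch for pulling those curves back out of the checkpoint; it assumes the entries sit under the usual log_history key of a Hugging Face Trainer state file, which is not visible in this hunk:

# Extract (step, loss) pairs from the checkpoint's trainer state.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print("last train loss:", train_points[-1])  # e.g. (600000, 0.5983) per this commit
print("last eval loss:", eval_points[-1])    # e.g. (600000, 0.5038...) per this commit
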
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0950dad6d78902f05457b620561f86874c33496df55b82f98399f507de93416f
+ oid sha256:cd75095c4597acd6406bda79e55f3e47837f8f338b561712587309fcdb721a0b
  size 442675755
runs/Dec27_23-39-04_t1v-n-6071ee6f-w-0/events.out.tfevents.1672184365.t1v-n-6071ee6f-w-0.108001.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0f10e42d1c67c6a2a744bca9281c75758905e67d2240af8837ac76abf63882c9
- size 94769
+ oid sha256:e161f68f5d425e1485115cd1d86b943be71aa2833b81b38e52c45658c4731eec
+ size 103045
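
The TensorBoard event file under runs/ grows from 94,769 to 103,045 bytes as the scalars for steps 551,000 to 600,000 are appended. A hedged sketch for inspecting it locally: EventAccumulator is TensorBoard's standard reader, but the tag names ("train/loss", "eval/loss") are the usual transformers logging tags and are assumed here rather than confirmed from this diff.

# Read the updated event file and print the most recent training-loss points.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator(
    "runs/Dec27_23-39-04_t1v-n-6071ee6f-w-0/"
    "events.out.tfevents.1672184365.t1v-n-6071ee6f-w-0.108001.0"
)
acc.Reload()
print(acc.Tags()["scalars"])        # list the scalar tags actually logged
for event in acc.Scalars("train/loss")[-3:]:
    print(event.step, event.value)  # last few logged training-loss values
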