Training in progress, step 12000
- optimizer.pt +1 -1
- rng_state.pth +1 -1
- runs/Jun07_12-33-16_DESKTOP-69FPKCK/events.out.tfevents.1717788805.DESKTOP-69FPKCK +2 -2
- scheduler.pt +1 -1
- trainer_state.json +712 -3
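These are the auxiliary files `transformers.Trainer` writes alongside a checkpoint (optimizer and scheduler state, RNG state, the TensorBoard event log, and the training log in trainer_state.json); the model weights themselves are not touched by this commit. As a minimal sketch, any one of them can be fetched without cloning the repository; `"user/model"` below is a hypothetical repo id, since the commit page does not name the repository:

```python
# Minimal sketch: fetch one checkpoint file from the Hub.
# "user/model" is a hypothetical repo_id; this commit does not name the repo.
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="user/model", filename="trainer_state.json")
print(path)  # local cache path of the downloaded file
```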
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bf5c3ae23e0061e18908dedb416fcb0d32e5ecadfc7f18abb693d4f1c6a53a96
 size 11230198
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:69f50a692634404f2eebb2eab9f456865957578d752987bc52d843ac2a774366
 size 14244
runs/Jun07_12-33-16_DESKTOP-69FPKCK/events.out.tfevents.1717788805.DESKTOP-69FPKCK CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:43356ad5026fe7bb50ebc6b79634c8aa625fabdcc290f5b9e335f623df1606f8
+size 132761
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:af7225d5b0731cf57528f6961f709bb0e7ed929fa0d79711b3aa9685866e262d
 size 1064
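All four binaries above are tracked with Git LFS, so the repository stores only a three-line pointer per blob (spec version, sha256 oid, byte size), and the diffs change just those pointers. A minimal sketch of checking a downloaded blob against its pointer, assuming the pointer is checked out as plain text (the helper names are illustrative):

```python
# Minimal sketch: validate a blob against a Git LFS pointer file like the ones above.
import hashlib

def parse_lfs_pointer(path: str) -> tuple[str, int]:
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def blob_matches_pointer(pointer_path: str, blob_path: str) -> bool:
    oid, size = parse_lfs_pointer(pointer_path)
    data = open(blob_path, "rb").read()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid
```

For instance, the optimizer.pt blob in this commit should hash to bf5c3ae2… and be 11230198 bytes.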
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.05429397472615476,
   "eval_steps": 2000,
-  "global_step":
+  "global_step": 12000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3552,6 +3552,715 @@
       "eval_samples_per_second": 2857.071,
       "eval_steps_per_second": 11.163,
       "step": 10000
+    },
+    {
+      "epoch": 0.04533546889633923,
+      "grad_norm": 12.951713562011719,
+      "learning_rate": 0.00013589720387295268,
+      "loss": 9.154,
+      "step": 10020
+    },
+    {
+      "epoch": 0.04542595885421615,
+      "grad_norm": 9.139362335205078,
+      "learning_rate": 0.00013616867251832414,
+      "loss": 9.154,
+      "step": 10040
+    },
+    {
+      "epoch": 0.04551644881209308,
+      "grad_norm": 8.388337135314941,
+      "learning_rate": 0.0001364401411636956,
+      "loss": 9.1391,
+      "step": 10060
+    },
+    {
+      "epoch": 0.045606938769970004,
+      "grad_norm": 10.0809326171875,
+      "learning_rate": 0.00013671160980906704,
+      "loss": 9.1417,
+      "step": 10080
+    },
+    {
+      "epoch": 0.04569742872784693,
+      "grad_norm": 8.565701484680176,
+      "learning_rate": 0.0001369830784544385,
+      "loss": 9.1112,
+      "step": 10100
+    },
+    {
+      "epoch": 0.04578791868572385,
+      "grad_norm": 10.437520027160645,
+      "learning_rate": 0.00013725454709980997,
+      "loss": 9.1169,
+      "step": 10120
+    },
+    {
+      "epoch": 0.04587840864360078,
+      "grad_norm": 8.615896224975586,
+      "learning_rate": 0.00013752601574518143,
+      "loss": 9.1003,
+      "step": 10140
+    },
+    {
+      "epoch": 0.0459688986014777,
+      "grad_norm": 10.89583683013916,
+      "learning_rate": 0.0001377974843905529,
+      "loss": 9.101,
+      "step": 10160
+    },
+    {
+      "epoch": 0.046059388559354625,
+      "grad_norm": 9.786931991577148,
+      "learning_rate": 0.00013806895303592433,
+      "loss": 9.0689,
+      "step": 10180
+    },
+    {
+      "epoch": 0.04614987851723155,
+      "grad_norm": 9.010174751281738,
+      "learning_rate": 0.0001383404216812958,
+      "loss": 9.0579,
+      "step": 10200
+    },
+    {
+      "epoch": 0.04624036847510848,
+      "grad_norm": 11.039669036865234,
+      "learning_rate": 0.00013861189032666725,
+      "loss": 9.0865,
+      "step": 10220
+    },
+    {
+      "epoch": 0.0463308584329854,
+      "grad_norm": 12.055830001831055,
+      "learning_rate": 0.00013888335897203872,
+      "loss": 9.0955,
+      "step": 10240
+    },
+    {
+      "epoch": 0.04642134839086232,
+      "grad_norm": 8.361885070800781,
+      "learning_rate": 0.00013915482761741018,
+      "loss": 9.07,
+      "step": 10260
+    },
+    {
+      "epoch": 0.046511838348739246,
+      "grad_norm": 7.196146011352539,
+      "learning_rate": 0.00013942629626278164,
+      "loss": 9.0528,
+      "step": 10280
+    },
+    {
+      "epoch": 0.046602328306616175,
+      "grad_norm": 9.67076587677002,
+      "learning_rate": 0.0001396977649081531,
+      "loss": 9.0546,
+      "step": 10300
+    },
+    {
+      "epoch": 0.0466928182644931,
+      "grad_norm": 10.09327220916748,
+      "learning_rate": 0.00013996923355352457,
+      "loss": 9.0741,
+      "step": 10320
+    },
+    {
+      "epoch": 0.04678330822237002,
+      "grad_norm": 9.639015197753906,
+      "learning_rate": 0.00014024070219889603,
+      "loss": 9.0633,
+      "step": 10340
+    },
+    {
+      "epoch": 0.04687379818024695,
+      "grad_norm": 10.251932144165039,
+      "learning_rate": 0.0001405121708442675,
+      "loss": 9.0446,
+      "step": 10360
+    },
+    {
+      "epoch": 0.04696428813812387,
+      "grad_norm": 11.07875919342041,
+      "learning_rate": 0.00014078363948963896,
+      "loss": 9.0418,
+      "step": 10380
+    },
+    {
+      "epoch": 0.047054778096000796,
+      "grad_norm": 9.328507423400879,
+      "learning_rate": 0.00014105510813501042,
+      "loss": 9.0287,
+      "step": 10400
+    },
+    {
+      "epoch": 0.04714526805387772,
+      "grad_norm": 7.056753635406494,
+      "learning_rate": 0.00014132657678038186,
+      "loss": 9.0362,
+      "step": 10420
+    },
+    {
+      "epoch": 0.04723575801175465,
+      "grad_norm": 8.899680137634277,
+      "learning_rate": 0.0001415980454257533,
+      "loss": 9.036,
+      "step": 10440
+    },
+    {
+      "epoch": 0.04732624796963157,
+      "grad_norm": 9.175132751464844,
+      "learning_rate": 0.00014186951407112476,
+      "loss": 9.0444,
+      "step": 10460
+    },
+    {
+      "epoch": 0.047416737927508494,
+      "grad_norm": 9.374978065490723,
+      "learning_rate": 0.00014214098271649622,
+      "loss": 9.0372,
+      "step": 10480
+    },
+    {
+      "epoch": 0.04750722788538542,
+      "grad_norm": 9.893750190734863,
+      "learning_rate": 0.00014241245136186769,
+      "loss": 9.0424,
+      "step": 10500
+    },
+    {
+      "epoch": 0.04759771784326235,
+      "grad_norm": 7.787280082702637,
+      "learning_rate": 0.00014265677314270202,
+      "loss": 8.9691,
+      "step": 10520
+    },
+    {
+      "epoch": 0.04768820780113927,
+      "grad_norm": 17.40734100341797,
+      "learning_rate": 0.00014277893403311917,
+      "loss": 8.2225,
+      "step": 10540
+    },
+    {
+      "epoch": 0.04777869775901619,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014286037462673062,
+      "loss": 6.6046,
+      "step": 10560
+    },
+    {
+      "epoch": 0.047869187716893115,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001429146683558049,
+      "loss": 3.0921,
+      "step": 10580
+    },
+    {
+      "epoch": 0.047959677674770045,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014294181522034205,
+      "loss": 3.9765,
+      "step": 10600
+    },
+    {
+      "epoch": 0.04805016763264697,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 6.9972,
+      "step": 10620
+    },
+    {
+      "epoch": 0.04814065759052389,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10640
+    },
+    {
+      "epoch": 0.04823114754840081,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10660
+    },
+    {
+      "epoch": 0.04832163750627774,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10680
+    },
+    {
+      "epoch": 0.048412127464154665,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10700
+    },
+    {
+      "epoch": 0.04850261742203159,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10720
+    },
+    {
+      "epoch": 0.04859310737990852,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10740
+    },
+    {
+      "epoch": 0.04868359733778544,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10760
+    },
+    {
+      "epoch": 0.04877408729566236,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10780
+    },
+    {
+      "epoch": 0.048864577253539286,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10800
+    },
+    {
+      "epoch": 0.048955067211416216,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10820
+    },
+    {
+      "epoch": 0.04904555716929314,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10840
+    },
+    {
+      "epoch": 0.04913604712717006,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10860
+    },
+    {
+      "epoch": 0.049226537085046984,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10880
+    },
+    {
+      "epoch": 0.049317027042923914,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10900
+    },
+    {
+      "epoch": 0.04940751700080084,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10920
+    },
+    {
+      "epoch": 0.04949800695867776,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10940
+    },
+    {
+      "epoch": 0.04958849691655468,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10960
+    },
+    {
+      "epoch": 0.04967898687443161,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 10980
+    },
+    {
+      "epoch": 0.049769476832308535,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11000
+    },
+    {
+      "epoch": 0.04985996679018546,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11020
+    },
+    {
+      "epoch": 0.04995045674806239,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11040
+    },
+    {
+      "epoch": 0.05004094670593931,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11060
+    },
+    {
+      "epoch": 0.05013143666381623,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11080
+    },
+    {
+      "epoch": 0.050221926621693155,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11100
+    },
+    {
+      "epoch": 0.050312416579570085,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11120
+    },
+    {
+      "epoch": 0.05040290653744701,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11140
+    },
+    {
+      "epoch": 0.05049339649532393,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11160
+    },
+    {
+      "epoch": 0.05058388645320085,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11180
+    },
+    {
+      "epoch": 0.05067437641107778,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11200
+    },
+    {
+      "epoch": 0.050764866368954706,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11220
+    },
+    {
+      "epoch": 0.05085535632683163,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11240
+    },
+    {
+      "epoch": 0.05094584628470855,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11260
+    },
+    {
+      "epoch": 0.05103633624258548,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11280
+    },
+    {
+      "epoch": 0.051126826200462404,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11300
+    },
+    {
+      "epoch": 0.05121731615833933,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11320
+    },
+    {
+      "epoch": 0.051307806116216256,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11340
+    },
+    {
+      "epoch": 0.05139829607409318,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11360
+    },
+    {
+      "epoch": 0.0514887860319701,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11380
+    },
+    {
+      "epoch": 0.051579275989847025,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11400
+    },
+    {
+      "epoch": 0.051669765947723954,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11420
+    },
+    {
+      "epoch": 0.05176025590560088,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11440
+    },
+    {
+      "epoch": 0.0518507458634778,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11460
+    },
+    {
+      "epoch": 0.05194123582135472,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11480
+    },
+    {
+      "epoch": 0.05203172577923165,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11500
+    },
+    {
+      "epoch": 0.052122215737108575,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11520
+    },
+    {
+      "epoch": 0.0522127056949855,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11540
+    },
+    {
+      "epoch": 0.05230319565286242,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11560
+    },
+    {
+      "epoch": 0.05239368561073935,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11580
+    },
+    {
+      "epoch": 0.05248417556861627,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11600
+    },
+    {
+      "epoch": 0.052574665526493196,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11620
+    },
+    {
+      "epoch": 0.05266515548437012,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11640
+    },
+    {
+      "epoch": 0.05275564544224705,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11660
+    },
+    {
+      "epoch": 0.05284613540012397,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11680
+    },
+    {
+      "epoch": 0.052936625358000894,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11700
+    },
+    {
+      "epoch": 0.053027115315877824,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11720
+    },
+    {
+      "epoch": 0.053117605273754746,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11740
+    },
+    {
+      "epoch": 0.05320809523163167,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11760
+    },
+    {
+      "epoch": 0.05329858518950859,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11780
+    },
+    {
+      "epoch": 0.05338907514738552,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11800
+    },
+    {
+      "epoch": 0.053479565105262444,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11820
+    },
+    {
+      "epoch": 0.05357005506313937,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11840
+    },
+    {
+      "epoch": 0.05366054502101629,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11860
+    },
+    {
+      "epoch": 0.05375103497889322,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11880
+    },
+    {
+      "epoch": 0.05384152493677014,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11900
+    },
+    {
+      "epoch": 0.053932014894647065,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11920
+    },
+    {
+      "epoch": 0.05402250485252399,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11940
+    },
+    {
+      "epoch": 0.05411299481040092,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11960
+    },
+    {
+      "epoch": 0.05420348476827784,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 11980
+    },
+    {
+      "epoch": 0.05429397472615476,
+      "grad_norm": NaN,
+      "learning_rate": 0.00014298253551714776,
+      "loss": 0.0,
+      "step": 12000
+    },
+    {
+      "epoch": 0.05429397472615476,
+      "eval_accuracy": 0.021626624590642192,
+      "eval_loss": NaN,
+      "eval_runtime": 218.9297,
+      "eval_samples_per_second": 2776.417,
+      "eval_steps_per_second": 10.848,
+      "step": 12000
     }
   ],
   "logging_steps": 20,
@@ -3559,7 +4268,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos":
+  "total_flos": 4315086323712000.0,
   "train_batch_size": 256,
   "trial_name": null,
   "trial_params": null
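The log above shows the run diverging: grad_norm becomes NaN at step 10560 (right after spiking to 17.4 at step 10540), the loss collapses to 0.0 by step 10640, the learning rate freezes at 0.00014298253551714776, and the step-12000 eval reports eval_loss NaN. A minimal sketch (not part of this commit) for locating the divergence point by parsing trainer_state.json; Python's json module reads the bare NaN tokens in the file as float("nan"):

```python
# Minimal sketch: find the first training log entry with a NaN gradient norm.
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)  # json accepts NaN literals, yielding float("nan")

for entry in state["log_history"]:
    grad_norm = entry.get("grad_norm")
    if grad_norm is not None and math.isnan(grad_norm):
        print(f"diverged at step {entry['step']} (loss {entry['loss']})")
        break
```

Run against this checkpoint's state, it would report step 10560.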