{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 14.92822966507177,
"eval_steps": 500,
"global_step": 1560,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009569377990430622,
"grad_norm": 2.171875,
"learning_rate": 1.282051282051282e-06,
"loss": 3.0241,
"step": 1
},
{
"epoch": 0.04784688995215311,
"grad_norm": 2.40625,
"learning_rate": 6.41025641025641e-06,
"loss": 3.0093,
"step": 5
},
{
"epoch": 0.09569377990430622,
"grad_norm": 2.09375,
"learning_rate": 1.282051282051282e-05,
"loss": 3.0072,
"step": 10
},
{
"epoch": 0.14354066985645933,
"grad_norm": 1.984375,
"learning_rate": 1.923076923076923e-05,
"loss": 2.9982,
"step": 15
},
{
"epoch": 0.19138755980861244,
"grad_norm": 2.015625,
"learning_rate": 2.564102564102564e-05,
"loss": 2.9122,
"step": 20
},
{
"epoch": 0.23923444976076555,
"grad_norm": 2.53125,
"learning_rate": 3.205128205128206e-05,
"loss": 2.785,
"step": 25
},
{
"epoch": 0.28708133971291866,
"grad_norm": 1.875,
"learning_rate": 3.846153846153846e-05,
"loss": 2.6063,
"step": 30
},
{
"epoch": 0.3349282296650718,
"grad_norm": 2.65625,
"learning_rate": 4.4871794871794874e-05,
"loss": 2.4641,
"step": 35
},
{
"epoch": 0.3827751196172249,
"grad_norm": 1.890625,
"learning_rate": 5.128205128205128e-05,
"loss": 2.3442,
"step": 40
},
{
"epoch": 0.430622009569378,
"grad_norm": 73.5,
"learning_rate": 5.769230769230769e-05,
"loss": 2.2165,
"step": 45
},
{
"epoch": 0.4784688995215311,
"grad_norm": 1.1171875,
"learning_rate": 6.410256410256412e-05,
"loss": 2.0922,
"step": 50
},
{
"epoch": 0.5263157894736842,
"grad_norm": 0.87890625,
"learning_rate": 7.051282051282052e-05,
"loss": 1.9553,
"step": 55
},
{
"epoch": 0.5741626794258373,
"grad_norm": 0.89453125,
"learning_rate": 7.692307692307693e-05,
"loss": 1.8161,
"step": 60
},
{
"epoch": 0.6220095693779905,
"grad_norm": 0.65234375,
"learning_rate": 8.333333333333334e-05,
"loss": 1.6939,
"step": 65
},
{
"epoch": 0.6698564593301436,
"grad_norm": 0.52734375,
"learning_rate": 8.974358974358975e-05,
"loss": 1.6032,
"step": 70
},
{
"epoch": 0.7177033492822966,
"grad_norm": 0.546875,
"learning_rate": 9.615384615384617e-05,
"loss": 1.5079,
"step": 75
},
{
"epoch": 0.7655502392344498,
"grad_norm": 0.498046875,
"learning_rate": 0.00010256410256410256,
"loss": 1.4389,
"step": 80
},
{
"epoch": 0.8133971291866029,
"grad_norm": 0.67578125,
"learning_rate": 0.00010897435897435896,
"loss": 1.3952,
"step": 85
},
{
"epoch": 0.861244019138756,
"grad_norm": 0.302734375,
"learning_rate": 0.00011538461538461538,
"loss": 1.3286,
"step": 90
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.55859375,
"learning_rate": 0.00012179487179487179,
"loss": 1.3036,
"step": 95
},
{
"epoch": 0.9569377990430622,
"grad_norm": 0.515625,
"learning_rate": 0.00012820512820512823,
"loss": 1.2724,
"step": 100
},
{
"epoch": 0.9952153110047847,
"eval_loss": 2.5530645847320557,
"eval_runtime": 0.6584,
"eval_samples_per_second": 15.189,
"eval_steps_per_second": 1.519,
"step": 104
},
{
"epoch": 1.0047846889952152,
"grad_norm": 0.31640625,
"learning_rate": 0.00013461538461538464,
"loss": 1.2461,
"step": 105
},
{
"epoch": 1.0526315789473684,
"grad_norm": 0.486328125,
"learning_rate": 0.00014102564102564104,
"loss": 1.2256,
"step": 110
},
{
"epoch": 1.1004784688995215,
"grad_norm": 0.255859375,
"learning_rate": 0.00014743589743589745,
"loss": 1.2074,
"step": 115
},
{
"epoch": 1.1483253588516746,
"grad_norm": 0.396484375,
"learning_rate": 0.00015384615384615385,
"loss": 1.1911,
"step": 120
},
{
"epoch": 1.1961722488038278,
"grad_norm": 1.0390625,
"learning_rate": 0.00016025641025641028,
"loss": 1.1684,
"step": 125
},
{
"epoch": 1.244019138755981,
"grad_norm": 0.453125,
"learning_rate": 0.0001666666666666667,
"loss": 1.1506,
"step": 130
},
{
"epoch": 1.291866028708134,
"grad_norm": 0.56640625,
"learning_rate": 0.0001730769230769231,
"loss": 1.1572,
"step": 135
},
{
"epoch": 1.339712918660287,
"grad_norm": 0.75,
"learning_rate": 0.0001794871794871795,
"loss": 1.136,
"step": 140
},
{
"epoch": 1.38755980861244,
"grad_norm": 0.7890625,
"learning_rate": 0.0001858974358974359,
"loss": 1.138,
"step": 145
},
{
"epoch": 1.4354066985645932,
"grad_norm": 0.2890625,
"learning_rate": 0.00019230769230769233,
"loss": 1.1339,
"step": 150
},
{
"epoch": 1.4832535885167464,
"grad_norm": 0.71875,
"learning_rate": 0.00019871794871794874,
"loss": 1.1258,
"step": 155
},
{
"epoch": 1.5311004784688995,
"grad_norm": 1.078125,
"learning_rate": 0.00019999599453798524,
"loss": 1.118,
"step": 160
},
{
"epoch": 1.5789473684210527,
"grad_norm": 1.1015625,
"learning_rate": 0.00019997972289848503,
"loss": 1.1211,
"step": 165
},
{
"epoch": 1.6267942583732058,
"grad_norm": 0.291015625,
"learning_rate": 0.0001999509367752813,
"loss": 1.109,
"step": 170
},
{
"epoch": 1.674641148325359,
"grad_norm": 0.27734375,
"learning_rate": 0.00019990963977153936,
"loss": 1.1016,
"step": 175
},
{
"epoch": 1.722488038277512,
"grad_norm": 0.37890625,
"learning_rate": 0.00019985583705641418,
"loss": 1.1047,
"step": 180
},
{
"epoch": 1.7703349282296652,
"grad_norm": 0.310546875,
"learning_rate": 0.00019978953536440336,
"loss": 1.0877,
"step": 185
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.267578125,
"learning_rate": 0.0001997107429945041,
"loss": 1.0849,
"step": 190
},
{
"epoch": 1.8660287081339713,
"grad_norm": 0.306640625,
"learning_rate": 0.00019961946980917456,
"loss": 1.0945,
"step": 195
},
{
"epoch": 1.9138755980861244,
"grad_norm": 0.376953125,
"learning_rate": 0.0001995157272330992,
"loss": 1.0796,
"step": 200
},
{
"epoch": 1.9617224880382775,
"grad_norm": 0.412109375,
"learning_rate": 0.00019939952825175888,
"loss": 1.0792,
"step": 205
},
{
"epoch": 2.0,
"eval_loss": 2.475158214569092,
"eval_runtime": 0.5364,
"eval_samples_per_second": 18.643,
"eval_steps_per_second": 1.864,
"step": 209
},
{
"epoch": 2.0095693779904304,
"grad_norm": 0.337890625,
"learning_rate": 0.0001992708874098054,
"loss": 1.0691,
"step": 210
},
{
"epoch": 2.0574162679425836,
"grad_norm": 0.326171875,
"learning_rate": 0.00019912982080924103,
"loss": 1.0586,
"step": 215
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.31640625,
"learning_rate": 0.00019897634610740287,
"loss": 1.0492,
"step": 220
},
{
"epoch": 2.15311004784689,
"grad_norm": 0.33203125,
"learning_rate": 0.0001988104825147528,
"loss": 1.0467,
"step": 225
},
{
"epoch": 2.200956937799043,
"grad_norm": 0.80078125,
"learning_rate": 0.00019863225079247285,
"loss": 1.0492,
"step": 230
},
{
"epoch": 2.248803827751196,
"grad_norm": 0.37890625,
"learning_rate": 0.00019844167324986657,
"loss": 1.0444,
"step": 235
},
{
"epoch": 2.2966507177033493,
"grad_norm": 0.396484375,
"learning_rate": 0.00019823877374156647,
"loss": 1.049,
"step": 240
},
{
"epoch": 2.3444976076555024,
"grad_norm": 0.353515625,
"learning_rate": 0.00019802357766454827,
"loss": 1.047,
"step": 245
},
{
"epoch": 2.3923444976076556,
"grad_norm": 0.57421875,
"learning_rate": 0.00019779611195495177,
"loss": 1.044,
"step": 250
},
{
"epoch": 2.4401913875598087,
"grad_norm": 0.341796875,
"learning_rate": 0.00019755640508470942,
"loss": 1.0424,
"step": 255
},
{
"epoch": 2.488038277511962,
"grad_norm": 0.3125,
"learning_rate": 0.00019730448705798239,
"loss": 1.0362,
"step": 260
},
{
"epoch": 2.535885167464115,
"grad_norm": 0.423828125,
"learning_rate": 0.00019704038940740505,
"loss": 1.031,
"step": 265
},
{
"epoch": 2.583732057416268,
"grad_norm": 0.57421875,
"learning_rate": 0.00019676414519013781,
"loss": 1.0416,
"step": 270
},
{
"epoch": 2.6315789473684212,
"grad_norm": 1.0859375,
"learning_rate": 0.0001964757889837296,
"loss": 1.0326,
"step": 275
},
{
"epoch": 2.679425837320574,
"grad_norm": 0.408203125,
"learning_rate": 0.0001961753568817896,
"loss": 1.0317,
"step": 280
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.3828125,
"learning_rate": 0.00019586288648946947,
"loss": 1.0449,
"step": 285
},
{
"epoch": 2.77511961722488,
"grad_norm": 0.396484375,
"learning_rate": 0.0001955384169187563,
"loss": 1.0297,
"step": 290
},
{
"epoch": 2.8229665071770333,
"grad_norm": 0.55078125,
"learning_rate": 0.00019520198878357703,
"loss": 1.0319,
"step": 295
},
{
"epoch": 2.8708133971291865,
"grad_norm": 0.5625,
"learning_rate": 0.00019485364419471454,
"loss": 1.031,
"step": 300
},
{
"epoch": 2.9186602870813396,
"grad_norm": 0.3359375,
"learning_rate": 0.00019449342675453707,
"loss": 1.0256,
"step": 305
},
{
"epoch": 2.9665071770334928,
"grad_norm": 0.3359375,
"learning_rate": 0.00019412138155154002,
"loss": 1.0312,
"step": 310
},
{
"epoch": 2.9952153110047846,
"eval_loss": 2.463235855102539,
"eval_runtime": 0.6563,
"eval_samples_per_second": 15.237,
"eval_steps_per_second": 1.524,
"step": 313
},
{
"epoch": 3.014354066985646,
"grad_norm": 0.37890625,
"learning_rate": 0.00019373755515470254,
"loss": 1.0296,
"step": 315
},
{
"epoch": 3.062200956937799,
"grad_norm": 0.330078125,
"learning_rate": 0.0001933419956076584,
"loss": 1.0058,
"step": 320
},
{
"epoch": 3.110047846889952,
"grad_norm": 0.34375,
"learning_rate": 0.00019293475242268223,
"loss": 1.0065,
"step": 325
},
{
"epoch": 3.1578947368421053,
"grad_norm": 0.40625,
"learning_rate": 0.00019251587657449236,
"loss": 1.0095,
"step": 330
},
{
"epoch": 3.2057416267942584,
"grad_norm": 0.8203125,
"learning_rate": 0.0001920854204938699,
"loss": 1.0087,
"step": 335
},
{
"epoch": 3.2535885167464116,
"grad_norm": 0.353515625,
"learning_rate": 0.00019164343806109632,
"loss": 1.0066,
"step": 340
},
{
"epoch": 3.3014354066985647,
"grad_norm": 0.486328125,
"learning_rate": 0.00019118998459920902,
"loss": 1.002,
"step": 345
},
{
"epoch": 3.349282296650718,
"grad_norm": 0.6484375,
"learning_rate": 0.00019072511686707663,
"loss": 1.0099,
"step": 350
},
{
"epoch": 3.397129186602871,
"grad_norm": 0.3671875,
"learning_rate": 0.00019024889305229456,
"loss": 0.9971,
"step": 355
},
{
"epoch": 3.444976076555024,
"grad_norm": 0.306640625,
"learning_rate": 0.0001897613727639014,
"loss": 0.9993,
"step": 360
},
{
"epoch": 3.492822966507177,
"grad_norm": 0.318359375,
"learning_rate": 0.00018926261702491797,
"loss": 1.0025,
"step": 365
},
{
"epoch": 3.5406698564593304,
"grad_norm": 0.46875,
"learning_rate": 0.00018875268826470872,
"loss": 0.9953,
"step": 370
},
{
"epoch": 3.588516746411483,
"grad_norm": 0.470703125,
"learning_rate": 0.0001882316503111678,
"loss": 0.9988,
"step": 375
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.71484375,
"learning_rate": 0.00018769956838272936,
"loss": 1.0103,
"step": 380
},
{
"epoch": 3.6842105263157894,
"grad_norm": 0.6171875,
"learning_rate": 0.00018715650908020427,
"loss": 1.0031,
"step": 385
},
{
"epoch": 3.7320574162679425,
"grad_norm": 0.330078125,
"learning_rate": 0.00018660254037844388,
"loss": 0.9989,
"step": 390
},
{
"epoch": 3.7799043062200957,
"grad_norm": 0.396484375,
"learning_rate": 0.00018603773161783124,
"loss": 0.9975,
"step": 395
},
{
"epoch": 3.827751196172249,
"grad_norm": 0.482421875,
"learning_rate": 0.00018546215349560203,
"loss": 0.9895,
"step": 400
},
{
"epoch": 3.875598086124402,
"grad_norm": 0.56640625,
"learning_rate": 0.00018487587805699526,
"loss": 0.9941,
"step": 405
},
{
"epoch": 3.923444976076555,
"grad_norm": 0.65625,
"learning_rate": 0.00018427897868623534,
"loss": 0.9996,
"step": 410
},
{
"epoch": 3.971291866028708,
"grad_norm": 0.8046875,
"learning_rate": 0.00018367153009734655,
"loss": 0.9957,
"step": 415
},
{
"epoch": 4.0,
"eval_loss": 2.470475912094116,
"eval_runtime": 0.5362,
"eval_samples_per_second": 18.649,
"eval_steps_per_second": 1.865,
"step": 418
},
{
"epoch": 4.019138755980861,
"grad_norm": 0.59375,
"learning_rate": 0.00018305360832480117,
"loss": 0.9875,
"step": 420
},
{
"epoch": 4.0669856459330145,
"grad_norm": 0.70703125,
"learning_rate": 0.00018242529071400214,
"loss": 0.9719,
"step": 425
},
{
"epoch": 4.114832535885167,
"grad_norm": 0.408203125,
"learning_rate": 0.00018178665591160172,
"loss": 0.9655,
"step": 430
},
{
"epoch": 4.162679425837321,
"grad_norm": 0.427734375,
"learning_rate": 0.00018113778385565733,
"loss": 0.9659,
"step": 435
},
{
"epoch": 4.2105263157894735,
"grad_norm": 0.625,
"learning_rate": 0.00018047875576562557,
"loss": 0.9758,
"step": 440
},
{
"epoch": 4.258373205741627,
"grad_norm": 0.55859375,
"learning_rate": 0.0001798096541321961,
"loss": 0.983,
"step": 445
},
{
"epoch": 4.30622009569378,
"grad_norm": 0.390625,
"learning_rate": 0.0001791305627069662,
"loss": 0.979,
"step": 450
},
{
"epoch": 4.354066985645933,
"grad_norm": 0.35546875,
"learning_rate": 0.00017844156649195759,
"loss": 0.9764,
"step": 455
},
{
"epoch": 4.401913875598086,
"grad_norm": 0.4453125,
"learning_rate": 0.0001777427517289766,
"loss": 0.9775,
"step": 460
},
{
"epoch": 4.44976076555024,
"grad_norm": 0.80078125,
"learning_rate": 0.00017703420588881946,
"loss": 0.9746,
"step": 465
},
{
"epoch": 4.497607655502392,
"grad_norm": 0.373046875,
"learning_rate": 0.00017631601766032336,
"loss": 0.972,
"step": 470
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.44140625,
"learning_rate": 0.00017558827693926534,
"loss": 0.9814,
"step": 475
},
{
"epoch": 4.5933014354066986,
"grad_norm": 0.32421875,
"learning_rate": 0.00017485107481711012,
"loss": 0.9813,
"step": 480
},
{
"epoch": 4.641148325358852,
"grad_norm": 0.408203125,
"learning_rate": 0.00017410450356960795,
"loss": 0.9811,
"step": 485
},
{
"epoch": 4.688995215311005,
"grad_norm": 0.3515625,
"learning_rate": 0.0001733486566452446,
"loss": 0.9705,
"step": 490
},
{
"epoch": 4.7368421052631575,
"grad_norm": 0.353515625,
"learning_rate": 0.00017258362865354426,
"loss": 0.985,
"step": 495
},
{
"epoch": 4.784688995215311,
"grad_norm": 0.380859375,
"learning_rate": 0.0001718095153532274,
"loss": 0.9817,
"step": 500
},
{
"epoch": 4.832535885167464,
"grad_norm": 0.474609375,
"learning_rate": 0.00017102641364022457,
"loss": 0.9686,
"step": 505
},
{
"epoch": 4.880382775119617,
"grad_norm": 0.384765625,
"learning_rate": 0.00017023442153554777,
"loss": 0.9838,
"step": 510
},
{
"epoch": 4.92822966507177,
"grad_norm": 0.53515625,
"learning_rate": 0.00016943363817302135,
"loss": 0.9774,
"step": 515
},
{
"epoch": 4.976076555023924,
"grad_norm": 0.50390625,
"learning_rate": 0.0001686241637868734,
"loss": 0.9775,
"step": 520
},
{
"epoch": 4.9952153110047846,
"eval_loss": 2.476422071456909,
"eval_runtime": 0.6478,
"eval_samples_per_second": 15.436,
"eval_steps_per_second": 1.544,
"step": 522
},
{
"epoch": 5.023923444976076,
"grad_norm": 0.625,
"learning_rate": 0.0001678060996991891,
"loss": 0.9684,
"step": 525
},
{
"epoch": 5.07177033492823,
"grad_norm": 0.33203125,
"learning_rate": 0.00016697954830722868,
"loss": 0.9614,
"step": 530
},
{
"epoch": 5.119617224880383,
"grad_norm": 0.376953125,
"learning_rate": 0.00016614461307061,
"loss": 0.963,
"step": 535
},
{
"epoch": 5.167464114832536,
"grad_norm": 0.439453125,
"learning_rate": 0.0001653013984983585,
"loss": 0.9558,
"step": 540
},
{
"epoch": 5.215311004784689,
"grad_norm": 0.375,
"learning_rate": 0.00016445001013582608,
"loss": 0.9533,
"step": 545
},
{
"epoch": 5.2631578947368425,
"grad_norm": 0.40234375,
"learning_rate": 0.0001635905545514795,
"loss": 0.9664,
"step": 550
},
{
"epoch": 5.311004784688995,
"grad_norm": 0.380859375,
"learning_rate": 0.00016272313932356162,
"loss": 0.9552,
"step": 555
},
{
"epoch": 5.358851674641148,
"grad_norm": 0.416015625,
"learning_rate": 0.0001618478730266255,
"loss": 0.9505,
"step": 560
},
{
"epoch": 5.4066985645933014,
"grad_norm": 0.39453125,
"learning_rate": 0.00016096486521794434,
"loss": 0.964,
"step": 565
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.416015625,
"learning_rate": 0.0001600742264237979,
"loss": 0.9558,
"step": 570
},
{
"epoch": 5.502392344497608,
"grad_norm": 0.5625,
"learning_rate": 0.0001591760681256382,
"loss": 0.9573,
"step": 575
},
{
"epoch": 5.55023923444976,
"grad_norm": 0.3515625,
"learning_rate": 0.00015827050274613513,
"loss": 0.9587,
"step": 580
},
{
"epoch": 5.598086124401914,
"grad_norm": 0.421875,
"learning_rate": 0.0001573576436351046,
"loss": 0.9638,
"step": 585
},
{
"epoch": 5.645933014354067,
"grad_norm": 0.4140625,
"learning_rate": 0.0001564376050553205,
"loss": 0.9513,
"step": 590
},
{
"epoch": 5.69377990430622,
"grad_norm": 0.62109375,
"learning_rate": 0.0001555105021682123,
"loss": 0.9627,
"step": 595
},
{
"epoch": 5.741626794258373,
"grad_norm": 0.62890625,
"learning_rate": 0.00015457645101945046,
"loss": 0.9546,
"step": 600
},
{
"epoch": 5.7894736842105265,
"grad_norm": 0.361328125,
"learning_rate": 0.00015363556852442085,
"loss": 0.9481,
"step": 605
},
{
"epoch": 5.837320574162679,
"grad_norm": 0.462890625,
"learning_rate": 0.00015268797245359035,
"loss": 0.9629,
"step": 610
},
{
"epoch": 5.885167464114833,
"grad_norm": 0.33984375,
"learning_rate": 0.00015173378141776568,
"loss": 0.9519,
"step": 615
},
{
"epoch": 5.9330143540669855,
"grad_norm": 0.423828125,
"learning_rate": 0.0001507731148532468,
"loss": 0.9584,
"step": 620
},
{
"epoch": 5.980861244019139,
"grad_norm": 0.3828125,
"learning_rate": 0.00014980609300687683,
"loss": 0.9584,
"step": 625
},
{
"epoch": 6.0,
"eval_loss": 2.487025737762451,
"eval_runtime": 0.5347,
"eval_samples_per_second": 18.7,
"eval_steps_per_second": 1.87,
"step": 627
},
{
"epoch": 6.028708133971292,
"grad_norm": 0.361328125,
"learning_rate": 0.00014883283692099112,
"loss": 0.9435,
"step": 630
},
{
"epoch": 6.076555023923445,
"grad_norm": 0.388671875,
"learning_rate": 0.000147853468418266,
"loss": 0.9462,
"step": 635
},
{
"epoch": 6.124401913875598,
"grad_norm": 0.443359375,
"learning_rate": 0.00014686811008647038,
"loss": 0.9372,
"step": 640
},
{
"epoch": 6.172248803827751,
"grad_norm": 0.37109375,
"learning_rate": 0.00014587688526312143,
"loss": 0.9405,
"step": 645
},
{
"epoch": 6.220095693779904,
"grad_norm": 0.474609375,
"learning_rate": 0.00014487991802004623,
"loss": 0.942,
"step": 650
},
{
"epoch": 6.267942583732057,
"grad_norm": 0.3984375,
"learning_rate": 0.00014387733314785193,
"loss": 0.9495,
"step": 655
},
{
"epoch": 6.315789473684211,
"grad_norm": 0.38671875,
"learning_rate": 0.00014286925614030542,
"loss": 0.9415,
"step": 660
},
{
"epoch": 6.363636363636363,
"grad_norm": 0.390625,
"learning_rate": 0.00014185581317862546,
"loss": 0.94,
"step": 665
},
{
"epoch": 6.411483253588517,
"grad_norm": 0.48046875,
"learning_rate": 0.00014083713111568842,
"loss": 0.9344,
"step": 670
},
{
"epoch": 6.45933014354067,
"grad_norm": 0.439453125,
"learning_rate": 0.0001398133374601501,
"loss": 0.9438,
"step": 675
},
{
"epoch": 6.507177033492823,
"grad_norm": 0.333984375,
"learning_rate": 0.0001387845603604855,
"loss": 0.941,
"step": 680
},
{
"epoch": 6.555023923444976,
"grad_norm": 0.5078125,
"learning_rate": 0.00013775092858894837,
"loss": 0.9433,
"step": 685
},
{
"epoch": 6.6028708133971294,
"grad_norm": 0.3515625,
"learning_rate": 0.00013671257152545277,
"loss": 0.9433,
"step": 690
},
{
"epoch": 6.650717703349282,
"grad_norm": 0.451171875,
"learning_rate": 0.00013566961914137867,
"loss": 0.9435,
"step": 695
},
{
"epoch": 6.698564593301436,
"grad_norm": 0.41015625,
"learning_rate": 0.00013462220198330328,
"loss": 0.9431,
"step": 700
},
{
"epoch": 6.746411483253588,
"grad_norm": 0.44140625,
"learning_rate": 0.0001335704511566605,
"loss": 0.9409,
"step": 705
},
{
"epoch": 6.794258373205742,
"grad_norm": 0.546875,
"learning_rate": 0.0001325144983093305,
"loss": 0.9437,
"step": 710
},
{
"epoch": 6.842105263157895,
"grad_norm": 0.412109375,
"learning_rate": 0.00013145447561516138,
"loss": 0.9491,
"step": 715
},
{
"epoch": 6.889952153110048,
"grad_norm": 0.38671875,
"learning_rate": 0.0001303905157574247,
"loss": 0.9445,
"step": 720
},
{
"epoch": 6.937799043062201,
"grad_norm": 0.380859375,
"learning_rate": 0.00012932275191220776,
"loss": 0.9315,
"step": 725
},
{
"epoch": 6.985645933014354,
"grad_norm": 0.36328125,
"learning_rate": 0.0001282513177317437,
"loss": 0.9368,
"step": 730
},
{
"epoch": 6.9952153110047846,
"eval_loss": 2.497544765472412,
"eval_runtime": 0.6485,
"eval_samples_per_second": 15.419,
"eval_steps_per_second": 1.542,
"step": 731
},
{
"epoch": 7.033492822966507,
"grad_norm": 0.69921875,
"learning_rate": 0.00012717634732768243,
"loss": 0.9238,
"step": 735
},
{
"epoch": 7.08133971291866,
"grad_norm": 0.39453125,
"learning_rate": 0.00012609797525430373,
"loss": 0.9235,
"step": 740
},
{
"epoch": 7.1291866028708135,
"grad_norm": 0.3984375,
"learning_rate": 0.00012501633649167495,
"loss": 0.9148,
"step": 745
},
{
"epoch": 7.177033492822966,
"grad_norm": 0.455078125,
"learning_rate": 0.0001239315664287558,
"loss": 0.9182,
"step": 750
},
{
"epoch": 7.22488038277512,
"grad_norm": 0.435546875,
"learning_rate": 0.00012284380084645139,
"loss": 0.9237,
"step": 755
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.443359375,
"learning_rate": 0.00012175317590061674,
"loss": 0.93,
"step": 760
},
{
"epoch": 7.320574162679426,
"grad_norm": 0.75,
"learning_rate": 0.00012065982810501404,
"loss": 0.9268,
"step": 765
},
{
"epoch": 7.368421052631579,
"grad_norm": 0.48828125,
"learning_rate": 0.00011956389431422507,
"loss": 0.9214,
"step": 770
},
{
"epoch": 7.416267942583732,
"grad_norm": 0.6015625,
"learning_rate": 0.00011846551170652127,
"loss": 0.931,
"step": 775
},
{
"epoch": 7.464114832535885,
"grad_norm": 0.453125,
"learning_rate": 0.00011736481776669306,
"loss": 0.9316,
"step": 780
},
{
"epoch": 7.511961722488039,
"grad_norm": 0.369140625,
"learning_rate": 0.0001162619502688407,
"loss": 0.9272,
"step": 785
},
{
"epoch": 7.559808612440191,
"grad_norm": 0.435546875,
"learning_rate": 0.00011515704725912926,
"loss": 0.9219,
"step": 790
},
{
"epoch": 7.607655502392344,
"grad_norm": 0.42578125,
"learning_rate": 0.00011405024703850929,
"loss": 0.9363,
"step": 795
},
{
"epoch": 7.655502392344498,
"grad_norm": 0.5390625,
"learning_rate": 0.00011294168814540553,
"loss": 0.9388,
"step": 800
},
{
"epoch": 7.703349282296651,
"grad_norm": 0.431640625,
"learning_rate": 0.00011183150933837632,
"loss": 0.9284,
"step": 805
},
{
"epoch": 7.751196172248804,
"grad_norm": 0.353515625,
"learning_rate": 0.00011071984957874479,
"loss": 0.9222,
"step": 810
},
{
"epoch": 7.7990430622009566,
"grad_norm": 0.42578125,
"learning_rate": 0.00010960684801320536,
"loss": 0.9335,
"step": 815
},
{
"epoch": 7.84688995215311,
"grad_norm": 0.6171875,
"learning_rate": 0.00010849264395640649,
"loss": 0.9382,
"step": 820
},
{
"epoch": 7.894736842105263,
"grad_norm": 0.66015625,
"learning_rate": 0.00010737737687351284,
"loss": 0.9414,
"step": 825
},
{
"epoch": 7.942583732057416,
"grad_norm": 0.5234375,
"learning_rate": 0.0001062611863627482,
"loss": 0.9374,
"step": 830
},
{
"epoch": 7.990430622009569,
"grad_norm": 0.33984375,
"learning_rate": 0.00010514421213792205,
"loss": 0.93,
"step": 835
},
{
"epoch": 8.0,
"eval_loss": 2.506725311279297,
"eval_runtime": 0.5363,
"eval_samples_per_second": 18.647,
"eval_steps_per_second": 1.865,
"step": 836
},
{
"epoch": 8.038277511961722,
"grad_norm": 0.390625,
"learning_rate": 0.00010402659401094152,
"loss": 0.9129,
"step": 840
},
{
"epoch": 8.086124401913876,
"grad_norm": 0.35546875,
"learning_rate": 0.00010290847187431113,
"loss": 0.9107,
"step": 845
},
{
"epoch": 8.133971291866029,
"grad_norm": 0.36328125,
"learning_rate": 0.00010178998568362243,
"loss": 0.9226,
"step": 850
},
{
"epoch": 8.181818181818182,
"grad_norm": 0.345703125,
"learning_rate": 0.00010067127544003563,
"loss": 0.9184,
"step": 855
},
{
"epoch": 8.229665071770334,
"grad_norm": 0.35546875,
"learning_rate": 9.955248117275566e-05,
"loss": 0.915,
"step": 860
},
{
"epoch": 8.277511961722489,
"grad_norm": 0.396484375,
"learning_rate": 9.843374292150488e-05,
"loss": 0.9234,
"step": 865
},
{
"epoch": 8.325358851674642,
"grad_norm": 0.3671875,
"learning_rate": 9.73152007189939e-05,
"loss": 0.9169,
"step": 870
},
{
"epoch": 8.373205741626794,
"grad_norm": 0.373046875,
"learning_rate": 9.619699457339405e-05,
"loss": 0.9131,
"step": 875
},
{
"epoch": 8.421052631578947,
"grad_norm": 0.3671875,
"learning_rate": 9.507926445081219e-05,
"loss": 0.9189,
"step": 880
},
{
"epoch": 8.4688995215311,
"grad_norm": 0.359375,
"learning_rate": 9.396215025777139e-05,
"loss": 0.9125,
"step": 885
},
{
"epoch": 8.516746411483254,
"grad_norm": 0.341796875,
"learning_rate": 9.284579182369867e-05,
"loss": 0.9089,
"step": 890
},
{
"epoch": 8.564593301435407,
"grad_norm": 0.4453125,
"learning_rate": 9.173032888342244e-05,
"loss": 0.9221,
"step": 895
},
{
"epoch": 8.61244019138756,
"grad_norm": 0.5546875,
"learning_rate": 9.061590105968208e-05,
"loss": 0.9155,
"step": 900
},
{
"epoch": 8.660287081339712,
"grad_norm": 0.55859375,
"learning_rate": 8.950264784565112e-05,
"loss": 0.9286,
"step": 905
},
{
"epoch": 8.708133971291867,
"grad_norm": 0.36328125,
"learning_rate": 8.839070858747697e-05,
"loss": 0.9199,
"step": 910
},
{
"epoch": 8.75598086124402,
"grad_norm": 0.365234375,
"learning_rate": 8.728022246683894e-05,
"loss": 0.908,
"step": 915
},
{
"epoch": 8.803827751196172,
"grad_norm": 0.345703125,
"learning_rate": 8.617132848352671e-05,
"loss": 0.9169,
"step": 920
},
{
"epoch": 8.851674641148325,
"grad_norm": 0.390625,
"learning_rate": 8.506416543804182e-05,
"loss": 0.9238,
"step": 925
},
{
"epoch": 8.89952153110048,
"grad_norm": 0.5703125,
"learning_rate": 8.395887191422397e-05,
"loss": 0.9209,
"step": 930
},
{
"epoch": 8.947368421052632,
"grad_norm": 0.46875,
"learning_rate": 8.285558626190447e-05,
"loss": 0.9189,
"step": 935
},
{
"epoch": 8.995215311004785,
"grad_norm": 0.337890625,
"learning_rate": 8.175444657958876e-05,
"loss": 0.9195,
"step": 940
},
{
"epoch": 8.995215311004785,
"eval_loss": 2.5168216228485107,
"eval_runtime": 0.6511,
"eval_samples_per_second": 15.36,
"eval_steps_per_second": 1.536,
"step": 940
},
{
"epoch": 9.043062200956937,
"grad_norm": 0.40625,
"learning_rate": 8.065559069717088e-05,
"loss": 0.9021,
"step": 945
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.41015625,
"learning_rate": 7.955915615868111e-05,
"loss": 0.9103,
"step": 950
},
{
"epoch": 9.138755980861244,
"grad_norm": 0.392578125,
"learning_rate": 7.846528020506957e-05,
"loss": 0.8983,
"step": 955
},
{
"epoch": 9.186602870813397,
"grad_norm": 0.36328125,
"learning_rate": 7.73740997570278e-05,
"loss": 0.9037,
"step": 960
},
{
"epoch": 9.23444976076555,
"grad_norm": 0.353515625,
"learning_rate": 7.628575139785024e-05,
"loss": 0.9036,
"step": 965
},
{
"epoch": 9.282296650717702,
"grad_norm": 0.388671875,
"learning_rate": 7.520037135633816e-05,
"loss": 0.9051,
"step": 970
},
{
"epoch": 9.330143540669857,
"grad_norm": 0.439453125,
"learning_rate": 7.411809548974792e-05,
"loss": 0.9186,
"step": 975
},
{
"epoch": 9.37799043062201,
"grad_norm": 0.365234375,
"learning_rate": 7.303905926678564e-05,
"loss": 0.9095,
"step": 980
},
{
"epoch": 9.425837320574162,
"grad_norm": 0.375,
"learning_rate": 7.196339775065042e-05,
"loss": 0.9069,
"step": 985
},
{
"epoch": 9.473684210526315,
"grad_norm": 0.376953125,
"learning_rate": 7.089124558212871e-05,
"loss": 0.908,
"step": 990
},
{
"epoch": 9.52153110047847,
"grad_norm": 0.38671875,
"learning_rate": 6.982273696274106e-05,
"loss": 0.9121,
"step": 995
},
{
"epoch": 9.569377990430622,
"grad_norm": 0.41796875,
"learning_rate": 6.875800563794425e-05,
"loss": 0.9097,
"step": 1000
},
{
"epoch": 9.617224880382775,
"grad_norm": 0.361328125,
"learning_rate": 6.769718488039023e-05,
"loss": 0.9101,
"step": 1005
},
{
"epoch": 9.665071770334928,
"grad_norm": 0.3515625,
"learning_rate": 6.664040747324437e-05,
"loss": 0.9117,
"step": 1010
},
{
"epoch": 9.712918660287082,
"grad_norm": 0.375,
"learning_rate": 6.558780569356507e-05,
"loss": 0.9106,
"step": 1015
},
{
"epoch": 9.760765550239235,
"grad_norm": 0.376953125,
"learning_rate": 6.453951129574644e-05,
"loss": 0.9037,
"step": 1020
},
{
"epoch": 9.808612440191387,
"grad_norm": 0.40234375,
"learning_rate": 6.349565549502676e-05,
"loss": 0.9033,
"step": 1025
},
{
"epoch": 9.85645933014354,
"grad_norm": 0.3515625,
"learning_rate": 6.245636895106402e-05,
"loss": 0.9156,
"step": 1030
},
{
"epoch": 9.904306220095695,
"grad_norm": 0.357421875,
"learning_rate": 6.142178175158149e-05,
"loss": 0.9082,
"step": 1035
},
{
"epoch": 9.952153110047847,
"grad_norm": 0.451171875,
"learning_rate": 6.039202339608432e-05,
"loss": 0.9147,
"step": 1040
},
{
"epoch": 10.0,
"grad_norm": 0.50390625,
"learning_rate": 5.9367222779650334e-05,
"loss": 0.912,
"step": 1045
},
{
"epoch": 10.0,
"eval_loss": 2.5271153450012207,
"eval_runtime": 0.5317,
"eval_samples_per_second": 18.809,
"eval_steps_per_second": 1.881,
"step": 1045
},
{
"epoch": 10.047846889952153,
"grad_norm": 0.439453125,
"learning_rate": 5.834750817679606e-05,
"loss": 0.9084,
"step": 1050
},
{
"epoch": 10.095693779904305,
"grad_norm": 0.3515625,
"learning_rate": 5.733300722542045e-05,
"loss": 0.897,
"step": 1055
},
{
"epoch": 10.14354066985646,
"grad_norm": 0.376953125,
"learning_rate": 5.6323846910828735e-05,
"loss": 0.8998,
"step": 1060
},
{
"epoch": 10.191387559808613,
"grad_norm": 0.349609375,
"learning_rate": 5.5320153549837415e-05,
"loss": 0.9026,
"step": 1065
},
{
"epoch": 10.239234449760765,
"grad_norm": 0.3515625,
"learning_rate": 5.432205277496327e-05,
"loss": 0.8996,
"step": 1070
},
{
"epoch": 10.287081339712918,
"grad_norm": 0.359375,
"learning_rate": 5.33296695186977e-05,
"loss": 0.9,
"step": 1075
},
{
"epoch": 10.334928229665072,
"grad_norm": 0.41015625,
"learning_rate": 5.234312799786921e-05,
"loss": 0.8988,
"step": 1080
},
{
"epoch": 10.382775119617225,
"grad_norm": 0.41015625,
"learning_rate": 5.1362551698094964e-05,
"loss": 0.8969,
"step": 1085
},
{
"epoch": 10.430622009569378,
"grad_norm": 0.35546875,
"learning_rate": 5.0388063358324134e-05,
"loss": 0.8954,
"step": 1090
},
{
"epoch": 10.47846889952153,
"grad_norm": 0.3515625,
"learning_rate": 4.9419784955474524e-05,
"loss": 0.8991,
"step": 1095
},
{
"epoch": 10.526315789473685,
"grad_norm": 0.359375,
"learning_rate": 4.845783768916482e-05,
"loss": 0.9074,
"step": 1100
},
{
"epoch": 10.574162679425838,
"grad_norm": 0.353515625,
"learning_rate": 4.7502341966544e-05,
"loss": 0.9012,
"step": 1105
},
{
"epoch": 10.62200956937799,
"grad_norm": 0.34765625,
"learning_rate": 4.6553417387219886e-05,
"loss": 0.8993,
"step": 1110
},
{
"epoch": 10.669856459330143,
"grad_norm": 0.333984375,
"learning_rate": 4.5611182728288895e-05,
"loss": 0.9073,
"step": 1115
},
{
"epoch": 10.717703349282296,
"grad_norm": 0.341796875,
"learning_rate": 4.467575592946864e-05,
"loss": 0.9073,
"step": 1120
},
{
"epoch": 10.76555023923445,
"grad_norm": 0.349609375,
"learning_rate": 4.374725407833532e-05,
"loss": 0.902,
"step": 1125
},
{
"epoch": 10.813397129186603,
"grad_norm": 0.380859375,
"learning_rate": 4.282579339566802e-05,
"loss": 0.8993,
"step": 1130
},
{
"epoch": 10.861244019138756,
"grad_norm": 0.37109375,
"learning_rate": 4.1911489220901236e-05,
"loss": 0.9002,
"step": 1135
},
{
"epoch": 10.909090909090908,
"grad_norm": 0.3515625,
"learning_rate": 4.100445599768774e-05,
"loss": 0.9115,
"step": 1140
},
{
"epoch": 10.956937799043063,
"grad_norm": 0.34375,
"learning_rate": 4.0104807259573716e-05,
"loss": 0.9003,
"step": 1145
},
{
"epoch": 10.995215311004785,
"eval_loss": 2.535585403442383,
"eval_runtime": 0.6791,
"eval_samples_per_second": 14.726,
"eval_steps_per_second": 1.473,
"step": 1149
},
{
"epoch": 11.004784688995215,
"grad_norm": 0.345703125,
"learning_rate": 3.9212655615787804e-05,
"loss": 0.9032,
"step": 1150
},
{
"epoch": 11.052631578947368,
"grad_norm": 0.34765625,
"learning_rate": 3.832811273714569e-05,
"loss": 0.902,
"step": 1155
},
{
"epoch": 11.10047846889952,
"grad_norm": 0.36328125,
"learning_rate": 3.745128934207225e-05,
"loss": 0.8894,
"step": 1160
},
{
"epoch": 11.148325358851675,
"grad_norm": 0.388671875,
"learning_rate": 3.6582295182742964e-05,
"loss": 0.8935,
"step": 1165
},
{
"epoch": 11.196172248803828,
"grad_norm": 0.365234375,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.8997,
"step": 1170
},
{
"epoch": 11.24401913875598,
"grad_norm": 0.3359375,
"learning_rate": 3.4868228666467704e-05,
"loss": 0.8972,
"step": 1175
},
{
"epoch": 11.291866028708133,
"grad_norm": 0.349609375,
"learning_rate": 3.402337085960119e-05,
"loss": 0.9028,
"step": 1180
},
{
"epoch": 11.339712918660288,
"grad_norm": 0.341796875,
"learning_rate": 3.318677136178228e-05,
"loss": 0.8943,
"step": 1185
},
{
"epoch": 11.38755980861244,
"grad_norm": 0.345703125,
"learning_rate": 3.235853489035241e-05,
"loss": 0.8851,
"step": 1190
},
{
"epoch": 11.435406698564593,
"grad_norm": 0.384765625,
"learning_rate": 3.153876511585122e-05,
"loss": 0.8917,
"step": 1195
},
{
"epoch": 11.483253588516746,
"grad_norm": 0.392578125,
"learning_rate": 3.072756464904006e-05,
"loss": 0.8974,
"step": 1200
},
{
"epoch": 11.5311004784689,
"grad_norm": 0.337890625,
"learning_rate": 2.9925035028058134e-05,
"loss": 0.8951,
"step": 1205
},
{
"epoch": 11.578947368421053,
"grad_norm": 0.330078125,
"learning_rate": 2.9131276705713006e-05,
"loss": 0.893,
"step": 1210
},
{
"epoch": 11.626794258373206,
"grad_norm": 0.345703125,
"learning_rate": 2.8346389036906828e-05,
"loss": 0.899,
"step": 1215
},
{
"epoch": 11.674641148325358,
"grad_norm": 0.337890625,
"learning_rate": 2.7570470266200176e-05,
"loss": 0.9037,
"step": 1220
},
{
"epoch": 11.722488038277511,
"grad_norm": 0.33984375,
"learning_rate": 2.68036175155147e-05,
"loss": 0.8917,
"step": 1225
},
{
"epoch": 11.770334928229666,
"grad_norm": 0.37890625,
"learning_rate": 2.6045926771976303e-05,
"loss": 0.9061,
"step": 1230
},
{
"epoch": 11.818181818181818,
"grad_norm": 0.3671875,
"learning_rate": 2.529749287590042e-05,
"loss": 0.9028,
"step": 1235
},
{
"epoch": 11.866028708133971,
"grad_norm": 0.337890625,
"learning_rate": 2.4558409508920986e-05,
"loss": 0.8946,
"step": 1240
},
{
"epoch": 11.913875598086124,
"grad_norm": 0.349609375,
"learning_rate": 2.382876918226409e-05,
"loss": 0.8938,
"step": 1245
},
{
"epoch": 11.961722488038278,
"grad_norm": 0.36328125,
"learning_rate": 2.3108663225168435e-05,
"loss": 0.9032,
"step": 1250
},
{
"epoch": 12.0,
"eval_loss": 2.5401320457458496,
"eval_runtime": 0.536,
"eval_samples_per_second": 18.658,
"eval_steps_per_second": 1.866,
"step": 1254
},
{
"epoch": 12.009569377990431,
"grad_norm": 0.33984375,
"learning_rate": 2.239818177345364e-05,
"loss": 0.9023,
"step": 1255
},
{
"epoch": 12.057416267942584,
"grad_norm": 0.337890625,
"learning_rate": 2.1697413758237784e-05,
"loss": 0.8902,
"step": 1260
},
{
"epoch": 12.105263157894736,
"grad_norm": 0.359375,
"learning_rate": 2.1006446894806065e-05,
"loss": 0.8958,
"step": 1265
},
{
"epoch": 12.15311004784689,
"grad_norm": 0.337890625,
"learning_rate": 2.032536767163141e-05,
"loss": 0.8949,
"step": 1270
},
{
"epoch": 12.200956937799043,
"grad_norm": 0.3671875,
"learning_rate": 1.965426133954854e-05,
"loss": 0.9023,
"step": 1275
},
{
"epoch": 12.248803827751196,
"grad_norm": 0.333984375,
"learning_rate": 1.8993211901083353e-05,
"loss": 0.895,
"step": 1280
},
{
"epoch": 12.296650717703349,
"grad_norm": 0.328125,
"learning_rate": 1.8342302099938057e-05,
"loss": 0.8925,
"step": 1285
},
{
"epoch": 12.344497607655502,
"grad_norm": 0.328125,
"learning_rate": 1.7701613410634365e-05,
"loss": 0.8986,
"step": 1290
},
{
"epoch": 12.392344497607656,
"grad_norm": 0.33203125,
"learning_rate": 1.7071226028315113e-05,
"loss": 0.8922,
"step": 1295
},
{
"epoch": 12.440191387559809,
"grad_norm": 0.35546875,
"learning_rate": 1.6451218858706374e-05,
"loss": 0.8878,
"step": 1300
},
{
"epoch": 12.488038277511961,
"grad_norm": 0.337890625,
"learning_rate": 1.584166950824061e-05,
"loss": 0.8992,
"step": 1305
},
{
"epoch": 12.535885167464114,
"grad_norm": 0.361328125,
"learning_rate": 1.5242654274342894e-05,
"loss": 0.8879,
"step": 1310
},
{
"epoch": 12.583732057416269,
"grad_norm": 0.353515625,
"learning_rate": 1.4654248135880621e-05,
"loss": 0.8942,
"step": 1315
},
{
"epoch": 12.631578947368421,
"grad_norm": 0.353515625,
"learning_rate": 1.4076524743778319e-05,
"loss": 0.8957,
"step": 1320
},
{
"epoch": 12.679425837320574,
"grad_norm": 0.33203125,
"learning_rate": 1.350955641179893e-05,
"loss": 0.8981,
"step": 1325
},
{
"epoch": 12.727272727272727,
"grad_norm": 0.353515625,
"learning_rate": 1.295341410749208e-05,
"loss": 0.8952,
"step": 1330
},
{
"epoch": 12.775119617224881,
"grad_norm": 0.34375,
"learning_rate": 1.2408167443311214e-05,
"loss": 0.8945,
"step": 1335
},
{
"epoch": 12.822966507177034,
"grad_norm": 0.359375,
"learning_rate": 1.1873884667900125e-05,
"loss": 0.8851,
"step": 1340
},
{
"epoch": 12.870813397129186,
"grad_norm": 0.34375,
"learning_rate": 1.1350632657550253e-05,
"loss": 0.8922,
"step": 1345
},
{
"epoch": 12.91866028708134,
"grad_norm": 0.3515625,
"learning_rate": 1.083847690782972e-05,
"loss": 0.8926,
"step": 1350
},
{
"epoch": 12.966507177033494,
"grad_norm": 0.349609375,
"learning_rate": 1.0337481525385362e-05,
"loss": 0.9006,
"step": 1355
},
{
"epoch": 12.995215311004785,
"eval_loss": 2.5425758361816406,
"eval_runtime": 0.6353,
"eval_samples_per_second": 15.741,
"eval_steps_per_second": 1.574,
"step": 1358
},
{
"epoch": 13.014354066985646,
"grad_norm": 0.345703125,
"learning_rate": 9.8477092199184e-06,
"loss": 0.9002,
"step": 1360
},
{
"epoch": 13.062200956937799,
"grad_norm": 0.34375,
"learning_rate": 9.369221296335006e-06,
"loss": 0.9007,
"step": 1365
},
{
"epoch": 13.110047846889952,
"grad_norm": 0.34765625,
"learning_rate": 8.902077647072881e-06,
"loss": 0.8882,
"step": 1370
},
{
"epoch": 13.157894736842104,
"grad_norm": 0.34375,
"learning_rate": 8.446336744604378e-06,
"loss": 0.888,
"step": 1375
},
{
"epoch": 13.205741626794259,
"grad_norm": 0.33203125,
"learning_rate": 8.002055634117578e-06,
"loss": 0.8869,
"step": 1380
},
{
"epoch": 13.253588516746412,
"grad_norm": 0.34375,
"learning_rate": 7.569289926375933e-06,
"loss": 0.8931,
"step": 1385
},
{
"epoch": 13.301435406698564,
"grad_norm": 0.326171875,
"learning_rate": 7.148093790757371e-06,
"loss": 0.8958,
"step": 1390
},
{
"epoch": 13.349282296650717,
"grad_norm": 0.330078125,
"learning_rate": 6.738519948473976e-06,
"loss": 0.8914,
"step": 1395
},
{
"epoch": 13.397129186602871,
"grad_norm": 0.349609375,
"learning_rate": 6.3406196659728465e-06,
"loss": 0.8975,
"step": 1400
},
{
"epoch": 13.444976076555024,
"grad_norm": 0.330078125,
"learning_rate": 5.954442748519073e-06,
"loss": 0.8908,
"step": 1405
},
{
"epoch": 13.492822966507177,
"grad_norm": 0.345703125,
"learning_rate": 5.580037533961546e-06,
"loss": 0.8938,
"step": 1410
},
{
"epoch": 13.54066985645933,
"grad_norm": 0.349609375,
"learning_rate": 5.217450886682584e-06,
"loss": 0.8958,
"step": 1415
},
{
"epoch": 13.588516746411484,
"grad_norm": 0.3359375,
"learning_rate": 4.866728191731829e-06,
"loss": 0.8905,
"step": 1420
},
{
"epoch": 13.636363636363637,
"grad_norm": 0.330078125,
"learning_rate": 4.527913349145441e-06,
"loss": 0.8995,
"step": 1425
},
{
"epoch": 13.68421052631579,
"grad_norm": 0.3671875,
"learning_rate": 4.20104876845111e-06,
"loss": 0.895,
"step": 1430
},
{
"epoch": 13.732057416267942,
"grad_norm": 0.33203125,
"learning_rate": 3.886175363359646e-06,
"loss": 0.8964,
"step": 1435
},
{
"epoch": 13.779904306220097,
"grad_norm": 0.34375,
"learning_rate": 3.5833325466437694e-06,
"loss": 0.891,
"step": 1440
},
{
"epoch": 13.82775119617225,
"grad_norm": 0.341796875,
"learning_rate": 3.2925582252048338e-06,
"loss": 0.8903,
"step": 1445
},
{
"epoch": 13.875598086124402,
"grad_norm": 0.3359375,
"learning_rate": 3.013888795328057e-06,
"loss": 0.8926,
"step": 1450
},
{
"epoch": 13.923444976076555,
"grad_norm": 0.330078125,
"learning_rate": 2.7473591381266708e-06,
"loss": 0.8921,
"step": 1455
},
{
"epoch": 13.971291866028707,
"grad_norm": 0.357421875,
"learning_rate": 2.4930026151759766e-06,
"loss": 0.9007,
"step": 1460
},
{
"epoch": 14.0,
"eval_loss": 2.5432519912719727,
"eval_runtime": 0.5348,
"eval_samples_per_second": 18.7,
"eval_steps_per_second": 1.87,
"step": 1463
},
{
"epoch": 14.019138755980862,
"grad_norm": 0.3359375,
"learning_rate": 2.250851064337367e-06,
"loss": 0.8857,
"step": 1465
},
{
"epoch": 14.066985645933014,
"grad_norm": 0.330078125,
"learning_rate": 2.0209347957732328e-06,
"loss": 0.897,
"step": 1470
},
{
"epoch": 14.114832535885167,
"grad_norm": 0.337890625,
"learning_rate": 1.8032825881530213e-06,
"loss": 0.8863,
"step": 1475
},
{
"epoch": 14.16267942583732,
"grad_norm": 0.3359375,
"learning_rate": 1.5979216850509848e-06,
"loss": 0.8968,
"step": 1480
},
{
"epoch": 14.210526315789474,
"grad_norm": 0.345703125,
"learning_rate": 1.404877791536141e-06,
"loss": 0.8934,
"step": 1485
},
{
"epoch": 14.258373205741627,
"grad_norm": 0.3671875,
"learning_rate": 1.2241750709546917e-06,
"loss": 0.8868,
"step": 1490
},
{
"epoch": 14.30622009569378,
"grad_norm": 0.341796875,
"learning_rate": 1.055836141905553e-06,
"loss": 0.8992,
"step": 1495
},
{
"epoch": 14.354066985645932,
"grad_norm": 0.337890625,
"learning_rate": 8.998820754091531e-07,
"loss": 0.8934,
"step": 1500
},
{
"epoch": 14.401913875598087,
"grad_norm": 0.3515625,
"learning_rate": 7.563323922699983e-07,
"loss": 0.8897,
"step": 1505
},
{
"epoch": 14.44976076555024,
"grad_norm": 0.328125,
"learning_rate": 6.25205060633205e-07,
"loss": 0.8929,
"step": 1510
},
{
"epoch": 14.497607655502392,
"grad_norm": 0.3359375,
"learning_rate": 5.065164937354428e-07,
"loss": 0.9045,
"step": 1515
},
{
"epoch": 14.545454545454545,
"grad_norm": 0.328125,
"learning_rate": 4.0028154785050063e-07,
"loss": 0.8877,
"step": 1520
},
{
"epoch": 14.593301435406698,
"grad_norm": 0.3515625,
"learning_rate": 3.065135204296965e-07,
"loss": 0.8995,
"step": 1525
},
{
"epoch": 14.641148325358852,
"grad_norm": 0.3359375,
"learning_rate": 2.2522414843748618e-07,
"loss": 0.8919,
"step": 1530
},
{
"epoch": 14.688995215311005,
"grad_norm": 0.353515625,
"learning_rate": 1.5642360688225e-07,
"loss": 0.8905,
"step": 1535
},
{
"epoch": 14.736842105263158,
"grad_norm": 0.33203125,
"learning_rate": 1.0012050754277802e-07,
"loss": 0.8943,
"step": 1540
},
{
"epoch": 14.78468899521531,
"grad_norm": 0.34375,
"learning_rate": 5.632189789027687e-08,
"loss": 0.8943,
"step": 1545
},
{
"epoch": 14.832535885167465,
"grad_norm": 0.330078125,
"learning_rate": 2.5033260206275277e-08,
"loss": 0.8969,
"step": 1550
},
{
"epoch": 14.880382775119617,
"grad_norm": 0.326171875,
"learning_rate": 6.25851089636198e-09,
"loss": 0.8944,
"step": 1555
},
{
"epoch": 14.92822966507177,
"grad_norm": 0.330078125,
"learning_rate": 0.0,
"loss": 0.896,
"step": 1560
},
{
"epoch": 14.92822966507177,
"eval_loss": 2.542541980743408,
"eval_runtime": 0.5324,
"eval_samples_per_second": 18.782,
"eval_steps_per_second": 1.878,
"step": 1560
},
{
"epoch": 14.92822966507177,
"step": 1560,
"total_flos": 1.2217365722824704e+18,
"train_loss": 1.0227742999027938,
"train_runtime": 5355.4835,
"train_samples_per_second": 18.668,
"train_steps_per_second": 0.291
}
],
"logging_steps": 5,
"max_steps": 1560,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 100,
"total_flos": 1.2217365722824704e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}