{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1949,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.9867920763488255,
"learning_rate": 1.0256410256410257e-06,
"loss": 3.1404,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 1.0805236415283819,
"learning_rate": 5.128205128205128e-06,
"loss": 3.0206,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 1.088461828968177,
"learning_rate": 1.0256410256410256e-05,
"loss": 3.0901,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 1.2357171111134264,
"learning_rate": 1.5384615384615387e-05,
"loss": 3.1031,
"step": 15
},
{
"epoch": 0.01,
"grad_norm": 1.739041901525227,
"learning_rate": 2.0512820512820512e-05,
"loss": 2.9391,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 2.013913743156505,
"learning_rate": 2.564102564102564e-05,
"loss": 2.822,
"step": 25
},
{
"epoch": 0.02,
"grad_norm": 2.398142814112249,
"learning_rate": 3.0769230769230774e-05,
"loss": 2.5475,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 2.7503687163287225,
"learning_rate": 3.58974358974359e-05,
"loss": 2.1604,
"step": 35
},
{
"epoch": 0.02,
"grad_norm": 2.176951249507104,
"learning_rate": 4.1025641025641023e-05,
"loss": 1.8203,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 1.5478513051485294,
"learning_rate": 4.615384615384616e-05,
"loss": 1.5269,
"step": 45
},
{
"epoch": 0.03,
"grad_norm": 0.897483265235072,
"learning_rate": 5.128205128205128e-05,
"loss": 1.3682,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 0.894162871732136,
"learning_rate": 5.6410256410256414e-05,
"loss": 1.2891,
"step": 55
},
{
"epoch": 0.03,
"grad_norm": 0.8614246599006732,
"learning_rate": 6.153846153846155e-05,
"loss": 1.337,
"step": 60
},
{
"epoch": 0.03,
"grad_norm": 0.7541968897830927,
"learning_rate": 6.666666666666667e-05,
"loss": 1.2414,
"step": 65
},
{
"epoch": 0.04,
"grad_norm": 0.8776597324349444,
"learning_rate": 7.17948717948718e-05,
"loss": 1.1697,
"step": 70
},
{
"epoch": 0.04,
"grad_norm": 0.7079617838785108,
"learning_rate": 7.692307692307693e-05,
"loss": 1.2498,
"step": 75
},
{
"epoch": 0.04,
"grad_norm": 0.5939948296736147,
"learning_rate": 8.205128205128205e-05,
"loss": 1.0828,
"step": 80
},
{
"epoch": 0.04,
"grad_norm": 0.8578871018906925,
"learning_rate": 8.717948717948718e-05,
"loss": 1.1361,
"step": 85
},
{
"epoch": 0.05,
"grad_norm": 0.7029963269013992,
"learning_rate": 9.230769230769232e-05,
"loss": 1.1109,
"step": 90
},
{
"epoch": 0.05,
"grad_norm": 0.7504243519761483,
"learning_rate": 9.743589743589744e-05,
"loss": 1.1677,
"step": 95
},
{
"epoch": 0.05,
"grad_norm": 0.6443750018539233,
"learning_rate": 0.00010256410256410256,
"loss": 1.1607,
"step": 100
},
{
"epoch": 0.05,
"grad_norm": 0.7871252383024564,
"learning_rate": 0.0001076923076923077,
"loss": 1.1253,
"step": 105
},
{
"epoch": 0.06,
"grad_norm": 0.8312394547618829,
"learning_rate": 0.00011282051282051283,
"loss": 1.1161,
"step": 110
},
{
"epoch": 0.06,
"grad_norm": 0.8428399360884844,
"learning_rate": 0.00011794871794871796,
"loss": 1.0784,
"step": 115
},
{
"epoch": 0.06,
"grad_norm": 0.6825900451211728,
"learning_rate": 0.0001230769230769231,
"loss": 1.1021,
"step": 120
},
{
"epoch": 0.06,
"grad_norm": 0.7744294888114935,
"learning_rate": 0.00012820512820512823,
"loss": 1.0604,
"step": 125
},
{
"epoch": 0.07,
"grad_norm": 1.002646739548983,
"learning_rate": 0.00013333333333333334,
"loss": 1.0847,
"step": 130
},
{
"epoch": 0.07,
"grad_norm": 0.6012624344435019,
"learning_rate": 0.00013846153846153847,
"loss": 1.0181,
"step": 135
},
{
"epoch": 0.07,
"grad_norm": 0.8096262697642048,
"learning_rate": 0.0001435897435897436,
"loss": 1.0158,
"step": 140
},
{
"epoch": 0.07,
"grad_norm": 0.7557949179312086,
"learning_rate": 0.00014871794871794872,
"loss": 0.9636,
"step": 145
},
{
"epoch": 0.08,
"grad_norm": 0.8203564361587894,
"learning_rate": 0.00015384615384615385,
"loss": 1.1169,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 0.579163922703823,
"learning_rate": 0.00015897435897435896,
"loss": 1.0084,
"step": 155
},
{
"epoch": 0.08,
"grad_norm": 0.8545433716627759,
"learning_rate": 0.0001641025641025641,
"loss": 0.9985,
"step": 160
},
{
"epoch": 0.08,
"grad_norm": 0.7360453540530675,
"learning_rate": 0.00016923076923076923,
"loss": 1.0567,
"step": 165
},
{
"epoch": 0.09,
"grad_norm": 0.8805492524534003,
"learning_rate": 0.00017435897435897436,
"loss": 1.1034,
"step": 170
},
{
"epoch": 0.09,
"grad_norm": 0.7273871780027367,
"learning_rate": 0.0001794871794871795,
"loss": 1.1297,
"step": 175
},
{
"epoch": 0.09,
"grad_norm": 0.7038735687702271,
"learning_rate": 0.00018461538461538463,
"loss": 0.9889,
"step": 180
},
{
"epoch": 0.09,
"grad_norm": 0.7270115089083556,
"learning_rate": 0.00018974358974358974,
"loss": 1.0766,
"step": 185
},
{
"epoch": 0.1,
"grad_norm": 0.5737455545454937,
"learning_rate": 0.00019487179487179487,
"loss": 0.9499,
"step": 190
},
{
"epoch": 0.1,
"grad_norm": 0.6443494959803631,
"learning_rate": 0.0002,
"loss": 1.0763,
"step": 195
},
{
"epoch": 0.1,
"grad_norm": 0.6243474884139971,
"learning_rate": 0.00019999598996948235,
"loss": 1.0062,
"step": 200
},
{
"epoch": 0.11,
"grad_norm": 0.7975791739775852,
"learning_rate": 0.00019998396019953624,
"loss": 1.0289,
"step": 205
},
{
"epoch": 0.11,
"grad_norm": 0.6860096128890616,
"learning_rate": 0.0001999639116549566,
"loss": 0.9252,
"step": 210
},
{
"epoch": 0.11,
"grad_norm": 0.8108743145976218,
"learning_rate": 0.00019993584594364894,
"loss": 0.9773,
"step": 215
},
{
"epoch": 0.11,
"grad_norm": 0.5789407405430219,
"learning_rate": 0.0001998997653165004,
"loss": 1.0155,
"step": 220
},
{
"epoch": 0.12,
"grad_norm": 0.7363091759403909,
"learning_rate": 0.00019985567266719934,
"loss": 1.1001,
"step": 225
},
{
"epoch": 0.12,
"grad_norm": 0.8418654811864583,
"learning_rate": 0.00019980357153200315,
"loss": 1.0407,
"step": 230
},
{
"epoch": 0.12,
"grad_norm": 0.8063197989410974,
"learning_rate": 0.00019974346608945466,
"loss": 1.0382,
"step": 235
},
{
"epoch": 0.12,
"grad_norm": 0.5812499941487835,
"learning_rate": 0.00019967536116004698,
"loss": 1.0355,
"step": 240
},
{
"epoch": 0.13,
"grad_norm": 0.7538634529600193,
"learning_rate": 0.00019959926220583713,
"loss": 1.0133,
"step": 245
},
{
"epoch": 0.13,
"grad_norm": 0.7276811166344807,
"learning_rate": 0.00019951517533000764,
"loss": 0.9576,
"step": 250
},
{
"epoch": 0.13,
"grad_norm": 0.6007777697802229,
"learning_rate": 0.00019942310727637724,
"loss": 1.0232,
"step": 255
},
{
"epoch": 0.13,
"grad_norm": 0.6922378895554703,
"learning_rate": 0.00019932306542886009,
"loss": 1.0447,
"step": 260
},
{
"epoch": 0.14,
"grad_norm": 0.5806961337662231,
"learning_rate": 0.00019921505781087334,
"loss": 1.0641,
"step": 265
},
{
"epoch": 0.14,
"grad_norm": 0.5785730754024643,
"learning_rate": 0.00019909909308469398,
"loss": 1.0641,
"step": 270
},
{
"epoch": 0.14,
"grad_norm": 0.6439845195133915,
"learning_rate": 0.0001989751805507637,
"loss": 1.0379,
"step": 275
},
{
"epoch": 0.14,
"grad_norm": 0.7577913059227968,
"learning_rate": 0.00019884333014694345,
"loss": 1.0615,
"step": 280
},
{
"epoch": 0.15,
"grad_norm": 0.5837308337108719,
"learning_rate": 0.00019870355244771607,
"loss": 1.0336,
"step": 285
},
{
"epoch": 0.15,
"grad_norm": 0.7541736503066673,
"learning_rate": 0.00019855585866333835,
"loss": 1.0222,
"step": 290
},
{
"epoch": 0.15,
"grad_norm": 0.6795535317682991,
"learning_rate": 0.00019840026063894193,
"loss": 1.0258,
"step": 295
},
{
"epoch": 0.15,
"grad_norm": 0.689656925816617,
"learning_rate": 0.00019823677085358335,
"loss": 0.9866,
"step": 300
},
{
"epoch": 0.16,
"grad_norm": 0.7045654444597828,
"learning_rate": 0.00019806540241924317,
"loss": 1.0021,
"step": 305
},
{
"epoch": 0.16,
"grad_norm": 0.6180449086403829,
"learning_rate": 0.00019788616907977441,
"loss": 1.0219,
"step": 310
},
{
"epoch": 0.16,
"grad_norm": 0.5701105027464689,
"learning_rate": 0.00019769908520980034,
"loss": 0.9847,
"step": 315
},
{
"epoch": 0.16,
"grad_norm": 0.5537984015468944,
"learning_rate": 0.00019750416581356146,
"loss": 1.0,
"step": 320
},
{
"epoch": 0.17,
"grad_norm": 0.6833838044298907,
"learning_rate": 0.00019730142652371236,
"loss": 0.8635,
"step": 325
},
{
"epoch": 0.17,
"grad_norm": 0.5813455184047317,
"learning_rate": 0.0001970908836000678,
"loss": 0.997,
"step": 330
},
{
"epoch": 0.17,
"grad_norm": 0.6313319230462269,
"learning_rate": 0.00019687255392829877,
"loss": 0.9987,
"step": 335
},
{
"epoch": 0.17,
"grad_norm": 0.5573490795657496,
"learning_rate": 0.0001966464550185782,
"loss": 0.9653,
"step": 340
},
{
"epoch": 0.18,
"grad_norm": 0.6092024871043954,
"learning_rate": 0.0001964126050041767,
"loss": 0.9458,
"step": 345
},
{
"epoch": 0.18,
"grad_norm": 0.6115494865750609,
"learning_rate": 0.0001961710226400081,
"loss": 0.9615,
"step": 350
},
{
"epoch": 0.18,
"grad_norm": 0.5535897413532389,
"learning_rate": 0.00019592172730112544,
"loss": 0.9839,
"step": 355
},
{
"epoch": 0.18,
"grad_norm": 0.6263110834281318,
"learning_rate": 0.00019566473898116713,
"loss": 1.0255,
"step": 360
},
{
"epoch": 0.19,
"grad_norm": 0.5608728944736717,
"learning_rate": 0.0001954000782907532,
"loss": 0.9807,
"step": 365
},
{
"epoch": 0.19,
"grad_norm": 0.6198849215248433,
"learning_rate": 0.00019512776645583263,
"loss": 1.0045,
"step": 370
},
{
"epoch": 0.19,
"grad_norm": 0.6518869268765236,
"learning_rate": 0.00019484782531598073,
"loss": 1.0182,
"step": 375
},
{
"epoch": 0.19,
"grad_norm": 0.6147354518783447,
"learning_rate": 0.00019456027732264784,
"loss": 1.0562,
"step": 380
},
{
"epoch": 0.2,
"grad_norm": 0.7034046264875483,
"learning_rate": 0.00019426514553735848,
"loss": 1.0275,
"step": 385
},
{
"epoch": 0.2,
"grad_norm": 0.6470371702412977,
"learning_rate": 0.00019396245362986197,
"loss": 0.9855,
"step": 390
},
{
"epoch": 0.2,
"grad_norm": 0.8462303447592061,
"learning_rate": 0.00019365222587623405,
"loss": 0.9597,
"step": 395
},
{
"epoch": 0.21,
"grad_norm": 0.70696032039657,
"learning_rate": 0.00019333448715692995,
"loss": 0.9876,
"step": 400
},
{
"epoch": 0.21,
"grad_norm": 0.6992261363160817,
"learning_rate": 0.00019300926295478884,
"loss": 1.0496,
"step": 405
},
{
"epoch": 0.21,
"grad_norm": 0.6287969115482221,
"learning_rate": 0.0001926765793529902,
"loss": 0.9818,
"step": 410
},
{
"epoch": 0.21,
"grad_norm": 0.6033095827786854,
"learning_rate": 0.00019233646303296205,
"loss": 1.0182,
"step": 415
},
{
"epoch": 0.22,
"grad_norm": 0.5533011483845743,
"learning_rate": 0.00019198894127224074,
"loss": 0.9962,
"step": 420
},
{
"epoch": 0.22,
"grad_norm": 0.6201915047692829,
"learning_rate": 0.0001916340419422837,
"loss": 1.0262,
"step": 425
},
{
"epoch": 0.22,
"grad_norm": 0.5583086448116769,
"learning_rate": 0.00019127179350623372,
"loss": 0.9653,
"step": 430
},
{
"epoch": 0.22,
"grad_norm": 0.545706410257052,
"learning_rate": 0.0001909022250166365,
"loss": 0.9701,
"step": 435
},
{
"epoch": 0.23,
"grad_norm": 0.5871441440851668,
"learning_rate": 0.00019052536611311046,
"loss": 0.9989,
"step": 440
},
{
"epoch": 0.23,
"grad_norm": 0.6058835348436328,
"learning_rate": 0.00019014124701996973,
"loss": 1.0205,
"step": 445
},
{
"epoch": 0.23,
"grad_norm": 0.5424393589308903,
"learning_rate": 0.00018974989854379996,
"loss": 1.0115,
"step": 450
},
{
"epoch": 0.23,
"grad_norm": 0.5495968011417558,
"learning_rate": 0.00018935135207098785,
"loss": 0.9739,
"step": 455
},
{
"epoch": 0.24,
"grad_norm": 0.6460814347415735,
"learning_rate": 0.00018894563956520374,
"loss": 0.9834,
"step": 460
},
{
"epoch": 0.24,
"grad_norm": 0.6927571456774314,
"learning_rate": 0.00018853279356483826,
"loss": 1.0604,
"step": 465
},
{
"epoch": 0.24,
"grad_norm": 0.5972892728481296,
"learning_rate": 0.00018811284718039256,
"loss": 0.8846,
"step": 470
},
{
"epoch": 0.24,
"grad_norm": 0.6756823948196232,
"learning_rate": 0.00018768583409182305,
"loss": 0.9724,
"step": 475
},
{
"epoch": 0.25,
"grad_norm": 0.6414326267094836,
"learning_rate": 0.00018725178854584007,
"loss": 0.9999,
"step": 480
},
{
"epoch": 0.25,
"grad_norm": 0.4267450177221903,
"learning_rate": 0.00018681074535316125,
"loss": 1.0166,
"step": 485
},
{
"epoch": 0.25,
"grad_norm": 0.674540606199243,
"learning_rate": 0.00018636273988571991,
"loss": 0.9443,
"step": 490
},
{
"epoch": 0.25,
"grad_norm": 0.602538634498083,
"learning_rate": 0.0001859078080738279,
"loss": 0.8926,
"step": 495
},
{
"epoch": 0.26,
"grad_norm": 0.4830980379560266,
"learning_rate": 0.00018544598640329432,
"loss": 0.9935,
"step": 500
},
{
"epoch": 0.26,
"grad_norm": 0.5137026500621041,
"learning_rate": 0.00018497731191249894,
"loss": 1.0321,
"step": 505
},
{
"epoch": 0.26,
"grad_norm": 0.4971143852572462,
"learning_rate": 0.000184501822189422,
"loss": 0.9987,
"step": 510
},
{
"epoch": 0.26,
"grad_norm": 0.5832308344575914,
"learning_rate": 0.00018401955536862948,
"loss": 0.9982,
"step": 515
},
{
"epoch": 0.27,
"grad_norm": 0.5642536664799819,
"learning_rate": 0.0001835305501282148,
"loss": 1.0141,
"step": 520
},
{
"epoch": 0.27,
"grad_norm": 0.5581579974154871,
"learning_rate": 0.00018303484568669667,
"loss": 0.9638,
"step": 525
},
{
"epoch": 0.27,
"grad_norm": 0.5004766080374342,
"learning_rate": 0.00018253248179987388,
"loss": 0.9234,
"step": 530
},
{
"epoch": 0.27,
"grad_norm": 0.6210162393902359,
"learning_rate": 0.0001820234987576368,
"loss": 0.9425,
"step": 535
},
{
"epoch": 0.28,
"grad_norm": 0.6547040579736927,
"learning_rate": 0.00018150793738073602,
"loss": 0.9505,
"step": 540
},
{
"epoch": 0.28,
"grad_norm": 0.5834011700637657,
"learning_rate": 0.00018098583901750867,
"loss": 0.9746,
"step": 545
},
{
"epoch": 0.28,
"grad_norm": 0.541953374443734,
"learning_rate": 0.00018045724554056214,
"loss": 0.9509,
"step": 550
},
{
"epoch": 0.28,
"grad_norm": 0.6444191947988428,
"learning_rate": 0.0001799221993434159,
"loss": 1.0039,
"step": 555
},
{
"epoch": 0.29,
"grad_norm": 0.7964270243486418,
"learning_rate": 0.00017938074333710157,
"loss": 0.9757,
"step": 560
},
{
"epoch": 0.29,
"grad_norm": 0.5226746416776997,
"learning_rate": 0.00017883292094672128,
"loss": 0.9504,
"step": 565
},
{
"epoch": 0.29,
"grad_norm": 0.65124737755757,
"learning_rate": 0.00017827877610796514,
"loss": 0.9544,
"step": 570
},
{
"epoch": 0.3,
"grad_norm": 0.7158221455716812,
"learning_rate": 0.00017771835326358743,
"loss": 0.9516,
"step": 575
},
{
"epoch": 0.3,
"grad_norm": 0.49132330349359915,
"learning_rate": 0.00017715169735984233,
"loss": 0.8842,
"step": 580
},
{
"epoch": 0.3,
"grad_norm": 0.5678013679108999,
"learning_rate": 0.0001765788538428792,
"loss": 1.0361,
"step": 585
},
{
"epoch": 0.3,
"grad_norm": 0.823750077590662,
"learning_rate": 0.00017599986865509767,
"loss": 0.9684,
"step": 590
},
{
"epoch": 0.31,
"grad_norm": 0.5417330334399063,
"learning_rate": 0.00017541478823146327,
"loss": 0.9994,
"step": 595
},
{
"epoch": 0.31,
"grad_norm": 0.5198802461202927,
"learning_rate": 0.00017482365949578302,
"loss": 0.9508,
"step": 600
},
{
"epoch": 0.31,
"grad_norm": 0.6702176321184313,
"learning_rate": 0.00017422652985694237,
"loss": 1.0198,
"step": 605
},
{
"epoch": 0.31,
"grad_norm": 0.560905620678833,
"learning_rate": 0.00017362344720510278,
"loss": 0.938,
"step": 610
},
{
"epoch": 0.32,
"grad_norm": 0.5147053812587649,
"learning_rate": 0.00017301445990786102,
"loss": 1.0218,
"step": 615
},
{
"epoch": 0.32,
"grad_norm": 0.5230042702698835,
"learning_rate": 0.00017239961680637,
"loss": 0.9671,
"step": 620
},
{
"epoch": 0.32,
"grad_norm": 0.5089291221005183,
"learning_rate": 0.0001717789672114218,
"loss": 1.0768,
"step": 625
},
{
"epoch": 0.32,
"grad_norm": 0.5373454888150989,
"learning_rate": 0.0001711525608994927,
"loss": 0.9294,
"step": 630
},
{
"epoch": 0.33,
"grad_norm": 0.6573448357094268,
"learning_rate": 0.00017052044810875126,
"loss": 1.012,
"step": 635
},
{
"epoch": 0.33,
"grad_norm": 0.7255076412183874,
"learning_rate": 0.00016988267953502913,
"loss": 0.9455,
"step": 640
},
{
"epoch": 0.33,
"grad_norm": 0.5668899874656881,
"learning_rate": 0.00016923930632775516,
"loss": 0.9421,
"step": 645
},
{
"epoch": 0.33,
"grad_norm": 0.6473814859784376,
"learning_rate": 0.00016859038008585326,
"loss": 0.9557,
"step": 650
},
{
"epoch": 0.34,
"grad_norm": 0.5357232563827933,
"learning_rate": 0.0001679359528536041,
"loss": 0.9293,
"step": 655
},
{
"epoch": 0.34,
"grad_norm": 0.6962958792203687,
"learning_rate": 0.00016727607711647114,
"loss": 0.9858,
"step": 660
},
{
"epoch": 0.34,
"grad_norm": 0.6684228551139992,
"learning_rate": 0.00016661080579689132,
"loss": 0.9504,
"step": 665
},
{
"epoch": 0.34,
"grad_norm": 0.6904208242216073,
"learning_rate": 0.0001659401922500304,
"loss": 0.9549,
"step": 670
},
{
"epoch": 0.35,
"grad_norm": 0.6135377486595612,
"learning_rate": 0.00016526429025950424,
"loss": 0.9103,
"step": 675
},
{
"epoch": 0.35,
"grad_norm": 0.8048755936295905,
"learning_rate": 0.00016458315403306502,
"loss": 1.0206,
"step": 680
},
{
"epoch": 0.35,
"grad_norm": 0.5694485127709195,
"learning_rate": 0.0001638968381982538,
"loss": 0.9297,
"step": 685
},
{
"epoch": 0.35,
"grad_norm": 0.5009715669440923,
"learning_rate": 0.0001632053977980194,
"loss": 1.0001,
"step": 690
},
{
"epoch": 0.36,
"grad_norm": 0.7328949903159483,
"learning_rate": 0.000162508888286304,
"loss": 0.9723,
"step": 695
},
{
"epoch": 0.36,
"grad_norm": 0.4869756886514924,
"learning_rate": 0.00016180736552359553,
"loss": 0.9857,
"step": 700
},
{
"epoch": 0.36,
"grad_norm": 0.5988266011309781,
"learning_rate": 0.00016110088577244773,
"loss": 0.9974,
"step": 705
},
{
"epoch": 0.36,
"grad_norm": 0.5832139627460268,
"learning_rate": 0.00016038950569296785,
"loss": 0.9521,
"step": 710
},
{
"epoch": 0.37,
"grad_norm": 0.5451767530927838,
"learning_rate": 0.00015967328233827249,
"loss": 1.0182,
"step": 715
},
{
"epoch": 0.37,
"grad_norm": 0.5449980217944476,
"learning_rate": 0.00015895227314991178,
"loss": 0.9015,
"step": 720
},
{
"epoch": 0.37,
"grad_norm": 0.5907360254775007,
"learning_rate": 0.00015822653595326275,
"loss": 0.9417,
"step": 725
},
{
"epoch": 0.37,
"grad_norm": 0.5400537943088273,
"learning_rate": 0.00015749612895289152,
"loss": 0.9422,
"step": 730
},
{
"epoch": 0.38,
"grad_norm": 0.5064825015956783,
"learning_rate": 0.00015676111072788527,
"loss": 0.934,
"step": 735
},
{
"epoch": 0.38,
"grad_norm": 0.6571350146335108,
"learning_rate": 0.00015602154022715435,
"loss": 0.9773,
"step": 740
},
{
"epoch": 0.38,
"grad_norm": 0.5937852975224237,
"learning_rate": 0.0001552774767647043,
"loss": 0.9326,
"step": 745
},
{
"epoch": 0.38,
"grad_norm": 0.641806269703629,
"learning_rate": 0.0001545289800148789,
"loss": 0.9442,
"step": 750
},
{
"epoch": 0.39,
"grad_norm": 0.5528456624588054,
"learning_rate": 0.0001537761100075744,
"loss": 0.9647,
"step": 755
},
{
"epoch": 0.39,
"grad_norm": 0.48434800374011044,
"learning_rate": 0.00015301892712342482,
"loss": 1.0096,
"step": 760
},
{
"epoch": 0.39,
"grad_norm": 0.6244743068900727,
"learning_rate": 0.00015225749208895968,
"loss": 1.0001,
"step": 765
},
{
"epoch": 0.4,
"grad_norm": 0.5249083150123138,
"learning_rate": 0.0001514918659717335,
"loss": 1.0094,
"step": 770
},
{
"epoch": 0.4,
"grad_norm": 0.43566025199134073,
"learning_rate": 0.00015072211017542813,
"loss": 0.9764,
"step": 775
},
{
"epoch": 0.4,
"grad_norm": 0.5711743136651194,
"learning_rate": 0.00014994828643492827,
"loss": 0.9272,
"step": 780
},
{
"epoch": 0.4,
"grad_norm": 0.60505774234033,
"learning_rate": 0.00014917045681137026,
"loss": 0.9517,
"step": 785
},
{
"epoch": 0.41,
"grad_norm": 0.5907336852413628,
"learning_rate": 0.0001483886836871646,
"loss": 0.8963,
"step": 790
},
{
"epoch": 0.41,
"grad_norm": 0.5668039672917672,
"learning_rate": 0.00014760302976099304,
"loss": 0.9351,
"step": 795
},
{
"epoch": 0.41,
"grad_norm": 0.6183265688599311,
"learning_rate": 0.00014681355804278001,
"loss": 0.929,
"step": 800
},
{
"epoch": 0.41,
"grad_norm": 0.5228297188568685,
"learning_rate": 0.00014602033184863913,
"loss": 0.9423,
"step": 805
},
{
"epoch": 0.42,
"grad_norm": 0.5825920103885267,
"learning_rate": 0.00014522341479579533,
"loss": 1.0569,
"step": 810
},
{
"epoch": 0.42,
"grad_norm": 0.4464310996674067,
"learning_rate": 0.00014442287079748263,
"loss": 0.9586,
"step": 815
},
{
"epoch": 0.42,
"grad_norm": 0.6333648770684857,
"learning_rate": 0.00014361876405781832,
"loss": 0.9339,
"step": 820
},
{
"epoch": 0.42,
"grad_norm": 0.6843102633208052,
"learning_rate": 0.00014281115906665374,
"loss": 1.0101,
"step": 825
},
{
"epoch": 0.43,
"grad_norm": 0.682767304475877,
"learning_rate": 0.00014200012059440207,
"loss": 0.9401,
"step": 830
},
{
"epoch": 0.43,
"grad_norm": 0.6346670091662654,
"learning_rate": 0.00014118571368684383,
"loss": 1.0214,
"step": 835
},
{
"epoch": 0.43,
"grad_norm": 0.5285515290136157,
"learning_rate": 0.00014036800365991008,
"loss": 0.8947,
"step": 840
},
{
"epoch": 0.43,
"grad_norm": 0.6934341045263628,
"learning_rate": 0.00013954705609444404,
"loss": 0.9522,
"step": 845
},
{
"epoch": 0.44,
"grad_norm": 0.5590521203537681,
"learning_rate": 0.00013872293683094152,
"loss": 0.9858,
"step": 850
},
{
"epoch": 0.44,
"grad_norm": 0.5394025617625665,
"learning_rate": 0.00013789571196427055,
"loss": 0.9618,
"step": 855
},
{
"epoch": 0.44,
"grad_norm": 0.7093137352486473,
"learning_rate": 0.00013706544783837022,
"loss": 0.9181,
"step": 860
},
{
"epoch": 0.44,
"grad_norm": 0.5843800357504492,
"learning_rate": 0.00013623221104093025,
"loss": 1.0353,
"step": 865
},
{
"epoch": 0.45,
"grad_norm": 0.5282682802123938,
"learning_rate": 0.00013539606839805036,
"loss": 0.9598,
"step": 870
},
{
"epoch": 0.45,
"grad_norm": 0.6182336870717448,
"learning_rate": 0.00013455708696888085,
"loss": 0.9739,
"step": 875
},
{
"epoch": 0.45,
"grad_norm": 0.4944756900400104,
"learning_rate": 0.00013371533404024438,
"loss": 0.9666,
"step": 880
},
{
"epoch": 0.45,
"grad_norm": 0.5561462565668212,
"learning_rate": 0.00013287087712123962,
"loss": 0.993,
"step": 885
},
{
"epoch": 0.46,
"grad_norm": 0.5199945115266794,
"learning_rate": 0.00013202378393782692,
"loss": 0.9531,
"step": 890
},
{
"epoch": 0.46,
"grad_norm": 0.785481290043052,
"learning_rate": 0.00013117412242739655,
"loss": 0.9016,
"step": 895
},
{
"epoch": 0.46,
"grad_norm": 0.5729234206011395,
"learning_rate": 0.00013032196073332027,
"loss": 1.005,
"step": 900
},
{
"epoch": 0.46,
"grad_norm": 0.5585440149214443,
"learning_rate": 0.00012946736719948607,
"loss": 0.9502,
"step": 905
},
{
"epoch": 0.47,
"grad_norm": 0.4565863543737188,
"learning_rate": 0.000128610410364817,
"loss": 0.919,
"step": 910
},
{
"epoch": 0.47,
"grad_norm": 0.5786781967413278,
"learning_rate": 0.00012775115895777417,
"loss": 0.9903,
"step": 915
},
{
"epoch": 0.47,
"grad_norm": 0.6868391625974688,
"learning_rate": 0.00012688968189084493,
"loss": 0.9071,
"step": 920
},
{
"epoch": 0.47,
"grad_norm": 0.5949451575560524,
"learning_rate": 0.00012602604825501587,
"loss": 0.952,
"step": 925
},
{
"epoch": 0.48,
"grad_norm": 0.49941217932653975,
"learning_rate": 0.00012516032731423165,
"loss": 0.9553,
"step": 930
},
{
"epoch": 0.48,
"grad_norm": 0.49875673508692636,
"learning_rate": 0.00012429258849984014,
"loss": 0.9577,
"step": 935
},
{
"epoch": 0.48,
"grad_norm": 0.5758562331929419,
"learning_rate": 0.00012342290140502388,
"loss": 0.9588,
"step": 940
},
{
"epoch": 0.48,
"grad_norm": 0.5716459683552323,
"learning_rate": 0.00012255133577921868,
"loss": 0.9682,
"step": 945
},
{
"epoch": 0.49,
"grad_norm": 0.5751480135349096,
"learning_rate": 0.0001216779615225197,
"loss": 0.9077,
"step": 950
},
{
"epoch": 0.49,
"grad_norm": 0.5536118287704823,
"learning_rate": 0.00012080284868007541,
"loss": 0.9558,
"step": 955
},
{
"epoch": 0.49,
"grad_norm": 0.5588777035679103,
"learning_rate": 0.0001199260674364699,
"loss": 0.9967,
"step": 960
},
{
"epoch": 0.5,
"grad_norm": 0.5276201974330456,
"learning_rate": 0.00011904768811009405,
"loss": 0.9988,
"step": 965
},
{
"epoch": 0.5,
"grad_norm": 0.46984996689813247,
"learning_rate": 0.00011816778114750593,
"loss": 0.9758,
"step": 970
},
{
"epoch": 0.5,
"grad_norm": 0.5611961144848049,
"learning_rate": 0.00011728641711778103,
"loss": 0.985,
"step": 975
},
{
"epoch": 0.5,
"grad_norm": 0.6319408131083156,
"learning_rate": 0.00011640366670685248,
"loss": 0.8907,
"step": 980
},
{
"epoch": 0.51,
"grad_norm": 0.5293146044665091,
"learning_rate": 0.00011551960071184195,
"loss": 1.0671,
"step": 985
},
{
"epoch": 0.51,
"grad_norm": 0.6706471971919105,
"learning_rate": 0.00011463429003538196,
"loss": 0.9676,
"step": 990
},
{
"epoch": 0.51,
"grad_norm": 0.5204112131635378,
"learning_rate": 0.000113747805679929,
"loss": 0.9911,
"step": 995
},
{
"epoch": 0.51,
"grad_norm": 0.6549165813585143,
"learning_rate": 0.00011286021874206952,
"loss": 0.8786,
"step": 1000
},
{
"epoch": 0.52,
"grad_norm": 0.6283426473463526,
"learning_rate": 0.00011197160040681762,
"loss": 1.0463,
"step": 1005
},
{
"epoch": 0.52,
"grad_norm": 0.5247142018840861,
"learning_rate": 0.0001110820219419062,
"loss": 0.9606,
"step": 1010
},
{
"epoch": 0.52,
"grad_norm": 0.5412197805712075,
"learning_rate": 0.0001101915546920711,
"loss": 0.9537,
"step": 1015
},
{
"epoch": 0.52,
"grad_norm": 0.5798251496755219,
"learning_rate": 0.00010930027007332923,
"loss": 0.8932,
"step": 1020
},
{
"epoch": 0.53,
"grad_norm": 0.5193293183359045,
"learning_rate": 0.00010840823956725103,
"loss": 0.9508,
"step": 1025
},
{
"epoch": 0.53,
"grad_norm": 0.6391579358496834,
"learning_rate": 0.00010751553471522757,
"loss": 0.9459,
"step": 1030
},
{
"epoch": 0.53,
"grad_norm": 0.5227690643146805,
"learning_rate": 0.00010662222711273279,
"loss": 0.9126,
"step": 1035
},
{
"epoch": 0.53,
"grad_norm": 0.5308459210039934,
"learning_rate": 0.00010572838840358168,
"loss": 0.8953,
"step": 1040
},
{
"epoch": 0.54,
"grad_norm": 0.5973102406710197,
"learning_rate": 0.00010483409027418425,
"loss": 0.9044,
"step": 1045
},
{
"epoch": 0.54,
"grad_norm": 0.5525502762157085,
"learning_rate": 0.00010393940444779635,
"loss": 0.8888,
"step": 1050
},
{
"epoch": 0.54,
"grad_norm": 0.554344441815975,
"learning_rate": 0.00010304440267876727,
"loss": 0.9933,
"step": 1055
},
{
"epoch": 0.54,
"grad_norm": 0.6685703202794607,
"learning_rate": 0.00010214915674678523,
"loss": 1.0149,
"step": 1060
},
{
"epoch": 0.55,
"grad_norm": 0.6772754515780753,
"learning_rate": 0.00010125373845112034,
"loss": 1.0032,
"step": 1065
},
{
"epoch": 0.55,
"grad_norm": 0.6187824044367312,
"learning_rate": 0.00010035821960486643,
"loss": 0.9702,
"step": 1070
},
{
"epoch": 0.55,
"grad_norm": 0.597714433473837,
"learning_rate": 9.946267202918157e-05,
"loss": 0.9099,
"step": 1075
},
{
"epoch": 0.55,
"grad_norm": 0.7143634751478495,
"learning_rate": 9.856716754752796e-05,
"loss": 0.9121,
"step": 1080
},
{
"epoch": 0.56,
"grad_norm": 0.7288519087084562,
"learning_rate": 9.767177797991155e-05,
"loss": 0.9707,
"step": 1085
},
{
"epoch": 0.56,
"grad_norm": 0.49542828924180315,
"learning_rate": 9.677657513712221e-05,
"loss": 0.9351,
"step": 1090
},
{
"epoch": 0.56,
"grad_norm": 0.5452916350887772,
"learning_rate": 9.588163081497427e-05,
"loss": 0.9974,
"step": 1095
},
{
"epoch": 0.56,
"grad_norm": 0.5715663852183912,
"learning_rate": 9.498701678854865e-05,
"loss": 0.9487,
"step": 1100
},
{
"epoch": 0.57,
"grad_norm": 0.7054168386655822,
"learning_rate": 9.409280480643628e-05,
"loss": 1.0026,
"step": 1105
},
{
"epoch": 0.57,
"grad_norm": 0.6519243831429753,
"learning_rate": 9.319906658498389e-05,
"loss": 0.9671,
"step": 1110
},
{
"epoch": 0.57,
"grad_norm": 0.582887206130515,
"learning_rate": 9.230587380254237e-05,
"loss": 0.9386,
"step": 1115
},
{
"epoch": 0.57,
"grad_norm": 0.4949755086611896,
"learning_rate": 9.141329809371803e-05,
"loss": 0.9591,
"step": 1120
},
{
"epoch": 0.58,
"grad_norm": 0.4943961781824456,
"learning_rate": 9.052141104362748e-05,
"loss": 0.9073,
"step": 1125
},
{
"epoch": 0.58,
"grad_norm": 0.5270998178306187,
"learning_rate": 8.963028418215653e-05,
"loss": 1.0398,
"step": 1130
},
{
"epoch": 0.58,
"grad_norm": 0.5436414584750344,
"learning_rate": 8.873998897822336e-05,
"loss": 1.0081,
"step": 1135
},
{
"epoch": 0.58,
"grad_norm": 0.6006724397110956,
"learning_rate": 8.785059683404672e-05,
"loss": 0.907,
"step": 1140
},
{
"epoch": 0.59,
"grad_norm": 0.5858991337618975,
"learning_rate": 8.696217907941941e-05,
"loss": 0.9704,
"step": 1145
},
{
"epoch": 0.59,
"grad_norm": 0.6336960468249235,
"learning_rate": 8.607480696598762e-05,
"loss": 0.8657,
"step": 1150
},
{
"epoch": 0.59,
"grad_norm": 0.6821228089691559,
"learning_rate": 8.518855166153644e-05,
"loss": 0.9866,
"step": 1155
},
{
"epoch": 0.6,
"grad_norm": 0.6579782787846559,
"learning_rate": 8.43034842442822e-05,
"loss": 0.9269,
"step": 1160
},
{
"epoch": 0.6,
"grad_norm": 0.5982336421144313,
"learning_rate": 8.341967569717202e-05,
"loss": 0.8777,
"step": 1165
},
{
"epoch": 0.6,
"grad_norm": 0.5916145755048947,
"learning_rate": 8.253719690219079e-05,
"loss": 0.8993,
"step": 1170
},
{
"epoch": 0.6,
"grad_norm": 0.6628706839411986,
"learning_rate": 8.165611863467644e-05,
"loss": 0.9237,
"step": 1175
},
{
"epoch": 0.61,
"grad_norm": 0.6186499409261252,
"learning_rate": 8.077651155764387e-05,
"loss": 0.909,
"step": 1180
},
{
"epoch": 0.61,
"grad_norm": 0.6270566611648508,
"learning_rate": 7.98984462161175e-05,
"loss": 0.9872,
"step": 1185
},
{
"epoch": 0.61,
"grad_norm": 0.49188248769091625,
"learning_rate": 7.902199303147363e-05,
"loss": 0.9727,
"step": 1190
},
{
"epoch": 0.61,
"grad_norm": 0.6028335578974087,
"learning_rate": 7.814722229579264e-05,
"loss": 0.9669,
"step": 1195
},
{
"epoch": 0.62,
"grad_norm": 0.6973261056681701,
"learning_rate": 7.727420416622144e-05,
"loss": 0.9766,
"step": 1200
},
{
"epoch": 0.62,
"grad_norm": 0.5143256884961153,
"learning_rate": 7.640300865934687e-05,
"loss": 0.8867,
"step": 1205
},
{
"epoch": 0.62,
"grad_norm": 0.6299144486624495,
"learning_rate": 7.553370564558032e-05,
"loss": 0.9441,
"step": 1210
},
{
"epoch": 0.62,
"grad_norm": 0.5557528022666806,
"learning_rate": 7.46663648435541e-05,
"loss": 0.9413,
"step": 1215
},
{
"epoch": 0.63,
"grad_norm": 0.7226400816486974,
"learning_rate": 7.380105581452987e-05,
"loss": 0.9136,
"step": 1220
},
{
"epoch": 0.63,
"grad_norm": 0.7819213582462599,
"learning_rate": 7.293784795681994e-05,
"loss": 0.9883,
"step": 1225
},
{
"epoch": 0.63,
"grad_norm": 0.6958218647866733,
"learning_rate": 7.207681050022132e-05,
"loss": 0.9167,
"step": 1230
},
{
"epoch": 0.63,
"grad_norm": 0.6991056805756082,
"learning_rate": 7.121801250046363e-05,
"loss": 1.051,
"step": 1235
},
{
"epoch": 0.64,
"grad_norm": 0.6351495151123887,
"learning_rate": 7.036152283367056e-05,
"loss": 0.9071,
"step": 1240
},
{
"epoch": 0.64,
"grad_norm": 0.6164155295421646,
"learning_rate": 6.950741019083617e-05,
"loss": 0.9473,
"step": 1245
},
{
"epoch": 0.64,
"grad_norm": 0.5900471052118252,
"learning_rate": 6.865574307231575e-05,
"loss": 0.9172,
"step": 1250
},
{
"epoch": 0.64,
"grad_norm": 0.6427050961944598,
"learning_rate": 6.780658978233199e-05,
"loss": 0.9233,
"step": 1255
},
{
"epoch": 0.65,
"grad_norm": 0.5914275642400165,
"learning_rate": 6.696001842349702e-05,
"loss": 0.9553,
"step": 1260
},
{
"epoch": 0.65,
"grad_norm": 0.5903220050672707,
"learning_rate": 6.611609689135056e-05,
"loss": 1.0132,
"step": 1265
},
{
"epoch": 0.65,
"grad_norm": 0.6203225118528879,
"learning_rate": 6.527489286891459e-05,
"loss": 0.8127,
"step": 1270
},
{
"epoch": 0.65,
"grad_norm": 0.5856640681538932,
"learning_rate": 6.443647382126509e-05,
"loss": 0.9417,
"step": 1275
},
{
"epoch": 0.66,
"grad_norm": 0.5907416106814382,
"learning_rate": 6.360090699012145e-05,
"loss": 0.9256,
"step": 1280
},
{
"epoch": 0.66,
"grad_norm": 0.694762408414599,
"learning_rate": 6.27682593884535e-05,
"loss": 0.8563,
"step": 1285
},
{
"epoch": 0.66,
"grad_norm": 0.5748383692634573,
"learning_rate": 6.193859779510712e-05,
"loss": 0.9731,
"step": 1290
},
{
"epoch": 0.66,
"grad_norm": 0.5522664873126419,
"learning_rate": 6.111198874944845e-05,
"loss": 0.9628,
"step": 1295
},
{
"epoch": 0.67,
"grad_norm": 0.6244838837652286,
"learning_rate": 6.0288498546027536e-05,
"loss": 0.9977,
"step": 1300
},
{
"epoch": 0.67,
"grad_norm": 0.6593958989520354,
"learning_rate": 5.946819322926127e-05,
"loss": 0.8868,
"step": 1305
},
{
"epoch": 0.67,
"grad_norm": 0.6250444628536131,
"learning_rate": 5.865113858813673e-05,
"loss": 0.9323,
"step": 1310
},
{
"epoch": 0.67,
"grad_norm": 0.5603599185671703,
"learning_rate": 5.783740015093484e-05,
"loss": 0.9485,
"step": 1315
},
{
"epoch": 0.68,
"grad_norm": 0.5416452489859486,
"learning_rate": 5.702704317997492e-05,
"loss": 0.8906,
"step": 1320
},
{
"epoch": 0.68,
"grad_norm": 0.5838747222633762,
"learning_rate": 5.6220132666380635e-05,
"loss": 0.8753,
"step": 1325
},
{
"epoch": 0.68,
"grad_norm": 0.6367658524681706,
"learning_rate": 5.541673332486773e-05,
"loss": 0.9795,
"step": 1330
},
{
"epoch": 0.68,
"grad_norm": 0.5483558623464918,
"learning_rate": 5.4616909588553674e-05,
"loss": 1.0483,
"step": 1335
},
{
"epoch": 0.69,
"grad_norm": 0.5671871464464842,
"learning_rate": 5.3820725603790346e-05,
"loss": 1.0056,
"step": 1340
},
{
"epoch": 0.69,
"grad_norm": 0.5988569806406934,
"learning_rate": 5.30282452250193e-05,
"loss": 1.0,
"step": 1345
},
{
"epoch": 0.69,
"grad_norm": 0.6321287035374099,
"learning_rate": 5.223953200965055e-05,
"loss": 0.9543,
"step": 1350
},
{
"epoch": 0.7,
"grad_norm": 0.6084770034095928,
"learning_rate": 5.145464921296537e-05,
"loss": 1.0273,
"step": 1355
},
{
"epoch": 0.7,
"grad_norm": 0.6428630984078048,
"learning_rate": 5.067365978304315e-05,
"loss": 0.9044,
"step": 1360
},
{
"epoch": 0.7,
"grad_norm": 0.5893194855174623,
"learning_rate": 4.9896626355712805e-05,
"loss": 0.9063,
"step": 1365
},
{
"epoch": 0.7,
"grad_norm": 0.4921659875065239,
"learning_rate": 4.912361124952948e-05,
"loss": 0.9298,
"step": 1370
},
{
"epoch": 0.71,
"grad_norm": 0.5627425640573014,
"learning_rate": 4.835467646077656e-05,
"loss": 0.8949,
"step": 1375
},
{
"epoch": 0.71,
"grad_norm": 0.5451819842328514,
"learning_rate": 4.7589883658493296e-05,
"loss": 0.9023,
"step": 1380
},
{
"epoch": 0.71,
"grad_norm": 0.6670392381651776,
"learning_rate": 4.682929417952939e-05,
"loss": 0.8963,
"step": 1385
},
{
"epoch": 0.71,
"grad_norm": 0.5404964530016563,
"learning_rate": 4.6072969023625165e-05,
"loss": 0.9294,
"step": 1390
},
{
"epoch": 0.72,
"grad_norm": 0.5885948513715824,
"learning_rate": 4.532096884851978e-05,
"loss": 0.9305,
"step": 1395
},
{
"epoch": 0.72,
"grad_norm": 0.677308652772576,
"learning_rate": 4.457335396508631e-05,
"loss": 0.941,
"step": 1400
},
{
"epoch": 0.72,
"grad_norm": 0.5923886672950903,
"learning_rate": 4.383018433249464e-05,
"loss": 0.9247,
"step": 1405
},
{
"epoch": 0.72,
"grad_norm": 0.6068470747970809,
"learning_rate": 4.309151955340297e-05,
"loss": 0.9073,
"step": 1410
},
{
"epoch": 0.73,
"grad_norm": 0.6148911890194271,
"learning_rate": 4.2357418869177354e-05,
"loss": 0.9802,
"step": 1415
},
{
"epoch": 0.73,
"grad_norm": 0.588117904888433,
"learning_rate": 4.162794115514078e-05,
"loss": 0.9891,
"step": 1420
},
{
"epoch": 0.73,
"grad_norm": 0.6114108759667229,
"learning_rate": 4.0903144915851174e-05,
"loss": 0.8589,
"step": 1425
},
{
"epoch": 0.73,
"grad_norm": 0.6185666138350563,
"learning_rate": 4.018308828040924e-05,
"loss": 0.8638,
"step": 1430
},
{
"epoch": 0.74,
"grad_norm": 0.5480613590223707,
"learning_rate": 3.946782899779667e-05,
"loss": 0.9168,
"step": 1435
},
{
"epoch": 0.74,
"grad_norm": 0.6489537352043999,
"learning_rate": 3.875742443224451e-05,
"loss": 0.9301,
"step": 1440
},
{
"epoch": 0.74,
"grad_norm": 0.5367367394357284,
"learning_rate": 3.805193155863247e-05,
"loss": 0.8793,
"step": 1445
},
{
"epoch": 0.74,
"grad_norm": 0.6012102396037162,
"learning_rate": 3.7351406957919636e-05,
"loss": 0.9563,
"step": 1450
},
{
"epoch": 0.75,
"grad_norm": 0.590005525454247,
"learning_rate": 3.665590681260658e-05,
"loss": 0.9294,
"step": 1455
},
{
"epoch": 0.75,
"grad_norm": 0.6511563172493143,
"learning_rate": 3.59654869022294e-05,
"loss": 0.9818,
"step": 1460
},
{
"epoch": 0.75,
"grad_norm": 0.5452617923742074,
"learning_rate": 3.5280202598886324e-05,
"loss": 0.9657,
"step": 1465
},
{
"epoch": 0.75,
"grad_norm": 0.5760478140556998,
"learning_rate": 3.4600108862796796e-05,
"loss": 0.9278,
"step": 1470
},
{
"epoch": 0.76,
"grad_norm": 0.5911423702374802,
"learning_rate": 3.392526023789349e-05,
"loss": 0.9636,
"step": 1475
},
{
"epoch": 0.76,
"grad_norm": 0.5953216616642104,
"learning_rate": 3.325571084744803e-05,
"loss": 0.8844,
"step": 1480
},
{
"epoch": 0.76,
"grad_norm": 0.5981414617910379,
"learning_rate": 3.259151438973024e-05,
"loss": 1.0117,
"step": 1485
},
{
"epoch": 0.76,
"grad_norm": 0.5068066924211629,
"learning_rate": 3.1932724133701344e-05,
"loss": 0.8872,
"step": 1490
},
{
"epoch": 0.77,
"grad_norm": 0.6570028376509454,
"learning_rate": 3.1279392914742046e-05,
"loss": 0.9528,
"step": 1495
},
{
"epoch": 0.77,
"grad_norm": 0.5819613958583445,
"learning_rate": 3.06315731304148e-05,
"loss": 0.9808,
"step": 1500
},
{
"epoch": 0.77,
"grad_norm": 0.589225937599524,
"learning_rate": 2.998931673626175e-05,
"loss": 0.878,
"step": 1505
},
{
"epoch": 0.77,
"grad_norm": 0.6728179853090486,
"learning_rate": 2.935267524163774e-05,
"loss": 0.9823,
"step": 1510
},
{
"epoch": 0.78,
"grad_norm": 0.607677834187811,
"learning_rate": 2.872169970557913e-05,
"loss": 0.9809,
"step": 1515
},
{
"epoch": 0.78,
"grad_norm": 0.5993094591120297,
"learning_rate": 2.8096440732709083e-05,
"loss": 1.0067,
"step": 1520
},
{
"epoch": 0.78,
"grad_norm": 0.7163645293312754,
"learning_rate": 2.7476948469178887e-05,
"loss": 0.945,
"step": 1525
},
{
"epoch": 0.79,
"grad_norm": 0.8906369625548493,
"learning_rate": 2.6863272598646106e-05,
"loss": 0.9236,
"step": 1530
},
{
"epoch": 0.79,
"grad_norm": 0.6089231774752915,
"learning_rate": 2.625546233829016e-05,
"loss": 0.9728,
"step": 1535
},
{
"epoch": 0.79,
"grad_norm": 0.6104709388797124,
"learning_rate": 2.5653566434864928e-05,
"loss": 0.9479,
"step": 1540
},
{
"epoch": 0.79,
"grad_norm": 0.7791530184080401,
"learning_rate": 2.5057633160789184e-05,
"loss": 0.948,
"step": 1545
},
{
"epoch": 0.8,
"grad_norm": 0.709845131462345,
"learning_rate": 2.446771031027527e-05,
"loss": 1.0,
"step": 1550
},
{
"epoch": 0.8,
"grad_norm": 0.6080152903490742,
"learning_rate": 2.3883845195495878e-05,
"loss": 1.0058,
"step": 1555
},
{
"epoch": 0.8,
"grad_norm": 0.5438944886854205,
"learning_rate": 2.330608464278953e-05,
"loss": 0.953,
"step": 1560
},
{
"epoch": 0.8,
"grad_norm": 0.5930998056874279,
"learning_rate": 2.273447498890521e-05,
"loss": 1.0004,
"step": 1565
},
{
"epoch": 0.81,
"grad_norm": 0.5810558184625885,
"learning_rate": 2.2169062077286075e-05,
"loss": 0.8924,
"step": 1570
},
{
"epoch": 0.81,
"grad_norm": 0.5481485799413437,
"learning_rate": 2.1609891254392678e-05,
"loss": 0.8746,
"step": 1575
},
{
"epoch": 0.81,
"grad_norm": 0.6720183177684913,
"learning_rate": 2.1057007366066373e-05,
"loss": 0.9695,
"step": 1580
},
{
"epoch": 0.81,
"grad_norm": 0.6797442462916037,
"learning_rate": 2.0510454753932395e-05,
"loss": 0.8939,
"step": 1585
},
{
"epoch": 0.82,
"grad_norm": 0.5596464643777617,
"learning_rate": 1.9970277251843862e-05,
"loss": 0.9292,
"step": 1590
},
{
"epoch": 0.82,
"grad_norm": 0.6488035615139027,
"learning_rate": 1.9436518182366158e-05,
"loss": 0.9621,
"step": 1595
},
{
"epoch": 0.82,
"grad_norm": 0.4935882125762104,
"learning_rate": 1.8909220353302392e-05,
"loss": 0.9643,
"step": 1600
},
{
"epoch": 0.82,
"grad_norm": 0.6047717852333419,
"learning_rate": 1.838842605426031e-05,
"loss": 0.867,
"step": 1605
},
{
"epoch": 0.83,
"grad_norm": 0.5014441507225457,
"learning_rate": 1.7874177053260598e-05,
"loss": 0.8701,
"step": 1610
},
{
"epoch": 0.83,
"grad_norm": 0.7302416854127579,
"learning_rate": 1.736651459338695e-05,
"loss": 0.8848,
"step": 1615
},
{
"epoch": 0.83,
"grad_norm": 0.5429237046617295,
"learning_rate": 1.6865479389478545e-05,
"loss": 0.9346,
"step": 1620
},
{
"epoch": 0.83,
"grad_norm": 0.46994493569441587,
"learning_rate": 1.6371111624864543e-05,
"loss": 0.9287,
"step": 1625
},
{
"epoch": 0.84,
"grad_norm": 0.6773296484834816,
"learning_rate": 1.5883450948141377e-05,
"loss": 0.917,
"step": 1630
},
{
"epoch": 0.84,
"grad_norm": 0.5688638275907638,
"learning_rate": 1.540253646999299e-05,
"loss": 0.9712,
"step": 1635
},
{
"epoch": 0.84,
"grad_norm": 0.578080268472627,
"learning_rate": 1.4928406760054059e-05,
"loss": 0.9306,
"step": 1640
},
{
"epoch": 0.84,
"grad_norm": 0.6302794537726676,
"learning_rate": 1.4461099843816684e-05,
"loss": 0.9232,
"step": 1645
},
{
"epoch": 0.85,
"grad_norm": 0.49280186551718264,
"learning_rate": 1.4000653199580782e-05,
"loss": 0.9611,
"step": 1650
},
{
"epoch": 0.85,
"grad_norm": 0.6115403656603358,
"learning_rate": 1.3547103755448287e-05,
"loss": 0.9517,
"step": 1655
},
{
"epoch": 0.85,
"grad_norm": 0.51925307604453,
"learning_rate": 1.3100487886361379e-05,
"loss": 0.9937,
"step": 1660
},
{
"epoch": 0.85,
"grad_norm": 0.5064214043364438,
"learning_rate": 1.266084141118542e-05,
"loss": 0.9242,
"step": 1665
},
{
"epoch": 0.86,
"grad_norm": 0.5561993252855665,
"learning_rate": 1.2228199589835999e-05,
"loss": 0.9522,
"step": 1670
},
{
"epoch": 0.86,
"grad_norm": 0.71115653041109,
"learning_rate": 1.1802597120451286e-05,
"loss": 0.9338,
"step": 1675
},
{
"epoch": 0.86,
"grad_norm": 0.7080136085925859,
"learning_rate": 1.1384068136609105e-05,
"loss": 0.9668,
"step": 1680
},
{
"epoch": 0.86,
"grad_norm": 0.5456074918264524,
"learning_rate": 1.0972646204589377e-05,
"loss": 0.9033,
"step": 1685
},
{
"epoch": 0.87,
"grad_norm": 0.5871555079803276,
"learning_rate": 1.0568364320682178e-05,
"loss": 0.965,
"step": 1690
},
{
"epoch": 0.87,
"grad_norm": 0.7587850959355414,
"learning_rate": 1.0171254908541372e-05,
"loss": 0.8784,
"step": 1695
},
{
"epoch": 0.87,
"grad_norm": 0.5931254843624938,
"learning_rate": 9.781349816584162e-06,
"loss": 0.9237,
"step": 1700
},
{
"epoch": 0.87,
"grad_norm": 0.5522279462287333,
"learning_rate": 9.398680315436903e-06,
"loss": 0.9318,
"step": 1705
},
{
"epoch": 0.88,
"grad_norm": 0.6606437965491642,
"learning_rate": 9.023277095427173e-06,
"loss": 0.8832,
"step": 1710
},
{
"epoch": 0.88,
"grad_norm": 0.5347581974370543,
"learning_rate": 8.655170264122303e-06,
"loss": 0.9649,
"step": 1715
},
{
"epoch": 0.88,
"grad_norm": 0.569971554201631,
"learning_rate": 8.294389343914899e-06,
"loss": 0.9481,
"step": 1720
},
{
"epoch": 0.89,
"grad_norm": 0.5799598578609663,
"learning_rate": 7.940963269654922e-06,
"loss": 0.984,
"step": 1725
},
{
"epoch": 0.89,
"grad_norm": 0.508439501372817,
"learning_rate": 7.594920386329252e-06,
"loss": 0.9323,
"step": 1730
},
{
"epoch": 0.89,
"grad_norm": 0.5392312394716691,
"learning_rate": 7.256288446788362e-06,
"loss": 0.8642,
"step": 1735
},
{
"epoch": 0.89,
"grad_norm": 0.6372004622147013,
"learning_rate": 6.925094609520455e-06,
"loss": 0.9459,
"step": 1740
},
{
"epoch": 0.9,
"grad_norm": 0.5458674801987001,
"learning_rate": 6.601365436473439e-06,
"loss": 0.9105,
"step": 1745
},
{
"epoch": 0.9,
"grad_norm": 0.6319326369907332,
"learning_rate": 6.2851268909245865e-06,
"loss": 0.8993,
"step": 1750
},
{
"epoch": 0.9,
"grad_norm": 0.5794934716448419,
"learning_rate": 5.976404335398256e-06,
"loss": 0.9603,
"step": 1755
},
{
"epoch": 0.9,
"grad_norm": 0.4699717958115534,
"learning_rate": 5.675222529631841e-06,
"loss": 0.9261,
"step": 1760
},
{
"epoch": 0.91,
"grad_norm": 0.57505099385829,
"learning_rate": 5.381605628590003e-06,
"loss": 0.9259,
"step": 1765
},
{
"epoch": 0.91,
"grad_norm": 0.5384181396504312,
"learning_rate": 5.095577180527378e-06,
"loss": 0.945,
"step": 1770
},
{
"epoch": 0.91,
"grad_norm": 0.5763786879303221,
"learning_rate": 4.817160125100106e-06,
"loss": 0.9952,
"step": 1775
},
{
"epoch": 0.91,
"grad_norm": 0.6144158360042297,
"learning_rate": 4.546376791525975e-06,
"loss": 0.939,
"step": 1780
},
{
"epoch": 0.92,
"grad_norm": 0.5294965542763461,
"learning_rate": 4.2832488967935795e-06,
"loss": 0.9409,
"step": 1785
},
{
"epoch": 0.92,
"grad_norm": 0.5465968329315932,
"learning_rate": 4.02779754392072e-06,
"loss": 0.9537,
"step": 1790
},
{
"epoch": 0.92,
"grad_norm": 0.6825003581486967,
"learning_rate": 3.780043220261764e-06,
"loss": 0.962,
"step": 1795
},
{
"epoch": 0.92,
"grad_norm": 0.7740508723901224,
"learning_rate": 3.540005795864709e-06,
"loss": 0.8662,
"step": 1800
},
{
"epoch": 0.93,
"grad_norm": 0.6117641287526261,
"learning_rate": 3.3077045218775192e-06,
"loss": 0.9738,
"step": 1805
},
{
"epoch": 0.93,
"grad_norm": 0.6144596680128399,
"learning_rate": 3.0831580290041184e-06,
"loss": 0.9681,
"step": 1810
},
{
"epoch": 0.93,
"grad_norm": 0.4863368300808097,
"learning_rate": 2.8663843260103074e-06,
"loss": 0.9441,
"step": 1815
},
{
"epoch": 0.93,
"grad_norm": 0.6705366204063254,
"learning_rate": 2.6574007982793857e-06,
"loss": 0.9245,
"step": 1820
},
{
"epoch": 0.94,
"grad_norm": 0.5981824483720629,
"learning_rate": 2.456224206417812e-06,
"loss": 0.8876,
"step": 1825
},
{
"epoch": 0.94,
"grad_norm": 0.5935237688273688,
"learning_rate": 2.262870684911045e-06,
"loss": 0.9668,
"step": 1830
},
{
"epoch": 0.94,
"grad_norm": 0.5490125452588106,
"learning_rate": 2.0773557408295343e-06,
"loss": 0.9267,
"step": 1835
},
{
"epoch": 0.94,
"grad_norm": 0.5496625170920385,
"learning_rate": 1.8996942525850047e-06,
"loss": 0.9234,
"step": 1840
},
{
"epoch": 0.95,
"grad_norm": 0.5898640164879884,
"learning_rate": 1.7299004687372665e-06,
"loss": 0.9056,
"step": 1845
},
{
"epoch": 0.95,
"grad_norm": 0.5865173851650085,
"learning_rate": 1.5679880068514174e-06,
"loss": 0.8887,
"step": 1850
},
{
"epoch": 0.95,
"grad_norm": 0.6479065299224441,
"learning_rate": 1.4139698524057165e-06,
"loss": 0.9735,
"step": 1855
},
{
"epoch": 0.95,
"grad_norm": 0.605031411419469,
"learning_rate": 1.2678583577501624e-06,
"loss": 0.9502,
"step": 1860
},
{
"epoch": 0.96,
"grad_norm": 0.6422022183689675,
"learning_rate": 1.1296652411158182e-06,
"loss": 0.9532,
"step": 1865
},
{
"epoch": 0.96,
"grad_norm": 0.5683134883261641,
"learning_rate": 9.994015856749527e-07,
"loss": 0.9018,
"step": 1870
},
{
"epoch": 0.96,
"grad_norm": 0.5670406276249282,
"learning_rate": 8.770778386522627e-07,
"loss": 0.8641,
"step": 1875
},
{
"epoch": 0.96,
"grad_norm": 0.6402924492380171,
"learning_rate": 7.627038104869199e-07,
"loss": 0.9406,
"step": 1880
},
{
"epoch": 0.97,
"grad_norm": 0.5661451995345069,
"learning_rate": 6.562886740457797e-07,
"loss": 0.9253,
"step": 1885
},
{
"epoch": 0.97,
"grad_norm": 0.543051786191807,
"learning_rate": 5.578409638877457e-07,
"loss": 0.8438,
"step": 1890
},
{
"epoch": 0.97,
"grad_norm": 0.6344918861064833,
"learning_rate": 4.6736857557925227e-07,
"loss": 0.9096,
"step": 1895
},
{
"epoch": 0.97,
"grad_norm": 0.6321985312404009,
"learning_rate": 3.8487876506106966e-07,
"loss": 0.999,
"step": 1900
},
{
"epoch": 0.98,
"grad_norm": 0.5761138814263015,
"learning_rate": 3.1037814806634815e-07,
"loss": 0.8999,
"step": 1905
},
{
"epoch": 0.98,
"grad_norm": 0.563402854869171,
"learning_rate": 2.43872699590042e-07,
"loss": 0.8701,
"step": 1910
},
{
"epoch": 0.98,
"grad_norm": 0.5851940875823938,
"learning_rate": 1.8536775340970425e-07,
"loss": 0.9819,
"step": 1915
},
{
"epoch": 0.99,
"grad_norm": 0.680412012321503,
"learning_rate": 1.348680016577397e-07,
"loss": 0.9709,
"step": 1920
},
{
"epoch": 0.99,
"grad_norm": 0.6111077894411105,
"learning_rate": 9.237749444505062e-08,
"loss": 0.9239,
"step": 1925
},
{
"epoch": 0.99,
"grad_norm": 0.5854544638099273,
"learning_rate": 5.7899639536251883e-08,
"loss": 0.9427,
"step": 1930
},
{
"epoch": 0.99,
"grad_norm": 0.5999468918700691,
"learning_rate": 3.143720207635648e-08,
"loss": 0.9426,
"step": 1935
},
{
"epoch": 1.0,
"grad_norm": 0.690930860173269,
"learning_rate": 1.299230436898613e-08,
"loss": 0.9732,
"step": 1940
},
{
"epoch": 1.0,
"grad_norm": 0.6048544401601688,
"learning_rate": 2.566425706218567e-09,
"loss": 0.8806,
"step": 1945
},
{
"epoch": 1.0,
"eval_loss": NaN,
"eval_runtime": 1978.203,
"eval_samples_per_second": 3.504,
"eval_steps_per_second": 0.876,
"step": 1949
},
{
"epoch": 1.0,
"step": 1949,
"total_flos": 1.2135540828143616e+16,
"train_loss": 1.0064294319877019,
"train_runtime": 18083.1645,
"train_samples_per_second": 3.448,
"train_steps_per_second": 0.108
}
],
"logging_steps": 5,
"max_steps": 1949,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 1.2135540828143616e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}