{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2,
"eval_steps": 1000,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 31.81644058227539,
"learning_rate": 8e-08,
"loss": 1.5083,
"step": 10
},
{
"epoch": 0.0,
"grad_norm": 16.249370574951172,
"learning_rate": 1.7000000000000001e-07,
"loss": 1.2724,
"step": 20
},
{
"epoch": 0.0,
"grad_norm": 16.85301971435547,
"learning_rate": 2.7e-07,
"loss": 1.2571,
"step": 30
},
{
"epoch": 0.0,
"grad_norm": 12.382726669311523,
"learning_rate": 3.7e-07,
"loss": 1.101,
"step": 40
},
{
"epoch": 0.0,
"grad_norm": 43.00498962402344,
"learning_rate": 4.6999999999999995e-07,
"loss": 1.2847,
"step": 50
},
{
"epoch": 0.0,
"grad_norm": 16.686534881591797,
"learning_rate": 5.6e-07,
"loss": 0.8594,
"step": 60
},
{
"epoch": 0.0,
"grad_norm": 23.844993591308594,
"learning_rate": 6.6e-07,
"loss": 0.7645,
"step": 70
},
{
"epoch": 0.0,
"grad_norm": 15.943971633911133,
"learning_rate": 7.599999999999999e-07,
"loss": 0.7681,
"step": 80
},
{
"epoch": 0.0,
"grad_norm": 12.707146644592285,
"learning_rate": 8.599999999999999e-07,
"loss": 0.8728,
"step": 90
},
{
"epoch": 0.01,
"grad_norm": 13.066338539123535,
"learning_rate": 9.6e-07,
"loss": 0.7508,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 21.36861801147461,
"learning_rate": 9.993939393939394e-07,
"loss": 0.999,
"step": 110
},
{
"epoch": 0.01,
"grad_norm": 23.451967239379883,
"learning_rate": 9.983838383838383e-07,
"loss": 0.8396,
"step": 120
},
{
"epoch": 0.01,
"grad_norm": 14.662145614624023,
"learning_rate": 9.973737373737373e-07,
"loss": 1.0884,
"step": 130
},
{
"epoch": 0.01,
"grad_norm": 9.530464172363281,
"learning_rate": 9.963636363636362e-07,
"loss": 0.8505,
"step": 140
},
{
"epoch": 0.01,
"grad_norm": 30.15620231628418,
"learning_rate": 9.953535353535352e-07,
"loss": 0.971,
"step": 150
},
{
"epoch": 0.01,
"grad_norm": 43.50494384765625,
"learning_rate": 9.943434343434343e-07,
"loss": 0.9758,
"step": 160
},
{
"epoch": 0.01,
"grad_norm": 18.704565048217773,
"learning_rate": 9.933333333333333e-07,
"loss": 1.1095,
"step": 170
},
{
"epoch": 0.01,
"grad_norm": 51.077552795410156,
"learning_rate": 9.923232323232322e-07,
"loss": 1.0251,
"step": 180
},
{
"epoch": 0.01,
"grad_norm": 32.405906677246094,
"learning_rate": 9.913131313131314e-07,
"loss": 0.8793,
"step": 190
},
{
"epoch": 0.01,
"grad_norm": 35.2808952331543,
"learning_rate": 9.903030303030303e-07,
"loss": 0.6785,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 17.738399505615234,
"learning_rate": 9.892929292929293e-07,
"loss": 0.8258,
"step": 210
},
{
"epoch": 0.01,
"grad_norm": 8.202994346618652,
"learning_rate": 9.882828282828282e-07,
"loss": 0.8756,
"step": 220
},
{
"epoch": 0.01,
"grad_norm": 16.26487159729004,
"learning_rate": 9.872727272727272e-07,
"loss": 1.0467,
"step": 230
},
{
"epoch": 0.01,
"grad_norm": 32.37431335449219,
"learning_rate": 9.862626262626263e-07,
"loss": 0.9119,
"step": 240
},
{
"epoch": 0.01,
"grad_norm": 5.586151599884033,
"learning_rate": 9.852525252525253e-07,
"loss": 0.8556,
"step": 250
},
{
"epoch": 0.01,
"grad_norm": 16.249475479125977,
"learning_rate": 9.842424242424242e-07,
"loss": 0.7945,
"step": 260
},
{
"epoch": 0.01,
"grad_norm": 60.965965270996094,
"learning_rate": 9.832323232323231e-07,
"loss": 1.0668,
"step": 270
},
{
"epoch": 0.01,
"grad_norm": 19.940616607666016,
"learning_rate": 9.82222222222222e-07,
"loss": 0.6501,
"step": 280
},
{
"epoch": 0.01,
"grad_norm": 26.526460647583008,
"learning_rate": 9.812121212121212e-07,
"loss": 1.0423,
"step": 290
},
{
"epoch": 0.01,
"grad_norm": 27.455795288085938,
"learning_rate": 9.802020202020202e-07,
"loss": 0.6693,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 18.113489151000977,
"learning_rate": 9.791919191919191e-07,
"loss": 0.4784,
"step": 310
},
{
"epoch": 0.02,
"grad_norm": 29.75452423095703,
"learning_rate": 9.78181818181818e-07,
"loss": 0.5004,
"step": 320
},
{
"epoch": 0.02,
"grad_norm": 24.82709503173828,
"learning_rate": 9.77171717171717e-07,
"loss": 0.8543,
"step": 330
},
{
"epoch": 0.02,
"grad_norm": 22.23027801513672,
"learning_rate": 9.761616161616162e-07,
"loss": 0.796,
"step": 340
},
{
"epoch": 0.02,
"grad_norm": 9.593890190124512,
"learning_rate": 9.751515151515151e-07,
"loss": 0.672,
"step": 350
},
{
"epoch": 0.02,
"grad_norm": 19.550216674804688,
"learning_rate": 9.74141414141414e-07,
"loss": 0.5915,
"step": 360
},
{
"epoch": 0.02,
"grad_norm": 18.101367950439453,
"learning_rate": 9.731313131313132e-07,
"loss": 0.8967,
"step": 370
},
{
"epoch": 0.02,
"grad_norm": 6.973883152008057,
"learning_rate": 9.721212121212122e-07,
"loss": 0.7841,
"step": 380
},
{
"epoch": 0.02,
"grad_norm": 18.076982498168945,
"learning_rate": 9.711111111111111e-07,
"loss": 0.7783,
"step": 390
},
{
"epoch": 0.02,
"grad_norm": 1.5223013162612915,
"learning_rate": 9.7010101010101e-07,
"loss": 0.8647,
"step": 400
},
{
"epoch": 0.02,
"grad_norm": 14.444634437561035,
"learning_rate": 9.69090909090909e-07,
"loss": 0.6745,
"step": 410
},
{
"epoch": 0.02,
"grad_norm": 41.90908432006836,
"learning_rate": 9.680808080808082e-07,
"loss": 1.0749,
"step": 420
},
{
"epoch": 0.02,
"grad_norm": 27.912317276000977,
"learning_rate": 9.67070707070707e-07,
"loss": 0.5588,
"step": 430
},
{
"epoch": 0.02,
"grad_norm": 22.31749725341797,
"learning_rate": 9.66060606060606e-07,
"loss": 0.7683,
"step": 440
},
{
"epoch": 0.02,
"grad_norm": 57.88829040527344,
"learning_rate": 9.65050505050505e-07,
"loss": 0.5442,
"step": 450
},
{
"epoch": 0.02,
"grad_norm": 43.25718688964844,
"learning_rate": 9.64040404040404e-07,
"loss": 0.8028,
"step": 460
},
{
"epoch": 0.02,
"grad_norm": 21.001134872436523,
"learning_rate": 9.630303030303029e-07,
"loss": 0.7472,
"step": 470
},
{
"epoch": 0.02,
"grad_norm": 13.919650077819824,
"learning_rate": 9.62020202020202e-07,
"loss": 0.5318,
"step": 480
},
{
"epoch": 0.02,
"grad_norm": 26.845335006713867,
"learning_rate": 9.61010101010101e-07,
"loss": 1.012,
"step": 490
},
{
"epoch": 0.03,
"grad_norm": 20.0955867767334,
"learning_rate": 9.6e-07,
"loss": 0.9174,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 9.6511812210083,
"learning_rate": 9.589898989898989e-07,
"loss": 0.691,
"step": 510
},
{
"epoch": 0.03,
"grad_norm": 12.875068664550781,
"learning_rate": 9.579797979797978e-07,
"loss": 0.7781,
"step": 520
},
{
"epoch": 0.03,
"grad_norm": 28.99612045288086,
"learning_rate": 9.56969696969697e-07,
"loss": 0.7791,
"step": 530
},
{
"epoch": 0.03,
"grad_norm": 47.6656379699707,
"learning_rate": 9.55959595959596e-07,
"loss": 0.6166,
"step": 540
},
{
"epoch": 0.03,
"grad_norm": 59.9251708984375,
"learning_rate": 9.549494949494948e-07,
"loss": 0.6836,
"step": 550
},
{
"epoch": 0.03,
"grad_norm": 34.26139831542969,
"learning_rate": 9.53939393939394e-07,
"loss": 0.4317,
"step": 560
},
{
"epoch": 0.03,
"grad_norm": 47.75581359863281,
"learning_rate": 9.529292929292929e-07,
"loss": 0.5685,
"step": 570
},
{
"epoch": 0.03,
"grad_norm": 63.76834487915039,
"learning_rate": 9.519191919191919e-07,
"loss": 0.8805,
"step": 580
},
{
"epoch": 0.03,
"grad_norm": 22.971323013305664,
"learning_rate": 9.509090909090908e-07,
"loss": 0.8103,
"step": 590
},
{
"epoch": 0.03,
"grad_norm": 42.151161193847656,
"learning_rate": 9.498989898989899e-07,
"loss": 0.7563,
"step": 600
},
{
"epoch": 0.03,
"grad_norm": 13.12915325164795,
"learning_rate": 9.488888888888888e-07,
"loss": 0.6844,
"step": 610
},
{
"epoch": 0.03,
"grad_norm": 37.901973724365234,
"learning_rate": 9.478787878787879e-07,
"loss": 0.7467,
"step": 620
},
{
"epoch": 0.03,
"grad_norm": 29.114883422851562,
"learning_rate": 9.468686868686868e-07,
"loss": 0.7759,
"step": 630
},
{
"epoch": 0.03,
"grad_norm": 37.42153549194336,
"learning_rate": 9.458585858585858e-07,
"loss": 0.7221,
"step": 640
},
{
"epoch": 0.03,
"grad_norm": 46.287559509277344,
"learning_rate": 9.448484848484848e-07,
"loss": 0.7219,
"step": 650
},
{
"epoch": 0.03,
"grad_norm": 76.34626770019531,
"learning_rate": 9.438383838383838e-07,
"loss": 0.6938,
"step": 660
},
{
"epoch": 0.03,
"grad_norm": 20.480300903320312,
"learning_rate": 9.428282828282827e-07,
"loss": 0.8135,
"step": 670
},
{
"epoch": 0.03,
"grad_norm": 9.508841514587402,
"learning_rate": 9.418181818181818e-07,
"loss": 0.6024,
"step": 680
},
{
"epoch": 0.03,
"grad_norm": 29.385066986083984,
"learning_rate": 9.408080808080807e-07,
"loss": 0.8891,
"step": 690
},
{
"epoch": 0.04,
"grad_norm": 13.06400203704834,
"learning_rate": 9.397979797979797e-07,
"loss": 0.6723,
"step": 700
},
{
"epoch": 0.04,
"grad_norm": 36.94926071166992,
"learning_rate": 9.387878787878788e-07,
"loss": 0.5682,
"step": 710
},
{
"epoch": 0.04,
"grad_norm": 30.849035263061523,
"learning_rate": 9.377777777777777e-07,
"loss": 0.6693,
"step": 720
},
{
"epoch": 0.04,
"grad_norm": 27.781005859375,
"learning_rate": 9.367676767676768e-07,
"loss": 0.7705,
"step": 730
},
{
"epoch": 0.04,
"grad_norm": 5.268486976623535,
"learning_rate": 9.357575757575757e-07,
"loss": 0.7672,
"step": 740
},
{
"epoch": 0.04,
"grad_norm": 70.79887390136719,
"learning_rate": 9.347474747474747e-07,
"loss": 0.7617,
"step": 750
},
{
"epoch": 0.04,
"grad_norm": 71.6796875,
"learning_rate": 9.337373737373737e-07,
"loss": 0.5425,
"step": 760
},
{
"epoch": 0.04,
"grad_norm": 21.205890655517578,
"learning_rate": 9.327272727272727e-07,
"loss": 0.6768,
"step": 770
},
{
"epoch": 0.04,
"grad_norm": 38.276634216308594,
"learning_rate": 9.317171717171717e-07,
"loss": 0.8681,
"step": 780
},
{
"epoch": 0.04,
"grad_norm": 17.904993057250977,
"learning_rate": 9.307070707070707e-07,
"loss": 0.7128,
"step": 790
},
{
"epoch": 0.04,
"grad_norm": 32.66667938232422,
"learning_rate": 9.296969696969696e-07,
"loss": 0.8662,
"step": 800
},
{
"epoch": 0.04,
"grad_norm": 7.020589828491211,
"learning_rate": 9.286868686868687e-07,
"loss": 0.7344,
"step": 810
},
{
"epoch": 0.04,
"grad_norm": 33.822608947753906,
"learning_rate": 9.276767676767676e-07,
"loss": 0.7825,
"step": 820
},
{
"epoch": 0.04,
"grad_norm": 21.147552490234375,
"learning_rate": 9.266666666666665e-07,
"loss": 0.7716,
"step": 830
},
{
"epoch": 0.04,
"grad_norm": 22.41349983215332,
"learning_rate": 9.256565656565656e-07,
"loss": 0.4673,
"step": 840
},
{
"epoch": 0.04,
"grad_norm": 24.813737869262695,
"learning_rate": 9.246464646464645e-07,
"loss": 0.7137,
"step": 850
},
{
"epoch": 0.04,
"grad_norm": 31.02164649963379,
"learning_rate": 9.236363636363636e-07,
"loss": 1.0195,
"step": 860
},
{
"epoch": 0.04,
"grad_norm": 19.22023582458496,
"learning_rate": 9.226262626262625e-07,
"loss": 0.8201,
"step": 870
},
{
"epoch": 0.04,
"grad_norm": 21.087970733642578,
"learning_rate": 9.216161616161616e-07,
"loss": 0.6296,
"step": 880
},
{
"epoch": 0.04,
"grad_norm": 8.970023155212402,
"learning_rate": 9.206060606060606e-07,
"loss": 0.3712,
"step": 890
},
{
"epoch": 0.04,
"grad_norm": 28.498889923095703,
"learning_rate": 9.195959595959596e-07,
"loss": 0.7953,
"step": 900
},
{
"epoch": 0.05,
"grad_norm": 21.259479522705078,
"learning_rate": 9.185858585858585e-07,
"loss": 0.7346,
"step": 910
},
{
"epoch": 0.05,
"grad_norm": 13.871064186096191,
"learning_rate": 9.175757575757576e-07,
"loss": 0.6644,
"step": 920
},
{
"epoch": 0.05,
"grad_norm": 44.91512680053711,
"learning_rate": 9.165656565656565e-07,
"loss": 0.5467,
"step": 930
},
{
"epoch": 0.05,
"grad_norm": 11.775423049926758,
"learning_rate": 9.155555555555556e-07,
"loss": 0.5149,
"step": 940
},
{
"epoch": 0.05,
"grad_norm": 35.62663650512695,
"learning_rate": 9.145454545454545e-07,
"loss": 0.6002,
"step": 950
},
{
"epoch": 0.05,
"grad_norm": 32.12046813964844,
"learning_rate": 9.135353535353535e-07,
"loss": 0.8875,
"step": 960
},
{
"epoch": 0.05,
"grad_norm": 37.11963653564453,
"learning_rate": 9.126262626262626e-07,
"loss": 0.9515,
"step": 970
},
{
"epoch": 0.05,
"grad_norm": 21.641096115112305,
"learning_rate": 9.116161616161616e-07,
"loss": 0.7849,
"step": 980
},
{
"epoch": 0.05,
"grad_norm": 29.253921508789062,
"learning_rate": 9.106060606060606e-07,
"loss": 0.5926,
"step": 990
},
{
"epoch": 0.05,
"grad_norm": 27.01972198486328,
"learning_rate": 9.095959595959596e-07,
"loss": 0.7061,
"step": 1000
},
{
"epoch": 0.05,
"eval_loss": 0.725902259349823,
"eval_runtime": 274.8847,
"eval_samples_per_second": 3.638,
"eval_steps_per_second": 3.638,
"step": 1000
},
{
"epoch": 0.05,
"grad_norm": 16.84354019165039,
"learning_rate": 9.085858585858586e-07,
"loss": 0.6186,
"step": 1010
},
{
"epoch": 0.05,
"grad_norm": 36.384700775146484,
"learning_rate": 9.075757575757576e-07,
"loss": 0.8362,
"step": 1020
},
{
"epoch": 0.05,
"grad_norm": 37.640892028808594,
"learning_rate": 9.065656565656565e-07,
"loss": 0.7901,
"step": 1030
},
{
"epoch": 0.05,
"grad_norm": 44.735076904296875,
"learning_rate": 9.055555555555556e-07,
"loss": 0.5116,
"step": 1040
},
{
"epoch": 0.05,
"grad_norm": 46.5770263671875,
"learning_rate": 9.045454545454545e-07,
"loss": 0.7274,
"step": 1050
},
{
"epoch": 0.05,
"grad_norm": 0.10406364500522614,
"learning_rate": 9.035353535353534e-07,
"loss": 0.4903,
"step": 1060
},
{
"epoch": 0.05,
"grad_norm": 18.263145446777344,
"learning_rate": 9.025252525252525e-07,
"loss": 0.7371,
"step": 1070
},
{
"epoch": 0.05,
"grad_norm": 33.42209243774414,
"learning_rate": 9.015151515151514e-07,
"loss": 0.8111,
"step": 1080
},
{
"epoch": 0.05,
"grad_norm": 48.237525939941406,
"learning_rate": 9.005050505050504e-07,
"loss": 0.6233,
"step": 1090
},
{
"epoch": 0.06,
"grad_norm": 1.5421327352523804,
"learning_rate": 8.994949494949494e-07,
"loss": 0.6609,
"step": 1100
},
{
"epoch": 0.06,
"grad_norm": 29.761442184448242,
"learning_rate": 8.984848484848484e-07,
"loss": 0.4407,
"step": 1110
},
{
"epoch": 0.06,
"grad_norm": 31.435007095336914,
"learning_rate": 8.974747474747474e-07,
"loss": 0.7207,
"step": 1120
},
{
"epoch": 0.06,
"grad_norm": 28.20315170288086,
"learning_rate": 8.964646464646465e-07,
"loss": 0.6879,
"step": 1130
},
{
"epoch": 0.06,
"grad_norm": 19.431041717529297,
"learning_rate": 8.954545454545454e-07,
"loss": 0.7292,
"step": 1140
},
{
"epoch": 0.06,
"grad_norm": 40.8221435546875,
"learning_rate": 8.944444444444445e-07,
"loss": 0.9112,
"step": 1150
},
{
"epoch": 0.06,
"grad_norm": 4.42428731918335,
"learning_rate": 8.934343434343434e-07,
"loss": 0.484,
"step": 1160
},
{
"epoch": 0.06,
"grad_norm": 24.146841049194336,
"learning_rate": 8.924242424242425e-07,
"loss": 0.4881,
"step": 1170
},
{
"epoch": 0.06,
"grad_norm": 52.02534866333008,
"learning_rate": 8.914141414141414e-07,
"loss": 0.8042,
"step": 1180
},
{
"epoch": 0.06,
"grad_norm": 26.81230926513672,
"learning_rate": 8.904040404040404e-07,
"loss": 0.6995,
"step": 1190
},
{
"epoch": 0.06,
"grad_norm": 9.775701522827148,
"learning_rate": 8.893939393939394e-07,
"loss": 0.8203,
"step": 1200
},
{
"epoch": 0.06,
"grad_norm": 20.65981674194336,
"learning_rate": 8.883838383838383e-07,
"loss": 0.9154,
"step": 1210
},
{
"epoch": 0.06,
"grad_norm": 20.086986541748047,
"learning_rate": 8.873737373737373e-07,
"loss": 0.4731,
"step": 1220
},
{
"epoch": 0.06,
"grad_norm": 20.6555233001709,
"learning_rate": 8.863636363636363e-07,
"loss": 0.7798,
"step": 1230
},
{
"epoch": 0.06,
"grad_norm": 13.60240364074707,
"learning_rate": 8.853535353535353e-07,
"loss": 0.6382,
"step": 1240
},
{
"epoch": 0.06,
"grad_norm": 20.39061737060547,
"learning_rate": 8.843434343434343e-07,
"loss": 0.6077,
"step": 1250
},
{
"epoch": 0.06,
"grad_norm": 14.251206398010254,
"learning_rate": 8.833333333333333e-07,
"loss": 0.5736,
"step": 1260
},
{
"epoch": 0.06,
"grad_norm": 63.768611907958984,
"learning_rate": 8.823232323232322e-07,
"loss": 0.7059,
"step": 1270
},
{
"epoch": 0.06,
"grad_norm": 44.81045150756836,
"learning_rate": 8.813131313131313e-07,
"loss": 0.6352,
"step": 1280
},
{
"epoch": 0.06,
"grad_norm": 15.952017784118652,
"learning_rate": 8.803030303030302e-07,
"loss": 0.7523,
"step": 1290
},
{
"epoch": 0.07,
"grad_norm": 27.234148025512695,
"learning_rate": 8.792929292929293e-07,
"loss": 0.4924,
"step": 1300
},
{
"epoch": 0.07,
"grad_norm": 44.29439163208008,
"learning_rate": 8.782828282828283e-07,
"loss": 0.6422,
"step": 1310
},
{
"epoch": 0.07,
"grad_norm": 35.106658935546875,
"learning_rate": 8.772727272727273e-07,
"loss": 0.655,
"step": 1320
},
{
"epoch": 0.07,
"grad_norm": 2.1618399620056152,
"learning_rate": 8.762626262626263e-07,
"loss": 0.6463,
"step": 1330
},
{
"epoch": 0.07,
"grad_norm": 36.30659866333008,
"learning_rate": 8.752525252525253e-07,
"loss": 0.5618,
"step": 1340
},
{
"epoch": 0.07,
"grad_norm": 18.705991744995117,
"learning_rate": 8.742424242424242e-07,
"loss": 0.5887,
"step": 1350
},
{
"epoch": 0.07,
"grad_norm": 2.828080654144287,
"learning_rate": 8.732323232323232e-07,
"loss": 0.6611,
"step": 1360
},
{
"epoch": 0.07,
"grad_norm": 47.979820251464844,
"learning_rate": 8.722222222222222e-07,
"loss": 0.4975,
"step": 1370
},
{
"epoch": 0.07,
"grad_norm": 25.263946533203125,
"learning_rate": 8.712121212121211e-07,
"loss": 0.761,
"step": 1380
},
{
"epoch": 0.07,
"grad_norm": 0.21978527307510376,
"learning_rate": 8.702020202020202e-07,
"loss": 0.5013,
"step": 1390
},
{
"epoch": 0.07,
"grad_norm": 15.600090026855469,
"learning_rate": 8.691919191919191e-07,
"loss": 0.5375,
"step": 1400
},
{
"epoch": 0.07,
"grad_norm": 25.815698623657227,
"learning_rate": 8.681818181818182e-07,
"loss": 0.8176,
"step": 1410
},
{
"epoch": 0.07,
"grad_norm": 44.83120346069336,
"learning_rate": 8.671717171717171e-07,
"loss": 0.5207,
"step": 1420
},
{
"epoch": 0.07,
"grad_norm": 20.984037399291992,
"learning_rate": 8.661616161616161e-07,
"loss": 0.5024,
"step": 1430
},
{
"epoch": 0.07,
"grad_norm": 26.290699005126953,
"learning_rate": 8.651515151515151e-07,
"loss": 0.7017,
"step": 1440
},
{
"epoch": 0.07,
"grad_norm": 23.3577880859375,
"learning_rate": 8.641414141414141e-07,
"loss": 0.6419,
"step": 1450
},
{
"epoch": 0.07,
"grad_norm": 21.74049949645996,
"learning_rate": 8.63131313131313e-07,
"loss": 0.5426,
"step": 1460
},
{
"epoch": 0.07,
"grad_norm": 0.5753927230834961,
"learning_rate": 8.62121212121212e-07,
"loss": 0.6417,
"step": 1470
},
{
"epoch": 0.07,
"grad_norm": 36.96406936645508,
"learning_rate": 8.611111111111111e-07,
"loss": 0.6999,
"step": 1480
},
{
"epoch": 0.07,
"grad_norm": 0.05758470296859741,
"learning_rate": 8.601010101010102e-07,
"loss": 0.816,
"step": 1490
},
{
"epoch": 0.07,
"grad_norm": 51.20552062988281,
"learning_rate": 8.590909090909091e-07,
"loss": 0.7171,
"step": 1500
},
{
"epoch": 0.08,
"grad_norm": 34.43236541748047,
"learning_rate": 8.58080808080808e-07,
"loss": 0.7448,
"step": 1510
},
{
"epoch": 0.08,
"grad_norm": 89.9273681640625,
"learning_rate": 8.570707070707071e-07,
"loss": 0.6324,
"step": 1520
},
{
"epoch": 0.08,
"grad_norm": 9.697078704833984,
"learning_rate": 8.56060606060606e-07,
"loss": 0.7951,
"step": 1530
},
{
"epoch": 0.08,
"grad_norm": 28.903240203857422,
"learning_rate": 8.55050505050505e-07,
"loss": 0.6224,
"step": 1540
},
{
"epoch": 0.08,
"grad_norm": 19.746826171875,
"learning_rate": 8.54040404040404e-07,
"loss": 0.4143,
"step": 1550
},
{
"epoch": 0.08,
"grad_norm": 33.706146240234375,
"learning_rate": 8.53030303030303e-07,
"loss": 0.6803,
"step": 1560
},
{
"epoch": 0.08,
"grad_norm": 24.144351959228516,
"learning_rate": 8.52020202020202e-07,
"loss": 0.6015,
"step": 1570
},
{
"epoch": 0.08,
"grad_norm": 34.93477249145508,
"learning_rate": 8.51010101010101e-07,
"loss": 0.5599,
"step": 1580
},
{
"epoch": 0.08,
"grad_norm": 31.863859176635742,
"learning_rate": 8.499999999999999e-07,
"loss": 0.7127,
"step": 1590
},
{
"epoch": 0.08,
"grad_norm": 48.534549713134766,
"learning_rate": 8.48989898989899e-07,
"loss": 0.7777,
"step": 1600
},
{
"epoch": 0.08,
"grad_norm": 35.08165740966797,
"learning_rate": 8.479797979797979e-07,
"loss": 0.6571,
"step": 1610
},
{
"epoch": 0.08,
"grad_norm": 33.532325744628906,
"learning_rate": 8.469696969696968e-07,
"loss": 0.7386,
"step": 1620
},
{
"epoch": 0.08,
"grad_norm": 0.014142200350761414,
"learning_rate": 8.459595959595959e-07,
"loss": 0.5505,
"step": 1630
},
{
"epoch": 0.08,
"grad_norm": 29.99919319152832,
"learning_rate": 8.449494949494948e-07,
"loss": 0.4274,
"step": 1640
},
{
"epoch": 0.08,
"grad_norm": 13.206497192382812,
"learning_rate": 8.43939393939394e-07,
"loss": 0.4399,
"step": 1650
},
{
"epoch": 0.08,
"grad_norm": 17.408052444458008,
"learning_rate": 8.429292929292929e-07,
"loss": 0.5262,
"step": 1660
},
{
"epoch": 0.08,
"grad_norm": 14.05802059173584,
"learning_rate": 8.419191919191919e-07,
"loss": 0.5503,
"step": 1670
},
{
"epoch": 0.08,
"grad_norm": 13.401651382446289,
"learning_rate": 8.409090909090909e-07,
"loss": 0.813,
"step": 1680
},
{
"epoch": 0.08,
"grad_norm": 28.40484619140625,
"learning_rate": 8.398989898989899e-07,
"loss": 0.7321,
"step": 1690
},
{
"epoch": 0.09,
"grad_norm": 39.133670806884766,
"learning_rate": 8.388888888888888e-07,
"loss": 0.7129,
"step": 1700
},
{
"epoch": 0.09,
"grad_norm": 26.99220848083496,
"learning_rate": 8.378787878787879e-07,
"loss": 0.645,
"step": 1710
},
{
"epoch": 0.09,
"grad_norm": 22.875778198242188,
"learning_rate": 8.368686868686868e-07,
"loss": 0.6779,
"step": 1720
},
{
"epoch": 0.09,
"grad_norm": 37.771705627441406,
"learning_rate": 8.358585858585859e-07,
"loss": 0.6104,
"step": 1730
},
{
"epoch": 0.09,
"grad_norm": 17.48222541809082,
"learning_rate": 8.348484848484848e-07,
"loss": 0.5982,
"step": 1740
},
{
"epoch": 0.09,
"grad_norm": 13.396077156066895,
"learning_rate": 8.338383838383838e-07,
"loss": 0.6295,
"step": 1750
},
{
"epoch": 0.09,
"grad_norm": 48.97739791870117,
"learning_rate": 8.328282828282828e-07,
"loss": 0.8439,
"step": 1760
},
{
"epoch": 0.09,
"grad_norm": 74.33853149414062,
"learning_rate": 8.318181818181817e-07,
"loss": 1.0566,
"step": 1770
},
{
"epoch": 0.09,
"grad_norm": 7.2910051345825195,
"learning_rate": 8.308080808080807e-07,
"loss": 0.5226,
"step": 1780
},
{
"epoch": 0.09,
"grad_norm": 27.389278411865234,
"learning_rate": 8.297979797979797e-07,
"loss": 1.1226,
"step": 1790
},
{
"epoch": 0.09,
"grad_norm": 9.284111976623535,
"learning_rate": 8.287878787878787e-07,
"loss": 0.4101,
"step": 1800
},
{
"epoch": 0.09,
"grad_norm": 8.962563514709473,
"learning_rate": 8.277777777777777e-07,
"loss": 0.5349,
"step": 1810
},
{
"epoch": 0.09,
"grad_norm": 47.56216049194336,
"learning_rate": 8.267676767676768e-07,
"loss": 0.6863,
"step": 1820
},
{
"epoch": 0.09,
"grad_norm": 31.547109603881836,
"learning_rate": 8.257575757575757e-07,
"loss": 0.7484,
"step": 1830
},
{
"epoch": 0.09,
"grad_norm": 8.890266418457031,
"learning_rate": 8.247474747474748e-07,
"loss": 0.7518,
"step": 1840
},
{
"epoch": 0.09,
"grad_norm": 25.44080352783203,
"learning_rate": 8.237373737373737e-07,
"loss": 0.8552,
"step": 1850
},
{
"epoch": 0.09,
"grad_norm": 19.411556243896484,
"learning_rate": 8.227272727272727e-07,
"loss": 0.6285,
"step": 1860
},
{
"epoch": 0.09,
"grad_norm": 5.5483574867248535,
"learning_rate": 8.217171717171717e-07,
"loss": 0.5478,
"step": 1870
},
{
"epoch": 0.09,
"grad_norm": 22.145336151123047,
"learning_rate": 8.207070707070707e-07,
"loss": 0.7499,
"step": 1880
},
{
"epoch": 0.09,
"grad_norm": 30.043731689453125,
"learning_rate": 8.196969696969697e-07,
"loss": 0.7252,
"step": 1890
},
{
"epoch": 0.1,
"grad_norm": 39.9935417175293,
"learning_rate": 8.186868686868687e-07,
"loss": 0.7348,
"step": 1900
},
{
"epoch": 0.1,
"grad_norm": 5.8807525634765625,
"learning_rate": 8.176767676767676e-07,
"loss": 0.381,
"step": 1910
},
{
"epoch": 0.1,
"grad_norm": 16.082597732543945,
"learning_rate": 8.166666666666666e-07,
"loss": 0.6413,
"step": 1920
},
{
"epoch": 0.1,
"grad_norm": 15.713370323181152,
"learning_rate": 8.156565656565656e-07,
"loss": 0.4765,
"step": 1930
},
{
"epoch": 0.1,
"grad_norm": 43.45647048950195,
"learning_rate": 8.146464646464645e-07,
"loss": 0.6653,
"step": 1940
},
{
"epoch": 0.1,
"grad_norm": 51.137046813964844,
"learning_rate": 8.136363636363636e-07,
"loss": 0.6915,
"step": 1950
},
{
"epoch": 0.1,
"grad_norm": 33.2936897277832,
"learning_rate": 8.126262626262625e-07,
"loss": 0.4993,
"step": 1960
},
{
"epoch": 0.1,
"grad_norm": 10.87304973602295,
"learning_rate": 8.116161616161616e-07,
"loss": 0.7893,
"step": 1970
},
{
"epoch": 0.1,
"grad_norm": 0.774446427822113,
"learning_rate": 8.106060606060605e-07,
"loss": 0.5946,
"step": 1980
},
{
"epoch": 0.1,
"grad_norm": 26.221094131469727,
"learning_rate": 8.095959595959596e-07,
"loss": 0.8534,
"step": 1990
},
{
"epoch": 0.1,
"grad_norm": 46.51724624633789,
"learning_rate": 8.085858585858586e-07,
"loss": 0.4862,
"step": 2000
},
{
"epoch": 0.1,
"eval_loss": 0.6236868500709534,
"eval_runtime": 274.2428,
"eval_samples_per_second": 3.646,
"eval_steps_per_second": 3.646,
"step": 2000
},
{
"epoch": 0.1,
"grad_norm": 15.500929832458496,
"learning_rate": 8.075757575757576e-07,
"loss": 0.6197,
"step": 2010
},
{
"epoch": 0.1,
"grad_norm": 7.338476657867432,
"learning_rate": 8.065656565656565e-07,
"loss": 0.591,
"step": 2020
},
{
"epoch": 0.1,
"grad_norm": 28.497867584228516,
"learning_rate": 8.055555555555556e-07,
"loss": 0.5593,
"step": 2030
},
{
"epoch": 0.1,
"grad_norm": 15.371612548828125,
"learning_rate": 8.045454545454545e-07,
"loss": 0.53,
"step": 2040
},
{
"epoch": 0.1,
"grad_norm": 38.33600616455078,
"learning_rate": 8.035353535353536e-07,
"loss": 1.0264,
"step": 2050
},
{
"epoch": 0.1,
"grad_norm": 17.20992088317871,
"learning_rate": 8.025252525252525e-07,
"loss": 0.4638,
"step": 2060
},
{
"epoch": 0.1,
"grad_norm": 58.250579833984375,
"learning_rate": 8.015151515151514e-07,
"loss": 0.5415,
"step": 2070
},
{
"epoch": 0.1,
"grad_norm": 20.2476806640625,
"learning_rate": 8.005050505050505e-07,
"loss": 0.5162,
"step": 2080
},
{
"epoch": 0.1,
"grad_norm": 27.523902893066406,
"learning_rate": 7.994949494949494e-07,
"loss": 0.4223,
"step": 2090
},
{
"epoch": 0.1,
"grad_norm": 25.663818359375,
"learning_rate": 7.984848484848484e-07,
"loss": 0.6016,
"step": 2100
},
{
"epoch": 0.11,
"grad_norm": 25.809614181518555,
"learning_rate": 7.974747474747474e-07,
"loss": 1.0505,
"step": 2110
},
{
"epoch": 0.11,
"grad_norm": 52.98808670043945,
"learning_rate": 7.964646464646464e-07,
"loss": 1.1373,
"step": 2120
},
{
"epoch": 0.11,
"grad_norm": 15.879914283752441,
"learning_rate": 7.954545454545454e-07,
"loss": 0.5527,
"step": 2130
},
{
"epoch": 0.11,
"grad_norm": 16.313142776489258,
"learning_rate": 7.944444444444444e-07,
"loss": 0.4782,
"step": 2140
},
{
"epoch": 0.11,
"grad_norm": 39.388511657714844,
"learning_rate": 7.934343434343433e-07,
"loss": 0.9302,
"step": 2150
},
{
"epoch": 0.11,
"grad_norm": 27.657390594482422,
"learning_rate": 7.924242424242425e-07,
"loss": 0.4342,
"step": 2160
},
{
"epoch": 0.11,
"grad_norm": 3.85066819190979,
"learning_rate": 7.914141414141414e-07,
"loss": 0.7398,
"step": 2170
},
{
"epoch": 0.11,
"grad_norm": 20.403371810913086,
"learning_rate": 7.904040404040404e-07,
"loss": 0.7303,
"step": 2180
},
{
"epoch": 0.11,
"grad_norm": 19.42263412475586,
"learning_rate": 7.893939393939394e-07,
"loss": 0.7445,
"step": 2190
},
{
"epoch": 0.11,
"grad_norm": 21.36237144470215,
"learning_rate": 7.883838383838383e-07,
"loss": 0.6936,
"step": 2200
},
{
"epoch": 0.11,
"grad_norm": 27.883874893188477,
"learning_rate": 7.873737373737374e-07,
"loss": 0.6665,
"step": 2210
},
{
"epoch": 0.11,
"grad_norm": 16.943950653076172,
"learning_rate": 7.863636363636363e-07,
"loss": 0.6188,
"step": 2220
},
{
"epoch": 0.11,
"grad_norm": 117.27821350097656,
"learning_rate": 7.853535353535353e-07,
"loss": 0.4547,
"step": 2230
},
{
"epoch": 0.11,
"grad_norm": 16.697284698486328,
"learning_rate": 7.843434343434343e-07,
"loss": 0.3462,
"step": 2240
},
{
"epoch": 0.11,
"grad_norm": 9.313762664794922,
"learning_rate": 7.833333333333333e-07,
"loss": 0.5729,
"step": 2250
},
{
"epoch": 0.11,
"grad_norm": 43.60643005371094,
"learning_rate": 7.823232323232322e-07,
"loss": 0.8304,
"step": 2260
},
{
"epoch": 0.11,
"grad_norm": 22.356792449951172,
"learning_rate": 7.813131313131313e-07,
"loss": 0.5437,
"step": 2270
},
{
"epoch": 0.11,
"grad_norm": 32.53278732299805,
"learning_rate": 7.803030303030302e-07,
"loss": 1.0315,
"step": 2280
},
{
"epoch": 0.11,
"grad_norm": 46.504573822021484,
"learning_rate": 7.792929292929293e-07,
"loss": 0.6465,
"step": 2290
},
{
"epoch": 0.12,
"grad_norm": 7.453057765960693,
"learning_rate": 7.782828282828282e-07,
"loss": 0.6023,
"step": 2300
},
{
"epoch": 0.12,
"grad_norm": 73.96940612792969,
"learning_rate": 7.772727272727272e-07,
"loss": 0.7558,
"step": 2310
},
{
"epoch": 0.12,
"grad_norm": 31.267314910888672,
"learning_rate": 7.762626262626262e-07,
"loss": 0.527,
"step": 2320
},
{
"epoch": 0.12,
"grad_norm": 41.19007873535156,
"learning_rate": 7.752525252525253e-07,
"loss": 0.7043,
"step": 2330
},
{
"epoch": 0.12,
"grad_norm": 21.09952735900879,
"learning_rate": 7.742424242424243e-07,
"loss": 0.9584,
"step": 2340
},
{
"epoch": 0.12,
"grad_norm": 42.14522933959961,
"learning_rate": 7.732323232323232e-07,
"loss": 0.5388,
"step": 2350
},
{
"epoch": 0.12,
"grad_norm": 13.435091972351074,
"learning_rate": 7.722222222222222e-07,
"loss": 0.6348,
"step": 2360
},
{
"epoch": 0.12,
"grad_norm": 26.72304916381836,
"learning_rate": 7.712121212121212e-07,
"loss": 0.3811,
"step": 2370
},
{
"epoch": 0.12,
"grad_norm": 43.16832733154297,
"learning_rate": 7.702020202020202e-07,
"loss": 0.7426,
"step": 2380
},
{
"epoch": 0.12,
"grad_norm": 3.5628132820129395,
"learning_rate": 7.691919191919191e-07,
"loss": 0.4845,
"step": 2390
},
{
"epoch": 0.12,
"grad_norm": 52.14829635620117,
"learning_rate": 7.681818181818182e-07,
"loss": 0.6397,
"step": 2400
},
{
"epoch": 0.12,
"grad_norm": 33.835205078125,
"learning_rate": 7.671717171717171e-07,
"loss": 0.6927,
"step": 2410
},
{
"epoch": 0.12,
"grad_norm": 33.98677444458008,
"learning_rate": 7.661616161616161e-07,
"loss": 0.6525,
"step": 2420
},
{
"epoch": 0.12,
"grad_norm": 23.454181671142578,
"learning_rate": 7.651515151515151e-07,
"loss": 0.7971,
"step": 2430
},
{
"epoch": 0.12,
"grad_norm": 39.06905746459961,
"learning_rate": 7.641414141414141e-07,
"loss": 0.516,
"step": 2440
},
{
"epoch": 0.12,
"grad_norm": 36.33759307861328,
"learning_rate": 7.631313131313131e-07,
"loss": 0.6733,
"step": 2450
},
{
"epoch": 0.12,
"grad_norm": 39.87421417236328,
"learning_rate": 7.621212121212121e-07,
"loss": 0.827,
"step": 2460
},
{
"epoch": 0.12,
"grad_norm": 27.0250244140625,
"learning_rate": 7.61111111111111e-07,
"loss": 0.7167,
"step": 2470
},
{
"epoch": 0.12,
"grad_norm": 42.34687042236328,
"learning_rate": 7.6010101010101e-07,
"loss": 0.8127,
"step": 2480
},
{
"epoch": 0.12,
"grad_norm": 20.025238037109375,
"learning_rate": 7.59090909090909e-07,
"loss": 0.5936,
"step": 2490
},
{
"epoch": 0.12,
"grad_norm": 2.185176372528076,
"learning_rate": 7.580808080808081e-07,
"loss": 0.4552,
"step": 2500
},
{
"epoch": 0.13,
"grad_norm": 10.323554039001465,
"learning_rate": 7.570707070707071e-07,
"loss": 0.6821,
"step": 2510
},
{
"epoch": 0.13,
"grad_norm": 59.452205657958984,
"learning_rate": 7.56060606060606e-07,
"loss": 0.8375,
"step": 2520
},
{
"epoch": 0.13,
"grad_norm": 0.1424858570098877,
"learning_rate": 7.550505050505051e-07,
"loss": 0.3893,
"step": 2530
},
{
"epoch": 0.13,
"grad_norm": 19.907209396362305,
"learning_rate": 7.54040404040404e-07,
"loss": 0.6082,
"step": 2540
},
{
"epoch": 0.13,
"grad_norm": 3.8457748889923096,
"learning_rate": 7.53030303030303e-07,
"loss": 0.7132,
"step": 2550
},
{
"epoch": 0.13,
"grad_norm": 12.082324028015137,
"learning_rate": 7.52020202020202e-07,
"loss": 0.7951,
"step": 2560
},
{
"epoch": 0.13,
"grad_norm": 24.198322296142578,
"learning_rate": 7.51010101010101e-07,
"loss": 0.5836,
"step": 2570
},
{
"epoch": 0.13,
"grad_norm": 25.63511085510254,
"learning_rate": 7.5e-07,
"loss": 0.6352,
"step": 2580
},
{
"epoch": 0.13,
"grad_norm": 24.314882278442383,
"learning_rate": 7.48989898989899e-07,
"loss": 0.7092,
"step": 2590
},
{
"epoch": 0.13,
"grad_norm": 10.859787940979004,
"learning_rate": 7.479797979797979e-07,
"loss": 0.5931,
"step": 2600
},
{
"epoch": 0.13,
"grad_norm": 13.863005638122559,
"learning_rate": 7.46969696969697e-07,
"loss": 0.5174,
"step": 2610
},
{
"epoch": 0.13,
"grad_norm": 22.458776473999023,
"learning_rate": 7.459595959595959e-07,
"loss": 0.4968,
"step": 2620
},
{
"epoch": 0.13,
"grad_norm": 49.36175537109375,
"learning_rate": 7.449494949494948e-07,
"loss": 0.6513,
"step": 2630
},
{
"epoch": 0.13,
"grad_norm": 7.616428375244141,
"learning_rate": 7.439393939393939e-07,
"loss": 0.4914,
"step": 2640
},
{
"epoch": 0.13,
"grad_norm": 4.416635036468506,
"learning_rate": 7.429292929292928e-07,
"loss": 0.2537,
"step": 2650
},
{
"epoch": 0.13,
"grad_norm": 16.390323638916016,
"learning_rate": 7.419191919191918e-07,
"loss": 0.7159,
"step": 2660
},
{
"epoch": 0.13,
"grad_norm": 5.848669052124023,
"learning_rate": 7.409090909090909e-07,
"loss": 0.4026,
"step": 2670
},
{
"epoch": 0.13,
"grad_norm": 26.498138427734375,
"learning_rate": 7.398989898989899e-07,
"loss": 0.5316,
"step": 2680
},
{
"epoch": 0.13,
"grad_norm": 20.28061866760254,
"learning_rate": 7.388888888888889e-07,
"loss": 0.8416,
"step": 2690
},
{
"epoch": 0.14,
"grad_norm": 29.66952133178711,
"learning_rate": 7.378787878787879e-07,
"loss": 0.6887,
"step": 2700
},
{
"epoch": 0.14,
"grad_norm": 18.624435424804688,
"learning_rate": 7.368686868686868e-07,
"loss": 0.4513,
"step": 2710
},
{
"epoch": 0.14,
"grad_norm": 22.85711669921875,
"learning_rate": 7.358585858585859e-07,
"loss": 0.894,
"step": 2720
},
{
"epoch": 0.14,
"grad_norm": 30.069570541381836,
"learning_rate": 7.348484848484848e-07,
"loss": 0.8273,
"step": 2730
},
{
"epoch": 0.14,
"grad_norm": 52.604408264160156,
"learning_rate": 7.338383838383839e-07,
"loss": 0.6306,
"step": 2740
},
{
"epoch": 0.14,
"grad_norm": 30.587234497070312,
"learning_rate": 7.328282828282828e-07,
"loss": 0.7653,
"step": 2750
},
{
"epoch": 0.14,
"grad_norm": 25.937252044677734,
"learning_rate": 7.318181818181818e-07,
"loss": 0.7205,
"step": 2760
},
{
"epoch": 0.14,
"grad_norm": 18.365734100341797,
"learning_rate": 7.308080808080808e-07,
"loss": 0.4825,
"step": 2770
},
{
"epoch": 0.14,
"grad_norm": 17.263690948486328,
"learning_rate": 7.297979797979797e-07,
"loss": 0.7032,
"step": 2780
},
{
"epoch": 0.14,
"grad_norm": 34.739078521728516,
"learning_rate": 7.287878787878787e-07,
"loss": 0.463,
"step": 2790
},
{
"epoch": 0.14,
"grad_norm": 10.641708374023438,
"learning_rate": 7.277777777777777e-07,
"loss": 0.4351,
"step": 2800
},
{
"epoch": 0.14,
"grad_norm": 75.48184967041016,
"learning_rate": 7.267676767676767e-07,
"loss": 0.7079,
"step": 2810
},
{
"epoch": 0.14,
"grad_norm": 45.05419921875,
"learning_rate": 7.257575757575756e-07,
"loss": 0.5934,
"step": 2820
},
{
"epoch": 0.14,
"grad_norm": 29.237016677856445,
"learning_rate": 7.247474747474747e-07,
"loss": 0.4162,
"step": 2830
},
{
"epoch": 0.14,
"grad_norm": 0.004866959527134895,
"learning_rate": 7.237373737373737e-07,
"loss": 0.7245,
"step": 2840
},
{
"epoch": 0.14,
"grad_norm": 128.69998168945312,
"learning_rate": 7.227272727272728e-07,
"loss": 0.4543,
"step": 2850
},
{
"epoch": 0.14,
"grad_norm": 0.031988680362701416,
"learning_rate": 7.217171717171717e-07,
"loss": 0.6369,
"step": 2860
},
{
"epoch": 0.14,
"grad_norm": 23.29071807861328,
"learning_rate": 7.207070707070707e-07,
"loss": 0.8627,
"step": 2870
},
{
"epoch": 0.14,
"grad_norm": 16.787782669067383,
"learning_rate": 7.196969696969697e-07,
"loss": 0.7068,
"step": 2880
},
{
"epoch": 0.14,
"grad_norm": 27.934904098510742,
"learning_rate": 7.186868686868687e-07,
"loss": 0.6029,
"step": 2890
},
{
"epoch": 0.14,
"grad_norm": 28.06117057800293,
"learning_rate": 7.176767676767677e-07,
"loss": 0.4879,
"step": 2900
},
{
"epoch": 0.15,
"grad_norm": 13.214853286743164,
"learning_rate": 7.166666666666667e-07,
"loss": 0.4323,
"step": 2910
},
{
"epoch": 0.15,
"grad_norm": 4.088837146759033,
"learning_rate": 7.156565656565656e-07,
"loss": 0.5254,
"step": 2920
},
{
"epoch": 0.15,
"grad_norm": 29.167570114135742,
"learning_rate": 7.146464646464646e-07,
"loss": 0.7426,
"step": 2930
},
{
"epoch": 0.15,
"grad_norm": 55.35223388671875,
"learning_rate": 7.136363636363636e-07,
"loss": 0.5928,
"step": 2940
},
{
"epoch": 0.15,
"grad_norm": 46.83303451538086,
"learning_rate": 7.126262626262625e-07,
"loss": 0.5281,
"step": 2950
},
{
"epoch": 0.15,
"grad_norm": 62.05622482299805,
"learning_rate": 7.116161616161616e-07,
"loss": 0.6006,
"step": 2960
},
{
"epoch": 0.15,
"grad_norm": 47.03242874145508,
"learning_rate": 7.106060606060605e-07,
"loss": 0.5264,
"step": 2970
},
{
"epoch": 0.15,
"grad_norm": 41.88718032836914,
"learning_rate": 7.095959595959596e-07,
"loss": 0.5463,
"step": 2980
},
{
"epoch": 0.15,
"grad_norm": 10.629190444946289,
"learning_rate": 7.085858585858585e-07,
"loss": 0.4873,
"step": 2990
},
{
"epoch": 0.15,
"grad_norm": 34.29298400878906,
"learning_rate": 7.075757575757575e-07,
"loss": 0.5327,
"step": 3000
},
{
"epoch": 0.15,
"eval_loss": 0.6308945417404175,
"eval_runtime": 273.2852,
"eval_samples_per_second": 3.659,
"eval_steps_per_second": 3.659,
"step": 3000
},
{
"epoch": 0.15,
"grad_norm": 57.80482482910156,
"learning_rate": 7.065656565656566e-07,
"loss": 0.5131,
"step": 3010
},
{
"epoch": 0.15,
"grad_norm": 15.977564811706543,
"learning_rate": 7.055555555555556e-07,
"loss": 0.7116,
"step": 3020
},
{
"epoch": 0.15,
"grad_norm": 24.813589096069336,
"learning_rate": 7.045454545454545e-07,
"loss": 0.8132,
"step": 3030
},
{
"epoch": 0.15,
"grad_norm": 22.984556198120117,
"learning_rate": 7.035353535353536e-07,
"loss": 0.9015,
"step": 3040
},
{
"epoch": 0.15,
"grad_norm": 15.78228759765625,
"learning_rate": 7.025252525252525e-07,
"loss": 0.5522,
"step": 3050
},
{
"epoch": 0.15,
"grad_norm": 19.144140243530273,
"learning_rate": 7.015151515151516e-07,
"loss": 0.5387,
"step": 3060
},
{
"epoch": 0.15,
"grad_norm": 47.44633483886719,
"learning_rate": 7.005050505050505e-07,
"loss": 0.5754,
"step": 3070
},
{
"epoch": 0.15,
"grad_norm": 36.51036834716797,
"learning_rate": 6.994949494949494e-07,
"loss": 0.8067,
"step": 3080
},
{
"epoch": 0.15,
"grad_norm": 0.8666948080062866,
"learning_rate": 6.984848484848485e-07,
"loss": 0.6058,
"step": 3090
},
{
"epoch": 0.15,
"grad_norm": 23.022371292114258,
"learning_rate": 6.974747474747474e-07,
"loss": 0.6043,
"step": 3100
},
{
"epoch": 0.16,
"grad_norm": 42.4169807434082,
"learning_rate": 6.964646464646464e-07,
"loss": 0.5526,
"step": 3110
},
{
"epoch": 0.16,
"grad_norm": 65.37395477294922,
"learning_rate": 6.954545454545454e-07,
"loss": 0.5219,
"step": 3120
},
{
"epoch": 0.16,
"grad_norm": 18.157527923583984,
"learning_rate": 6.944444444444444e-07,
"loss": 0.6028,
"step": 3130
},
{
"epoch": 0.16,
"grad_norm": 52.82014846801758,
"learning_rate": 6.934343434343434e-07,
"loss": 0.7391,
"step": 3140
},
{
"epoch": 0.16,
"grad_norm": 19.03993034362793,
"learning_rate": 6.924242424242424e-07,
"loss": 0.9328,
"step": 3150
},
{
"epoch": 0.16,
"grad_norm": 38.07152557373047,
"learning_rate": 6.914141414141413e-07,
"loss": 0.677,
"step": 3160
},
{
"epoch": 0.16,
"grad_norm": 10.974771499633789,
"learning_rate": 6.904040404040404e-07,
"loss": 0.638,
"step": 3170
},
{
"epoch": 0.16,
"grad_norm": 33.91596221923828,
"learning_rate": 6.894949494949494e-07,
"loss": 0.9438,
"step": 3180
},
{
"epoch": 0.16,
"grad_norm": 48.037166595458984,
"learning_rate": 6.884848484848485e-07,
"loss": 0.4394,
"step": 3190
},
{
"epoch": 0.16,
"grad_norm": 28.13571548461914,
"learning_rate": 6.874747474747474e-07,
"loss": 0.4743,
"step": 3200
},
{
"epoch": 0.16,
"grad_norm": 35.86332702636719,
"learning_rate": 6.864646464646464e-07,
"loss": 0.5854,
"step": 3210
},
{
"epoch": 0.16,
"grad_norm": 28.57064437866211,
"learning_rate": 6.854545454545454e-07,
"loss": 0.9211,
"step": 3220
},
{
"epoch": 0.16,
"grad_norm": 41.90704345703125,
"learning_rate": 6.844444444444444e-07,
"loss": 0.6961,
"step": 3230
},
{
"epoch": 0.16,
"grad_norm": 11.663514137268066,
"learning_rate": 6.834343434343434e-07,
"loss": 0.6082,
"step": 3240
},
{
"epoch": 0.16,
"grad_norm": 0.3548143804073334,
"learning_rate": 6.824242424242424e-07,
"loss": 0.5743,
"step": 3250
},
{
"epoch": 0.16,
"grad_norm": 49.9235954284668,
"learning_rate": 6.814141414141413e-07,
"loss": 0.8361,
"step": 3260
},
{
"epoch": 0.16,
"grad_norm": 63.49104309082031,
"learning_rate": 6.804040404040405e-07,
"loss": 0.6426,
"step": 3270
},
{
"epoch": 0.16,
"grad_norm": 34.379417419433594,
"learning_rate": 6.793939393939394e-07,
"loss": 0.6631,
"step": 3280
},
{
"epoch": 0.16,
"grad_norm": 38.65358352661133,
"learning_rate": 6.783838383838383e-07,
"loss": 0.7235,
"step": 3290
},
{
"epoch": 0.17,
"grad_norm": 65.52446746826172,
"learning_rate": 6.773737373737374e-07,
"loss": 0.718,
"step": 3300
},
{
"epoch": 0.17,
"grad_norm": 11.010351181030273,
"learning_rate": 6.763636363636363e-07,
"loss": 0.7492,
"step": 3310
},
{
"epoch": 0.17,
"grad_norm": 0.8809446096420288,
"learning_rate": 6.753535353535354e-07,
"loss": 0.685,
"step": 3320
},
{
"epoch": 0.17,
"grad_norm": 18.255809783935547,
"learning_rate": 6.743434343434343e-07,
"loss": 0.5533,
"step": 3330
},
{
"epoch": 0.17,
"grad_norm": 26.685049057006836,
"learning_rate": 6.733333333333333e-07,
"loss": 0.5235,
"step": 3340
},
{
"epoch": 0.17,
"grad_norm": 249.94290161132812,
"learning_rate": 6.723232323232323e-07,
"loss": 0.6984,
"step": 3350
},
{
"epoch": 0.17,
"grad_norm": 45.482479095458984,
"learning_rate": 6.713131313131313e-07,
"loss": 0.4921,
"step": 3360
},
{
"epoch": 0.17,
"grad_norm": 19.996156692504883,
"learning_rate": 6.703030303030302e-07,
"loss": 0.4737,
"step": 3370
},
{
"epoch": 0.17,
"grad_norm": 155.62306213378906,
"learning_rate": 6.692929292929293e-07,
"loss": 0.6733,
"step": 3380
},
{
"epoch": 0.17,
"grad_norm": 35.31591033935547,
"learning_rate": 6.682828282828282e-07,
"loss": 0.5598,
"step": 3390
},
{
"epoch": 0.17,
"grad_norm": 32.30759048461914,
"learning_rate": 6.672727272727273e-07,
"loss": 0.7611,
"step": 3400
},
{
"epoch": 0.17,
"grad_norm": 30.439285278320312,
"learning_rate": 6.662626262626262e-07,
"loss": 0.8177,
"step": 3410
},
{
"epoch": 0.17,
"grad_norm": 39.084266662597656,
"learning_rate": 6.652525252525251e-07,
"loss": 0.683,
"step": 3420
},
{
"epoch": 0.17,
"grad_norm": 28.271728515625,
"learning_rate": 6.642424242424242e-07,
"loss": 0.5019,
"step": 3430
},
{
"epoch": 0.17,
"grad_norm": 0.6252301931381226,
"learning_rate": 6.632323232323232e-07,
"loss": 0.4204,
"step": 3440
},
{
"epoch": 0.17,
"grad_norm": 39.03291702270508,
"learning_rate": 6.622222222222222e-07,
"loss": 0.4508,
"step": 3450
},
{
"epoch": 0.17,
"grad_norm": 4.38857364654541,
"learning_rate": 6.612121212121212e-07,
"loss": 0.5938,
"step": 3460
},
{
"epoch": 0.17,
"grad_norm": 24.514738082885742,
"learning_rate": 6.602020202020202e-07,
"loss": 0.7128,
"step": 3470
},
{
"epoch": 0.17,
"grad_norm": 45.85287857055664,
"learning_rate": 6.591919191919192e-07,
"loss": 0.7125,
"step": 3480
},
{
"epoch": 0.17,
"grad_norm": 0.022573018446564674,
"learning_rate": 6.581818181818182e-07,
"loss": 0.4128,
"step": 3490
},
{
"epoch": 0.17,
"grad_norm": 21.102893829345703,
"learning_rate": 6.571717171717171e-07,
"loss": 0.5341,
"step": 3500
},
{
"epoch": 0.18,
"grad_norm": 27.609329223632812,
"learning_rate": 6.561616161616162e-07,
"loss": 0.4808,
"step": 3510
},
{
"epoch": 0.18,
"grad_norm": 28.081892013549805,
"learning_rate": 6.551515151515151e-07,
"loss": 0.4426,
"step": 3520
},
{
"epoch": 0.18,
"grad_norm": 35.72859573364258,
"learning_rate": 6.541414141414141e-07,
"loss": 0.5072,
"step": 3530
},
{
"epoch": 0.18,
"grad_norm": 31.80050277709961,
"learning_rate": 6.531313131313131e-07,
"loss": 0.5102,
"step": 3540
},
{
"epoch": 0.18,
"grad_norm": 3.7258970737457275,
"learning_rate": 6.52121212121212e-07,
"loss": 0.4568,
"step": 3550
},
{
"epoch": 0.18,
"grad_norm": 23.297748565673828,
"learning_rate": 6.511111111111111e-07,
"loss": 0.5721,
"step": 3560
},
{
"epoch": 0.18,
"grad_norm": 63.63188171386719,
"learning_rate": 6.5010101010101e-07,
"loss": 0.6361,
"step": 3570
},
{
"epoch": 0.18,
"grad_norm": 38.485416412353516,
"learning_rate": 6.49090909090909e-07,
"loss": 0.4438,
"step": 3580
},
{
"epoch": 0.18,
"grad_norm": 40.09241485595703,
"learning_rate": 6.48080808080808e-07,
"loss": 0.732,
"step": 3590
},
{
"epoch": 0.18,
"grad_norm": 49.900917053222656,
"learning_rate": 6.47070707070707e-07,
"loss": 0.6085,
"step": 3600
},
{
"epoch": 0.18,
"grad_norm": 26.581472396850586,
"learning_rate": 6.46060606060606e-07,
"loss": 0.7013,
"step": 3610
},
{
"epoch": 0.18,
"grad_norm": 9.770761489868164,
"learning_rate": 6.450505050505051e-07,
"loss": 0.4664,
"step": 3620
},
{
"epoch": 0.18,
"grad_norm": 31.70376205444336,
"learning_rate": 6.44040404040404e-07,
"loss": 0.6208,
"step": 3630
},
{
"epoch": 0.18,
"grad_norm": 37.74061965942383,
"learning_rate": 6.430303030303031e-07,
"loss": 0.6137,
"step": 3640
},
{
"epoch": 0.18,
"grad_norm": 33.57075119018555,
"learning_rate": 6.42020202020202e-07,
"loss": 0.591,
"step": 3650
},
{
"epoch": 0.18,
"grad_norm": 52.23899841308594,
"learning_rate": 6.41010101010101e-07,
"loss": 0.8013,
"step": 3660
},
{
"epoch": 0.18,
"grad_norm": 62.8396110534668,
"learning_rate": 6.4e-07,
"loss": 0.7005,
"step": 3670
},
{
"epoch": 0.18,
"grad_norm": 36.724098205566406,
"learning_rate": 6.38989898989899e-07,
"loss": 0.8353,
"step": 3680
},
{
"epoch": 0.18,
"grad_norm": 2.098162889480591,
"learning_rate": 6.379797979797979e-07,
"loss": 0.5307,
"step": 3690
},
{
"epoch": 0.18,
"grad_norm": 41.13184356689453,
"learning_rate": 6.36969696969697e-07,
"loss": 0.5198,
"step": 3700
},
{
"epoch": 0.19,
"grad_norm": 46.46861267089844,
"learning_rate": 6.359595959595959e-07,
"loss": 0.4319,
"step": 3710
},
{
"epoch": 0.19,
"grad_norm": 65.68028259277344,
"learning_rate": 6.349494949494949e-07,
"loss": 0.5386,
"step": 3720
},
{
"epoch": 0.19,
"grad_norm": 5.740784168243408,
"learning_rate": 6.339393939393939e-07,
"loss": 0.6834,
"step": 3730
},
{
"epoch": 0.19,
"grad_norm": 15.918974876403809,
"learning_rate": 6.329292929292928e-07,
"loss": 0.6303,
"step": 3740
},
{
"epoch": 0.19,
"grad_norm": 5.351881980895996,
"learning_rate": 6.319191919191919e-07,
"loss": 0.3967,
"step": 3750
},
{
"epoch": 0.19,
"grad_norm": 76.57394409179688,
"learning_rate": 6.309090909090908e-07,
"loss": 0.5537,
"step": 3760
},
{
"epoch": 0.19,
"grad_norm": 17.062864303588867,
"learning_rate": 6.298989898989898e-07,
"loss": 0.6029,
"step": 3770
},
{
"epoch": 0.19,
"grad_norm": 39.9703254699707,
"learning_rate": 6.288888888888889e-07,
"loss": 0.3583,
"step": 3780
},
{
"epoch": 0.19,
"grad_norm": 61.81090545654297,
"learning_rate": 6.278787878787879e-07,
"loss": 0.5372,
"step": 3790
},
{
"epoch": 0.19,
"grad_norm": 25.4251708984375,
"learning_rate": 6.268686868686869e-07,
"loss": 0.4157,
"step": 3800
},
{
"epoch": 0.19,
"grad_norm": 3.276371479034424,
"learning_rate": 6.258585858585859e-07,
"loss": 0.6531,
"step": 3810
},
{
"epoch": 0.19,
"grad_norm": 47.67403030395508,
"learning_rate": 6.248484848484848e-07,
"loss": 0.6813,
"step": 3820
},
{
"epoch": 0.19,
"grad_norm": 13.192293167114258,
"learning_rate": 6.238383838383839e-07,
"loss": 0.4612,
"step": 3830
},
{
"epoch": 0.19,
"grad_norm": 0.580405056476593,
"learning_rate": 6.228282828282828e-07,
"loss": 0.3817,
"step": 3840
},
{
"epoch": 0.19,
"grad_norm": 17.856048583984375,
"learning_rate": 6.218181818181817e-07,
"loss": 0.7929,
"step": 3850
},
{
"epoch": 0.19,
"grad_norm": 33.620033264160156,
"learning_rate": 6.208080808080808e-07,
"loss": 0.5922,
"step": 3860
},
{
"epoch": 0.19,
"grad_norm": 33.94918441772461,
"learning_rate": 6.197979797979797e-07,
"loss": 0.4963,
"step": 3870
},
{
"epoch": 0.19,
"grad_norm": 15.053759574890137,
"learning_rate": 6.187878787878788e-07,
"loss": 0.6975,
"step": 3880
},
{
"epoch": 0.19,
"grad_norm": 44.79494857788086,
"learning_rate": 6.177777777777777e-07,
"loss": 0.6962,
"step": 3890
},
{
"epoch": 0.2,
"grad_norm": 9.810379028320312,
"learning_rate": 6.167676767676767e-07,
"loss": 0.3916,
"step": 3900
},
{
"epoch": 0.2,
"grad_norm": 54.83355712890625,
"learning_rate": 6.157575757575757e-07,
"loss": 0.6929,
"step": 3910
},
{
"epoch": 0.2,
"grad_norm": 20.908220291137695,
"learning_rate": 6.147474747474747e-07,
"loss": 0.7343,
"step": 3920
},
{
"epoch": 0.2,
"grad_norm": 48.0946044921875,
"learning_rate": 6.137373737373736e-07,
"loss": 0.5088,
"step": 3930
},
{
"epoch": 0.2,
"grad_norm": 37.02779006958008,
"learning_rate": 6.127272727272727e-07,
"loss": 0.5441,
"step": 3940
},
{
"epoch": 0.2,
"grad_norm": 16.095773696899414,
"learning_rate": 6.117171717171717e-07,
"loss": 0.7542,
"step": 3950
},
{
"epoch": 0.2,
"grad_norm": 48.33674621582031,
"learning_rate": 6.107070707070708e-07,
"loss": 0.4938,
"step": 3960
},
{
"epoch": 0.2,
"grad_norm": 33.02685546875,
"learning_rate": 6.096969696969697e-07,
"loss": 0.6449,
"step": 3970
},
{
"epoch": 0.2,
"grad_norm": 18.588003158569336,
"learning_rate": 6.086868686868687e-07,
"loss": 0.3599,
"step": 3980
},
{
"epoch": 0.2,
"grad_norm": 46.80269241333008,
"learning_rate": 6.076767676767677e-07,
"loss": 0.5536,
"step": 3990
},
{
"epoch": 0.2,
"grad_norm": 29.706817626953125,
"learning_rate": 6.066666666666666e-07,
"loss": 0.6733,
"step": 4000
},
{
"epoch": 0.2,
"eval_loss": 0.5796153545379639,
"eval_runtime": 272.8278,
"eval_samples_per_second": 3.665,
"eval_steps_per_second": 3.665,
"step": 4000
}
],
"logging_steps": 10,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"total_flos": 1.88564197343232e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}