{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 792,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025252525252525255,
"grad_norm": 48.258295718819795,
"learning_rate": 1.2500000000000002e-07,
"loss": 1.9275,
"step": 1
},
{
"epoch": 0.005050505050505051,
"grad_norm": 39.460355933403136,
"learning_rate": 2.5000000000000004e-07,
"loss": 1.7561,
"step": 2
},
{
"epoch": 0.007575757575757576,
"grad_norm": 22.813327001920275,
"learning_rate": 3.75e-07,
"loss": 1.6095,
"step": 3
},
{
"epoch": 0.010101010101010102,
"grad_norm": 9.567786533547153,
"learning_rate": 5.000000000000001e-07,
"loss": 1.05,
"step": 4
},
{
"epoch": 0.012626262626262626,
"grad_norm": 11.057324080295228,
"learning_rate": 6.25e-07,
"loss": 1.0978,
"step": 5
},
{
"epoch": 0.015151515151515152,
"grad_norm": 10.813000642689964,
"learning_rate": 7.5e-07,
"loss": 1.1162,
"step": 6
},
{
"epoch": 0.017676767676767676,
"grad_norm": 14.29576325416322,
"learning_rate": 8.75e-07,
"loss": 1.453,
"step": 7
},
{
"epoch": 0.020202020202020204,
"grad_norm": 19.510849637766164,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.8516,
"step": 8
},
{
"epoch": 0.022727272727272728,
"grad_norm": 10.447146215690047,
"learning_rate": 1.125e-06,
"loss": 1.0555,
"step": 9
},
{
"epoch": 0.025252525252525252,
"grad_norm": 14.198165798946205,
"learning_rate": 1.25e-06,
"loss": 1.2898,
"step": 10
},
{
"epoch": 0.027777777777777776,
"grad_norm": 6.97099363867629,
"learning_rate": 1.3750000000000002e-06,
"loss": 1.1732,
"step": 11
},
{
"epoch": 0.030303030303030304,
"grad_norm": 16.436212851143544,
"learning_rate": 1.5e-06,
"loss": 1.2949,
"step": 12
},
{
"epoch": 0.03282828282828283,
"grad_norm": 5.462631864808542,
"learning_rate": 1.6250000000000001e-06,
"loss": 1.1545,
"step": 13
},
{
"epoch": 0.03535353535353535,
"grad_norm": 5.426645269399041,
"learning_rate": 1.75e-06,
"loss": 1.2389,
"step": 14
},
{
"epoch": 0.03787878787878788,
"grad_norm": 4.242657208123991,
"learning_rate": 1.8750000000000003e-06,
"loss": 1.1057,
"step": 15
},
{
"epoch": 0.04040404040404041,
"grad_norm": 3.830542824376302,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.9908,
"step": 16
},
{
"epoch": 0.04292929292929293,
"grad_norm": 10.554414354973694,
"learning_rate": 2.125e-06,
"loss": 1.607,
"step": 17
},
{
"epoch": 0.045454545454545456,
"grad_norm": 4.11242093919555,
"learning_rate": 2.25e-06,
"loss": 1.1206,
"step": 18
},
{
"epoch": 0.047979797979797977,
"grad_norm": 4.770147856118096,
"learning_rate": 2.375e-06,
"loss": 1.1279,
"step": 19
},
{
"epoch": 0.050505050505050504,
"grad_norm": 4.967319930046011,
"learning_rate": 2.5e-06,
"loss": 1.0411,
"step": 20
},
{
"epoch": 0.05303030303030303,
"grad_norm": 4.750682784525671,
"learning_rate": 2.6250000000000003e-06,
"loss": 1.0519,
"step": 21
},
{
"epoch": 0.05555555555555555,
"grad_norm": 9.126977208625986,
"learning_rate": 2.7500000000000004e-06,
"loss": 1.5272,
"step": 22
},
{
"epoch": 0.05808080808080808,
"grad_norm": 4.36519677416668,
"learning_rate": 2.875e-06,
"loss": 1.015,
"step": 23
},
{
"epoch": 0.06060606060606061,
"grad_norm": 3.897905574678489,
"learning_rate": 3e-06,
"loss": 1.0442,
"step": 24
},
{
"epoch": 0.06313131313131314,
"grad_norm": 3.329345295636704,
"learning_rate": 3.125e-06,
"loss": 1.0389,
"step": 25
},
{
"epoch": 0.06565656565656566,
"grad_norm": 2.7401444766221923,
"learning_rate": 3.2500000000000002e-06,
"loss": 1.0322,
"step": 26
},
{
"epoch": 0.06818181818181818,
"grad_norm": 5.404089478093299,
"learning_rate": 3.3750000000000003e-06,
"loss": 1.3973,
"step": 27
},
{
"epoch": 0.0707070707070707,
"grad_norm": 6.283799144295399,
"learning_rate": 3.5e-06,
"loss": 1.4233,
"step": 28
},
{
"epoch": 0.07323232323232323,
"grad_norm": 8.400353509063391,
"learning_rate": 3.625e-06,
"loss": 1.2831,
"step": 29
},
{
"epoch": 0.07575757575757576,
"grad_norm": 4.140515928616004,
"learning_rate": 3.7500000000000005e-06,
"loss": 1.1687,
"step": 30
},
{
"epoch": 0.07828282828282829,
"grad_norm": 3.182920202207571,
"learning_rate": 3.875e-06,
"loss": 1.0067,
"step": 31
},
{
"epoch": 0.08080808080808081,
"grad_norm": 3.145823942819353,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9379,
"step": 32
},
{
"epoch": 0.08333333333333333,
"grad_norm": 3.361984353611541,
"learning_rate": 4.125e-06,
"loss": 1.0842,
"step": 33
},
{
"epoch": 0.08585858585858586,
"grad_norm": 2.555337152889591,
"learning_rate": 4.25e-06,
"loss": 1.0224,
"step": 34
},
{
"epoch": 0.08838383838383838,
"grad_norm": 2.1248975373902517,
"learning_rate": 4.3750000000000005e-06,
"loss": 0.9566,
"step": 35
},
{
"epoch": 0.09090909090909091,
"grad_norm": 4.077844289471019,
"learning_rate": 4.5e-06,
"loss": 1.0787,
"step": 36
},
{
"epoch": 0.09343434343434344,
"grad_norm": 2.4988162057193537,
"learning_rate": 4.625000000000001e-06,
"loss": 0.9788,
"step": 37
},
{
"epoch": 0.09595959595959595,
"grad_norm": 4.91530806428002,
"learning_rate": 4.75e-06,
"loss": 1.1895,
"step": 38
},
{
"epoch": 0.09848484848484848,
"grad_norm": 3.1370674461238295,
"learning_rate": 4.875e-06,
"loss": 1.0438,
"step": 39
},
{
"epoch": 0.10101010101010101,
"grad_norm": 6.436900873012234,
"learning_rate": 5e-06,
"loss": 1.3422,
"step": 40
},
{
"epoch": 0.10353535353535354,
"grad_norm": 3.723777854298738,
"learning_rate": 5.125e-06,
"loss": 1.1121,
"step": 41
},
{
"epoch": 0.10606060606060606,
"grad_norm": 2.326130796593331,
"learning_rate": 5.2500000000000006e-06,
"loss": 0.9572,
"step": 42
},
{
"epoch": 0.10858585858585859,
"grad_norm": 2.3056825233350957,
"learning_rate": 5.375e-06,
"loss": 0.9699,
"step": 43
},
{
"epoch": 0.1111111111111111,
"grad_norm": 2.301434699941695,
"learning_rate": 5.500000000000001e-06,
"loss": 0.9646,
"step": 44
},
{
"epoch": 0.11363636363636363,
"grad_norm": 2.3295434297445414,
"learning_rate": 5.625e-06,
"loss": 0.9376,
"step": 45
},
{
"epoch": 0.11616161616161616,
"grad_norm": 2.5050753125853684,
"learning_rate": 5.75e-06,
"loss": 0.9074,
"step": 46
},
{
"epoch": 0.11868686868686869,
"grad_norm": 2.0546516540207014,
"learning_rate": 5.8750000000000005e-06,
"loss": 0.9403,
"step": 47
},
{
"epoch": 0.12121212121212122,
"grad_norm": 2.0340809208467494,
"learning_rate": 6e-06,
"loss": 0.8159,
"step": 48
},
{
"epoch": 0.12373737373737374,
"grad_norm": 5.7278951619200305,
"learning_rate": 6.125000000000001e-06,
"loss": 0.9719,
"step": 49
},
{
"epoch": 0.12626262626262627,
"grad_norm": 2.1946953064416697,
"learning_rate": 6.25e-06,
"loss": 0.9277,
"step": 50
},
{
"epoch": 0.12878787878787878,
"grad_norm": 2.2697506248412886,
"learning_rate": 6.375e-06,
"loss": 0.9974,
"step": 51
},
{
"epoch": 0.13131313131313133,
"grad_norm": 2.055404666876581,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.9163,
"step": 52
},
{
"epoch": 0.13383838383838384,
"grad_norm": 3.833951924691502,
"learning_rate": 6.625e-06,
"loss": 1.0665,
"step": 53
},
{
"epoch": 0.13636363636363635,
"grad_norm": 2.0859633598583613,
"learning_rate": 6.750000000000001e-06,
"loss": 0.9686,
"step": 54
},
{
"epoch": 0.1388888888888889,
"grad_norm": 2.2814189437359973,
"learning_rate": 6.875e-06,
"loss": 0.92,
"step": 55
},
{
"epoch": 0.1414141414141414,
"grad_norm": 2.2509926193401437,
"learning_rate": 7e-06,
"loss": 1.0521,
"step": 56
},
{
"epoch": 0.14393939393939395,
"grad_norm": 1.9377141332945118,
"learning_rate": 7.125e-06,
"loss": 0.9191,
"step": 57
},
{
"epoch": 0.14646464646464646,
"grad_norm": 2.038161614478563,
"learning_rate": 7.25e-06,
"loss": 0.9109,
"step": 58
},
{
"epoch": 0.14898989898989898,
"grad_norm": 2.0952250926433833,
"learning_rate": 7.375000000000001e-06,
"loss": 0.8545,
"step": 59
},
{
"epoch": 0.15151515151515152,
"grad_norm": 2.3799049573624873,
"learning_rate": 7.500000000000001e-06,
"loss": 0.9854,
"step": 60
},
{
"epoch": 0.15404040404040403,
"grad_norm": 2.182205217955518,
"learning_rate": 7.625e-06,
"loss": 0.9525,
"step": 61
},
{
"epoch": 0.15656565656565657,
"grad_norm": 1.8042397573232205,
"learning_rate": 7.75e-06,
"loss": 0.7935,
"step": 62
},
{
"epoch": 0.1590909090909091,
"grad_norm": 2.227503618112689,
"learning_rate": 7.875e-06,
"loss": 0.958,
"step": 63
},
{
"epoch": 0.16161616161616163,
"grad_norm": 2.0235412226406453,
"learning_rate": 8.000000000000001e-06,
"loss": 0.8736,
"step": 64
},
{
"epoch": 0.16414141414141414,
"grad_norm": 2.22174067906374,
"learning_rate": 8.125000000000001e-06,
"loss": 1.0448,
"step": 65
},
{
"epoch": 0.16666666666666666,
"grad_norm": 16.13578957413344,
"learning_rate": 8.25e-06,
"loss": 1.3686,
"step": 66
},
{
"epoch": 0.1691919191919192,
"grad_norm": 2.240976064274845,
"learning_rate": 8.375e-06,
"loss": 1.0019,
"step": 67
},
{
"epoch": 0.1717171717171717,
"grad_norm": 2.701797726150564,
"learning_rate": 8.5e-06,
"loss": 1.0509,
"step": 68
},
{
"epoch": 0.17424242424242425,
"grad_norm": 2.163864338752041,
"learning_rate": 8.625000000000001e-06,
"loss": 0.8844,
"step": 69
},
{
"epoch": 0.17676767676767677,
"grad_norm": 1.8472579199669308,
"learning_rate": 8.750000000000001e-06,
"loss": 0.7104,
"step": 70
},
{
"epoch": 0.17929292929292928,
"grad_norm": 1.9451160626436041,
"learning_rate": 8.875e-06,
"loss": 0.8657,
"step": 71
},
{
"epoch": 0.18181818181818182,
"grad_norm": 2.479384644097427,
"learning_rate": 9e-06,
"loss": 1.0244,
"step": 72
},
{
"epoch": 0.18434343434343434,
"grad_norm": 2.3519375039595696,
"learning_rate": 9.125e-06,
"loss": 0.865,
"step": 73
},
{
"epoch": 0.18686868686868688,
"grad_norm": 3.64891542713101,
"learning_rate": 9.250000000000001e-06,
"loss": 1.2406,
"step": 74
},
{
"epoch": 0.1893939393939394,
"grad_norm": 2.1786264750334596,
"learning_rate": 9.375000000000001e-06,
"loss": 1.0481,
"step": 75
},
{
"epoch": 0.1919191919191919,
"grad_norm": 2.398938318191815,
"learning_rate": 9.5e-06,
"loss": 0.9315,
"step": 76
},
{
"epoch": 0.19444444444444445,
"grad_norm": 2.2908776549091043,
"learning_rate": 9.625e-06,
"loss": 0.9697,
"step": 77
},
{
"epoch": 0.19696969696969696,
"grad_norm": 4.637103885430685,
"learning_rate": 9.75e-06,
"loss": 0.9194,
"step": 78
},
{
"epoch": 0.1994949494949495,
"grad_norm": 2.3408650014502475,
"learning_rate": 9.875000000000001e-06,
"loss": 0.8787,
"step": 79
},
{
"epoch": 0.20202020202020202,
"grad_norm": 2.2192549284497334,
"learning_rate": 1e-05,
"loss": 1.0223,
"step": 80
},
{
"epoch": 0.20454545454545456,
"grad_norm": 2.34769771403462,
"learning_rate": 9.999951328014591e-06,
"loss": 0.9507,
"step": 81
},
{
"epoch": 0.20707070707070707,
"grad_norm": 2.0514445749453665,
"learning_rate": 9.999805313005946e-06,
"loss": 0.8148,
"step": 82
},
{
"epoch": 0.20959595959595959,
"grad_norm": 2.0024302071132483,
"learning_rate": 9.999561957816803e-06,
"loss": 0.9134,
"step": 83
},
{
"epoch": 0.21212121212121213,
"grad_norm": 2.165975206049464,
"learning_rate": 9.999221267184993e-06,
"loss": 0.8343,
"step": 84
},
{
"epoch": 0.21464646464646464,
"grad_norm": 2.085675902518532,
"learning_rate": 9.998783247743353e-06,
"loss": 0.9375,
"step": 85
},
{
"epoch": 0.21717171717171718,
"grad_norm": 2.2746707063658715,
"learning_rate": 9.998247908019594e-06,
"loss": 0.9274,
"step": 86
},
{
"epoch": 0.2196969696969697,
"grad_norm": 2.441957874747477,
"learning_rate": 9.99761525843613e-06,
"loss": 0.9075,
"step": 87
},
{
"epoch": 0.2222222222222222,
"grad_norm": 2.1436625663221194,
"learning_rate": 9.996885311309892e-06,
"loss": 0.8348,
"step": 88
},
{
"epoch": 0.22474747474747475,
"grad_norm": 2.4435892720895636,
"learning_rate": 9.996058080852067e-06,
"loss": 0.9895,
"step": 89
},
{
"epoch": 0.22727272727272727,
"grad_norm": 2.1086208840306027,
"learning_rate": 9.995133583167833e-06,
"loss": 0.9303,
"step": 90
},
{
"epoch": 0.2297979797979798,
"grad_norm": 2.1137431675963874,
"learning_rate": 9.994111836256049e-06,
"loss": 0.8749,
"step": 91
},
{
"epoch": 0.23232323232323232,
"grad_norm": 2.108061948140714,
"learning_rate": 9.992992860008893e-06,
"loss": 0.8981,
"step": 92
},
{
"epoch": 0.23484848484848486,
"grad_norm": 2.21561668526365,
"learning_rate": 9.991776676211483e-06,
"loss": 0.8946,
"step": 93
},
{
"epoch": 0.23737373737373738,
"grad_norm": 2.5154195166967344,
"learning_rate": 9.990463308541452e-06,
"loss": 0.9656,
"step": 94
},
{
"epoch": 0.2398989898989899,
"grad_norm": 2.4487280197393098,
"learning_rate": 9.989052782568484e-06,
"loss": 0.9718,
"step": 95
},
{
"epoch": 0.24242424242424243,
"grad_norm": 4.72297226748207,
"learning_rate": 9.987545125753818e-06,
"loss": 0.947,
"step": 96
},
{
"epoch": 0.24494949494949494,
"grad_norm": 2.158938229932845,
"learning_rate": 9.98594036744972e-06,
"loss": 0.8702,
"step": 97
},
{
"epoch": 0.2474747474747475,
"grad_norm": 2.047332726976967,
"learning_rate": 9.98423853889889e-06,
"loss": 0.8419,
"step": 98
},
{
"epoch": 0.25,
"grad_norm": 2.21925272607747,
"learning_rate": 9.982439673233885e-06,
"loss": 0.9074,
"step": 99
},
{
"epoch": 0.25252525252525254,
"grad_norm": 2.0534988916807335,
"learning_rate": 9.980543805476447e-06,
"loss": 0.9016,
"step": 100
},
{
"epoch": 0.255050505050505,
"grad_norm": 2.298269649345548,
"learning_rate": 9.978550972536834e-06,
"loss": 0.9077,
"step": 101
},
{
"epoch": 0.25757575757575757,
"grad_norm": 2.1666708895525715,
"learning_rate": 9.976461213213104e-06,
"loss": 0.936,
"step": 102
},
{
"epoch": 0.2601010101010101,
"grad_norm": 4.071904479587891,
"learning_rate": 9.974274568190349e-06,
"loss": 1.2813,
"step": 103
},
{
"epoch": 0.26262626262626265,
"grad_norm": 2.481557447519078,
"learning_rate": 9.971991080039912e-06,
"loss": 0.9918,
"step": 104
},
{
"epoch": 0.26515151515151514,
"grad_norm": 2.524820106383998,
"learning_rate": 9.96961079321855e-06,
"loss": 0.8811,
"step": 105
},
{
"epoch": 0.2676767676767677,
"grad_norm": 2.3458130025693222,
"learning_rate": 9.967133754067581e-06,
"loss": 0.8702,
"step": 106
},
{
"epoch": 0.2702020202020202,
"grad_norm": 2.362821949245319,
"learning_rate": 9.964560010811972e-06,
"loss": 0.8302,
"step": 107
},
{
"epoch": 0.2727272727272727,
"grad_norm": 1.8962200119601145,
"learning_rate": 9.961889613559396e-06,
"loss": 0.8755,
"step": 108
},
{
"epoch": 0.27525252525252525,
"grad_norm": 7.021936677812913,
"learning_rate": 9.95912261429927e-06,
"loss": 1.1003,
"step": 109
},
{
"epoch": 0.2777777777777778,
"grad_norm": 2.9357956476104574,
"learning_rate": 9.956259066901733e-06,
"loss": 0.8917,
"step": 110
},
{
"epoch": 0.2803030303030303,
"grad_norm": 5.604682172826816,
"learning_rate": 9.953299027116598e-06,
"loss": 1.1377,
"step": 111
},
{
"epoch": 0.2828282828282828,
"grad_norm": 2.505273663365931,
"learning_rate": 9.950242552572272e-06,
"loss": 0.8608,
"step": 112
},
{
"epoch": 0.28535353535353536,
"grad_norm": 2.318819448873007,
"learning_rate": 9.94708970277463e-06,
"loss": 0.7865,
"step": 113
},
{
"epoch": 0.2878787878787879,
"grad_norm": 2.2972429898836477,
"learning_rate": 9.943840539105853e-06,
"loss": 0.9088,
"step": 114
},
{
"epoch": 0.2904040404040404,
"grad_norm": 2.09343514786706,
"learning_rate": 9.940495124823241e-06,
"loss": 0.8695,
"step": 115
},
{
"epoch": 0.29292929292929293,
"grad_norm": 4.820925467370784,
"learning_rate": 9.937053525057977e-06,
"loss": 0.8001,
"step": 116
},
{
"epoch": 0.29545454545454547,
"grad_norm": 2.2270904713504627,
"learning_rate": 9.933515806813856e-06,
"loss": 0.9747,
"step": 117
},
{
"epoch": 0.29797979797979796,
"grad_norm": 2.0431448105301975,
"learning_rate": 9.92988203896599e-06,
"loss": 0.8518,
"step": 118
},
{
"epoch": 0.3005050505050505,
"grad_norm": 2.3907549205532352,
"learning_rate": 9.926152292259452e-06,
"loss": 0.9813,
"step": 119
},
{
"epoch": 0.30303030303030304,
"grad_norm": 2.0513095228592046,
"learning_rate": 9.922326639307918e-06,
"loss": 0.825,
"step": 120
},
{
"epoch": 0.3055555555555556,
"grad_norm": 2.004251263140837,
"learning_rate": 9.918405154592234e-06,
"loss": 0.7969,
"step": 121
},
{
"epoch": 0.30808080808080807,
"grad_norm": 2.547574729864397,
"learning_rate": 9.914387914458983e-06,
"loss": 0.8588,
"step": 122
},
{
"epoch": 0.3106060606060606,
"grad_norm": 2.275937959399851,
"learning_rate": 9.910274997118982e-06,
"loss": 0.8863,
"step": 123
},
{
"epoch": 0.31313131313131315,
"grad_norm": 1.9701811000702276,
"learning_rate": 9.906066482645774e-06,
"loss": 0.8108,
"step": 124
},
{
"epoch": 0.31565656565656564,
"grad_norm": 2.360990301413611,
"learning_rate": 9.90176245297406e-06,
"loss": 1.0052,
"step": 125
},
{
"epoch": 0.3181818181818182,
"grad_norm": 2.0617164801189727,
"learning_rate": 9.89736299189811e-06,
"loss": 0.947,
"step": 126
},
{
"epoch": 0.3207070707070707,
"grad_norm": 1.9841062952431117,
"learning_rate": 9.892868185070125e-06,
"loss": 0.8012,
"step": 127
},
{
"epoch": 0.32323232323232326,
"grad_norm": 2.324502782683919,
"learning_rate": 9.888278119998573e-06,
"loss": 0.8857,
"step": 128
},
{
"epoch": 0.32575757575757575,
"grad_norm": 1.884726123578641,
"learning_rate": 9.883592886046486e-06,
"loss": 0.8568,
"step": 129
},
{
"epoch": 0.3282828282828283,
"grad_norm": 2.0372622107562073,
"learning_rate": 9.878812574429722e-06,
"loss": 0.8853,
"step": 130
},
{
"epoch": 0.33080808080808083,
"grad_norm": 3.555056823505995,
"learning_rate": 9.873937278215181e-06,
"loss": 1.0668,
"step": 131
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.334110729476418,
"learning_rate": 9.868967092319003e-06,
"loss": 0.9339,
"step": 132
},
{
"epoch": 0.33585858585858586,
"grad_norm": 2.630108567168407,
"learning_rate": 9.863902113504713e-06,
"loss": 0.9012,
"step": 133
},
{
"epoch": 0.3383838383838384,
"grad_norm": 2.074825793064814,
"learning_rate": 9.858742440381343e-06,
"loss": 0.8642,
"step": 134
},
{
"epoch": 0.3409090909090909,
"grad_norm": 2.1705788123415877,
"learning_rate": 9.853488173401504e-06,
"loss": 0.8938,
"step": 135
},
{
"epoch": 0.3434343434343434,
"grad_norm": 2.0225980775293007,
"learning_rate": 9.848139414859441e-06,
"loss": 0.9019,
"step": 136
},
{
"epoch": 0.34595959595959597,
"grad_norm": 1.9632976484068687,
"learning_rate": 9.842696268889032e-06,
"loss": 0.7959,
"step": 137
},
{
"epoch": 0.3484848484848485,
"grad_norm": 2.076276682181062,
"learning_rate": 9.837158841461767e-06,
"loss": 0.857,
"step": 138
},
{
"epoch": 0.351010101010101,
"grad_norm": 1.910582730713414,
"learning_rate": 9.831527240384677e-06,
"loss": 0.8798,
"step": 139
},
{
"epoch": 0.35353535353535354,
"grad_norm": 2.921578731334097,
"learning_rate": 9.825801575298248e-06,
"loss": 0.7729,
"step": 140
},
{
"epoch": 0.3560606060606061,
"grad_norm": 2.117700092448324,
"learning_rate": 9.819981957674273e-06,
"loss": 0.8864,
"step": 141
},
{
"epoch": 0.35858585858585856,
"grad_norm": 2.0603912298419944,
"learning_rate": 9.814068500813692e-06,
"loss": 0.8729,
"step": 142
},
{
"epoch": 0.3611111111111111,
"grad_norm": 1.9790368784870478,
"learning_rate": 9.808061319844376e-06,
"loss": 0.8935,
"step": 143
},
{
"epoch": 0.36363636363636365,
"grad_norm": 1.9900951619374274,
"learning_rate": 9.801960531718898e-06,
"loss": 0.9169,
"step": 144
},
{
"epoch": 0.3661616161616162,
"grad_norm": 3.0505045248435536,
"learning_rate": 9.795766255212242e-06,
"loss": 0.777,
"step": 145
},
{
"epoch": 0.3686868686868687,
"grad_norm": 2.1044708983765887,
"learning_rate": 9.789478610919508e-06,
"loss": 0.8016,
"step": 146
},
{
"epoch": 0.3712121212121212,
"grad_norm": 2.307506347894947,
"learning_rate": 9.783097721253543e-06,
"loss": 0.904,
"step": 147
},
{
"epoch": 0.37373737373737376,
"grad_norm": 2.6700914211793068,
"learning_rate": 9.77662371044258e-06,
"loss": 0.814,
"step": 148
},
{
"epoch": 0.37626262626262624,
"grad_norm": 2.128113125559303,
"learning_rate": 9.770056704527797e-06,
"loss": 0.9349,
"step": 149
},
{
"epoch": 0.3787878787878788,
"grad_norm": 2.1648353809285403,
"learning_rate": 9.763396831360884e-06,
"loss": 0.898,
"step": 150
},
{
"epoch": 0.3813131313131313,
"grad_norm": 1.863562810265247,
"learning_rate": 9.756644220601541e-06,
"loss": 0.8154,
"step": 151
},
{
"epoch": 0.3838383838383838,
"grad_norm": 2.08267241663061,
"learning_rate": 9.749799003714954e-06,
"loss": 0.7789,
"step": 152
},
{
"epoch": 0.38636363636363635,
"grad_norm": 1.966986346341327,
"learning_rate": 9.742861313969246e-06,
"loss": 0.9254,
"step": 153
},
{
"epoch": 0.3888888888888889,
"grad_norm": 1.913415873237819,
"learning_rate": 9.735831286432869e-06,
"loss": 0.8697,
"step": 154
},
{
"epoch": 0.39141414141414144,
"grad_norm": 2.2248959778434294,
"learning_rate": 9.728709057971979e-06,
"loss": 0.9404,
"step": 155
},
{
"epoch": 0.3939393939393939,
"grad_norm": 1.8892163868156095,
"learning_rate": 9.721494767247779e-06,
"loss": 0.7971,
"step": 156
},
{
"epoch": 0.39646464646464646,
"grad_norm": 1.9995605934613117,
"learning_rate": 9.71418855471381e-06,
"loss": 0.8623,
"step": 157
},
{
"epoch": 0.398989898989899,
"grad_norm": 2.3873184975662203,
"learning_rate": 9.70679056261322e-06,
"loss": 0.9474,
"step": 158
},
{
"epoch": 0.4015151515151515,
"grad_norm": 2.0256435874976653,
"learning_rate": 9.699300934975993e-06,
"loss": 0.8555,
"step": 159
},
{
"epoch": 0.40404040404040403,
"grad_norm": 1.9071055516166688,
"learning_rate": 9.691719817616148e-06,
"loss": 0.8023,
"step": 160
},
{
"epoch": 0.4065656565656566,
"grad_norm": 3.6315046996199487,
"learning_rate": 9.6840473581289e-06,
"loss": 0.9962,
"step": 161
},
{
"epoch": 0.4090909090909091,
"grad_norm": 2.346167698527808,
"learning_rate": 9.676283705887783e-06,
"loss": 0.9006,
"step": 162
},
{
"epoch": 0.4116161616161616,
"grad_norm": 2.252415198524924,
"learning_rate": 9.668429012041742e-06,
"loss": 0.8686,
"step": 163
},
{
"epoch": 0.41414141414141414,
"grad_norm": 2.1712268580080543,
"learning_rate": 9.660483429512198e-06,
"loss": 0.8489,
"step": 164
},
{
"epoch": 0.4166666666666667,
"grad_norm": 2.400369253772225,
"learning_rate": 9.652447112990063e-06,
"loss": 0.9247,
"step": 165
},
{
"epoch": 0.41919191919191917,
"grad_norm": 1.9650591473408472,
"learning_rate": 9.644320218932723e-06,
"loss": 0.8414,
"step": 166
},
{
"epoch": 0.4217171717171717,
"grad_norm": 1.9882079036928393,
"learning_rate": 9.63610290556101e-06,
"loss": 0.8462,
"step": 167
},
{
"epoch": 0.42424242424242425,
"grad_norm": 1.9259611746962464,
"learning_rate": 9.627795332856107e-06,
"loss": 0.8237,
"step": 168
},
{
"epoch": 0.42676767676767674,
"grad_norm": 2.3668575403197383,
"learning_rate": 9.619397662556434e-06,
"loss": 1.0015,
"step": 169
},
{
"epoch": 0.4292929292929293,
"grad_norm": 2.4121091881310806,
"learning_rate": 9.61091005815451e-06,
"loss": 0.9617,
"step": 170
},
{
"epoch": 0.4318181818181818,
"grad_norm": 1.971563673812737,
"learning_rate": 9.602332684893754e-06,
"loss": 0.9116,
"step": 171
},
{
"epoch": 0.43434343434343436,
"grad_norm": 2.1760731194477936,
"learning_rate": 9.59366570976528e-06,
"loss": 0.8669,
"step": 172
},
{
"epoch": 0.43686868686868685,
"grad_norm": 2.0878750762773963,
"learning_rate": 9.584909301504649e-06,
"loss": 0.8803,
"step": 173
},
{
"epoch": 0.4393939393939394,
"grad_norm": 2.0502565788866622,
"learning_rate": 9.576063630588563e-06,
"loss": 0.8644,
"step": 174
},
{
"epoch": 0.44191919191919193,
"grad_norm": 2.0532371213874683,
"learning_rate": 9.567128869231575e-06,
"loss": 0.9064,
"step": 175
},
{
"epoch": 0.4444444444444444,
"grad_norm": 1.7359123512274788,
"learning_rate": 9.55810519138271e-06,
"loss": 0.7793,
"step": 176
},
{
"epoch": 0.44696969696969696,
"grad_norm": 2.0649902639480855,
"learning_rate": 9.548992772722097e-06,
"loss": 0.833,
"step": 177
},
{
"epoch": 0.4494949494949495,
"grad_norm": 2.0851927153389656,
"learning_rate": 9.53979179065754e-06,
"loss": 0.8882,
"step": 178
},
{
"epoch": 0.45202020202020204,
"grad_norm": 1.961154958147503,
"learning_rate": 9.530502424321062e-06,
"loss": 0.9064,
"step": 179
},
{
"epoch": 0.45454545454545453,
"grad_norm": 2.0859686773789936,
"learning_rate": 9.521124854565425e-06,
"loss": 0.8994,
"step": 180
},
{
"epoch": 0.45707070707070707,
"grad_norm": 2.003612926682745,
"learning_rate": 9.511659263960607e-06,
"loss": 0.8398,
"step": 181
},
{
"epoch": 0.4595959595959596,
"grad_norm": 1.7738515074791146,
"learning_rate": 9.50210583679024e-06,
"loss": 0.7957,
"step": 182
},
{
"epoch": 0.4621212121212121,
"grad_norm": 7.003506091645396,
"learning_rate": 9.492464759048033e-06,
"loss": 0.962,
"step": 183
},
{
"epoch": 0.46464646464646464,
"grad_norm": 1.8974156634529178,
"learning_rate": 9.482736218434144e-06,
"loss": 0.831,
"step": 184
},
{
"epoch": 0.4671717171717172,
"grad_norm": 2.225050134319501,
"learning_rate": 9.472920404351527e-06,
"loss": 0.91,
"step": 185
},
{
"epoch": 0.4696969696969697,
"grad_norm": 1.9983951157501365,
"learning_rate": 9.463017507902245e-06,
"loss": 0.8052,
"step": 186
},
{
"epoch": 0.4722222222222222,
"grad_norm": 1.8967122077813638,
"learning_rate": 9.453027721883751e-06,
"loss": 0.8748,
"step": 187
},
{
"epoch": 0.47474747474747475,
"grad_norm": 1.7957537461438238,
"learning_rate": 9.442951240785135e-06,
"loss": 0.8899,
"step": 188
},
{
"epoch": 0.4772727272727273,
"grad_norm": 1.8658746115672646,
"learning_rate": 9.432788260783333e-06,
"loss": 0.9029,
"step": 189
},
{
"epoch": 0.4797979797979798,
"grad_norm": 2.146859976441394,
"learning_rate": 9.422538979739307e-06,
"loss": 0.9431,
"step": 190
},
{
"epoch": 0.4823232323232323,
"grad_norm": 3.0014959843539297,
"learning_rate": 9.412203597194204e-06,
"loss": 0.9506,
"step": 191
},
{
"epoch": 0.48484848484848486,
"grad_norm": 1.9778497170928888,
"learning_rate": 9.401782314365458e-06,
"loss": 0.8288,
"step": 192
},
{
"epoch": 0.48737373737373735,
"grad_norm": 2.1367446852069416,
"learning_rate": 9.391275334142879e-06,
"loss": 0.8497,
"step": 193
},
{
"epoch": 0.4898989898989899,
"grad_norm": 2.2408999898099835,
"learning_rate": 9.380682861084703e-06,
"loss": 0.9065,
"step": 194
},
{
"epoch": 0.49242424242424243,
"grad_norm": 1.9859991078515866,
"learning_rate": 9.370005101413605e-06,
"loss": 0.8958,
"step": 195
},
{
"epoch": 0.494949494949495,
"grad_norm": 2.2539945653467193,
"learning_rate": 9.359242263012693e-06,
"loss": 0.9729,
"step": 196
},
{
"epoch": 0.49747474747474746,
"grad_norm": 1.9895090623859781,
"learning_rate": 9.348394555421454e-06,
"loss": 0.8944,
"step": 197
},
{
"epoch": 0.5,
"grad_norm": 1.6359618626667927,
"learning_rate": 9.33746218983167e-06,
"loss": 0.8089,
"step": 198
},
{
"epoch": 0.5025252525252525,
"grad_norm": 4.006494067652519,
"learning_rate": 9.32644537908332e-06,
"loss": 0.8762,
"step": 199
},
{
"epoch": 0.5050505050505051,
"grad_norm": 2.1878874894107696,
"learning_rate": 9.315344337660422e-06,
"loss": 0.914,
"step": 200
},
{
"epoch": 0.5075757575757576,
"grad_norm": 1.893335207175721,
"learning_rate": 9.304159281686867e-06,
"loss": 0.8459,
"step": 201
},
{
"epoch": 0.51010101010101,
"grad_norm": 2.1510241730461197,
"learning_rate": 9.29289042892221e-06,
"loss": 0.8279,
"step": 202
},
{
"epoch": 0.5126262626262627,
"grad_norm": 2.20068943496204,
"learning_rate": 9.281537998757421e-06,
"loss": 0.9085,
"step": 203
},
{
"epoch": 0.5151515151515151,
"grad_norm": 2.0306871317689694,
"learning_rate": 9.270102212210632e-06,
"loss": 0.877,
"step": 204
},
{
"epoch": 0.5176767676767676,
"grad_norm": 1.9346394998991387,
"learning_rate": 9.258583291922814e-06,
"loss": 0.8374,
"step": 205
},
{
"epoch": 0.5202020202020202,
"grad_norm": 2.133265797124652,
"learning_rate": 9.246981462153456e-06,
"loss": 0.9053,
"step": 206
},
{
"epoch": 0.5227272727272727,
"grad_norm": 3.983920120081825,
"learning_rate": 9.235296948776194e-06,
"loss": 1.1488,
"step": 207
},
{
"epoch": 0.5252525252525253,
"grad_norm": 1.7291092482153385,
"learning_rate": 9.223529979274411e-06,
"loss": 0.7214,
"step": 208
},
{
"epoch": 0.5277777777777778,
"grad_norm": 1.884323131029345,
"learning_rate": 9.211680782736818e-06,
"loss": 0.8794,
"step": 209
},
{
"epoch": 0.5303030303030303,
"grad_norm": 1.843020726005339,
"learning_rate": 9.19974958985298e-06,
"loss": 0.8302,
"step": 210
},
{
"epoch": 0.5328282828282829,
"grad_norm": 2.2050212625541326,
"learning_rate": 9.18773663290884e-06,
"loss": 0.9446,
"step": 211
},
{
"epoch": 0.5353535353535354,
"grad_norm": 1.9986214790446168,
"learning_rate": 9.175642145782179e-06,
"loss": 0.9071,
"step": 212
},
{
"epoch": 0.5378787878787878,
"grad_norm": 2.0103448807107616,
"learning_rate": 9.16346636393808e-06,
"loss": 0.8628,
"step": 213
},
{
"epoch": 0.5404040404040404,
"grad_norm": 2.139518942316662,
"learning_rate": 9.151209524424333e-06,
"loss": 0.8637,
"step": 214
},
{
"epoch": 0.5429292929292929,
"grad_norm": 2.2418959229034976,
"learning_rate": 9.138871865866824e-06,
"loss": 0.8706,
"step": 215
},
{
"epoch": 0.5454545454545454,
"grad_norm": 1.773597029759882,
"learning_rate": 9.126453628464889e-06,
"loss": 0.7889,
"step": 216
},
{
"epoch": 0.547979797979798,
"grad_norm": 1.8729030157765316,
"learning_rate": 9.113955053986632e-06,
"loss": 0.8058,
"step": 217
},
{
"epoch": 0.5505050505050505,
"grad_norm": 1.9444764336843339,
"learning_rate": 9.10137638576423e-06,
"loss": 0.8663,
"step": 218
},
{
"epoch": 0.553030303030303,
"grad_norm": 2.0434549326382503,
"learning_rate": 9.088717868689186e-06,
"loss": 0.8584,
"step": 219
},
{
"epoch": 0.5555555555555556,
"grad_norm": 2.0558840850064732,
"learning_rate": 9.07597974920756e-06,
"loss": 0.851,
"step": 220
},
{
"epoch": 0.5580808080808081,
"grad_norm": 2.3103795320233895,
"learning_rate": 9.063162275315182e-06,
"loss": 0.8978,
"step": 221
},
{
"epoch": 0.5606060606060606,
"grad_norm": 2.2882825680122902,
"learning_rate": 9.05026569655281e-06,
"loss": 0.8543,
"step": 222
},
{
"epoch": 0.5631313131313131,
"grad_norm": 1.858627081051704,
"learning_rate": 9.037290264001286e-06,
"loss": 0.761,
"step": 223
},
{
"epoch": 0.5656565656565656,
"grad_norm": 1.7723152331238994,
"learning_rate": 9.02423623027663e-06,
"loss": 0.7985,
"step": 224
},
{
"epoch": 0.5681818181818182,
"grad_norm": 2.3287920071686563,
"learning_rate": 9.011103849525139e-06,
"loss": 0.767,
"step": 225
},
{
"epoch": 0.5707070707070707,
"grad_norm": 2.1100618612768325,
"learning_rate": 8.997893377418432e-06,
"loss": 0.8963,
"step": 226
},
{
"epoch": 0.5732323232323232,
"grad_norm": 1.9141391941126942,
"learning_rate": 8.984605071148471e-06,
"loss": 0.9046,
"step": 227
},
{
"epoch": 0.5757575757575758,
"grad_norm": 1.945820371963421,
"learning_rate": 8.971239189422555e-06,
"loss": 0.9058,
"step": 228
},
{
"epoch": 0.5782828282828283,
"grad_norm": 1.8884920614522993,
"learning_rate": 8.957795992458285e-06,
"loss": 0.8176,
"step": 229
},
{
"epoch": 0.5808080808080808,
"grad_norm": 2.0068445281526897,
"learning_rate": 8.944275741978495e-06,
"loss": 0.896,
"step": 230
},
{
"epoch": 0.5833333333333334,
"grad_norm": 2.0849232104116457,
"learning_rate": 8.93067870120616e-06,
"loss": 0.8404,
"step": 231
},
{
"epoch": 0.5858585858585859,
"grad_norm": 1.9736679573902556,
"learning_rate": 8.917005134859263e-06,
"loss": 0.872,
"step": 232
},
{
"epoch": 0.5883838383838383,
"grad_norm": 1.8961261028927838,
"learning_rate": 8.90325530914566e-06,
"loss": 0.9361,
"step": 233
},
{
"epoch": 0.5909090909090909,
"grad_norm": 1.8401285397782126,
"learning_rate": 8.889429491757872e-06,
"loss": 0.7926,
"step": 234
},
{
"epoch": 0.5934343434343434,
"grad_norm": 2.054766421059747,
"learning_rate": 8.875527951867895e-06,
"loss": 0.8966,
"step": 235
},
{
"epoch": 0.5959595959595959,
"grad_norm": 1.8886359562660195,
"learning_rate": 8.861550960121946e-06,
"loss": 0.8905,
"step": 236
},
{
"epoch": 0.5984848484848485,
"grad_norm": 1.937935720120698,
"learning_rate": 8.8474987886352e-06,
"loss": 0.834,
"step": 237
},
{
"epoch": 0.601010101010101,
"grad_norm": 1.8714368764130227,
"learning_rate": 8.833371710986493e-06,
"loss": 0.8174,
"step": 238
},
{
"epoch": 0.6035353535353535,
"grad_norm": 2.0258318422464927,
"learning_rate": 8.819170002212992e-06,
"loss": 0.7216,
"step": 239
},
{
"epoch": 0.6060606060606061,
"grad_norm": 1.8436814588534356,
"learning_rate": 8.804893938804839e-06,
"loss": 0.8658,
"step": 240
},
{
"epoch": 0.6085858585858586,
"grad_norm": 2.168928207961233,
"learning_rate": 8.790543798699778e-06,
"loss": 0.9487,
"step": 241
},
{
"epoch": 0.6111111111111112,
"grad_norm": 1.9050814720096372,
"learning_rate": 8.77611986127773e-06,
"loss": 0.8827,
"step": 242
},
{
"epoch": 0.6136363636363636,
"grad_norm": 1.8056573206534934,
"learning_rate": 8.761622407355364e-06,
"loss": 0.7706,
"step": 243
},
{
"epoch": 0.6161616161616161,
"grad_norm": 2.2836971241612494,
"learning_rate": 8.747051719180626e-06,
"loss": 0.8907,
"step": 244
},
{
"epoch": 0.6186868686868687,
"grad_norm": 1.9155514580242967,
"learning_rate": 8.732408080427247e-06,
"loss": 0.8784,
"step": 245
},
{
"epoch": 0.6212121212121212,
"grad_norm": 1.9161016051397366,
"learning_rate": 8.717691776189214e-06,
"loss": 0.8644,
"step": 246
},
{
"epoch": 0.6237373737373737,
"grad_norm": 1.8827878094486539,
"learning_rate": 8.702903092975226e-06,
"loss": 0.8698,
"step": 247
},
{
"epoch": 0.6262626262626263,
"grad_norm": 1.976346169802583,
"learning_rate": 8.688042318703111e-06,
"loss": 0.7587,
"step": 248
},
{
"epoch": 0.6287878787878788,
"grad_norm": 1.992707486790293,
"learning_rate": 8.673109742694227e-06,
"loss": 0.8267,
"step": 249
},
{
"epoch": 0.6313131313131313,
"grad_norm": 1.9031318288232517,
"learning_rate": 8.65810565566782e-06,
"loss": 0.8433,
"step": 250
},
{
"epoch": 0.6338383838383839,
"grad_norm": 2.049254205073817,
"learning_rate": 8.643030349735373e-06,
"loss": 0.8608,
"step": 251
},
{
"epoch": 0.6363636363636364,
"grad_norm": 1.9644724605966701,
"learning_rate": 8.627884118394913e-06,
"loss": 0.8718,
"step": 252
},
{
"epoch": 0.6388888888888888,
"grad_norm": 1.6682264310796693,
"learning_rate": 8.612667256525305e-06,
"loss": 0.8475,
"step": 253
},
{
"epoch": 0.6414141414141414,
"grad_norm": 2.613490037685885,
"learning_rate": 8.597380060380493e-06,
"loss": 0.8479,
"step": 254
},
{
"epoch": 0.6439393939393939,
"grad_norm": 1.7693017368034187,
"learning_rate": 8.582022827583758e-06,
"loss": 0.8064,
"step": 255
},
{
"epoch": 0.6464646464646465,
"grad_norm": 2.169053605927329,
"learning_rate": 8.566595857121902e-06,
"loss": 0.88,
"step": 256
},
{
"epoch": 0.648989898989899,
"grad_norm": 1.9003182666970035,
"learning_rate": 8.551099449339438e-06,
"loss": 0.8081,
"step": 257
},
{
"epoch": 0.6515151515151515,
"grad_norm": 1.9280203257276212,
"learning_rate": 8.535533905932739e-06,
"loss": 0.8254,
"step": 258
},
{
"epoch": 0.6540404040404041,
"grad_norm": 2.043483601822406,
"learning_rate": 8.519899529944166e-06,
"loss": 0.8136,
"step": 259
},
{
"epoch": 0.6565656565656566,
"grad_norm": 1.9525398121644078,
"learning_rate": 8.504196625756166e-06,
"loss": 0.8316,
"step": 260
},
{
"epoch": 0.6590909090909091,
"grad_norm": 1.790052985892469,
"learning_rate": 8.48842549908535e-06,
"loss": 0.8771,
"step": 261
},
{
"epoch": 0.6616161616161617,
"grad_norm": 1.7510060656489281,
"learning_rate": 8.472586456976534e-06,
"loss": 0.7637,
"step": 262
},
{
"epoch": 0.6641414141414141,
"grad_norm": 1.7803060405827564,
"learning_rate": 8.456679807796774e-06,
"loss": 0.7646,
"step": 263
},
{
"epoch": 0.6666666666666666,
"grad_norm": 1.7745786411187556,
"learning_rate": 8.440705861229344e-06,
"loss": 0.7379,
"step": 264
},
{
"epoch": 0.6691919191919192,
"grad_norm": 3.974237965994564,
"learning_rate": 8.424664928267724e-06,
"loss": 0.9469,
"step": 265
},
{
"epoch": 0.6717171717171717,
"grad_norm": 1.7194267487763608,
"learning_rate": 8.408557321209534e-06,
"loss": 0.8575,
"step": 266
},
{
"epoch": 0.6742424242424242,
"grad_norm": 2.068265812287207,
"learning_rate": 8.392383353650463e-06,
"loss": 0.8644,
"step": 267
},
{
"epoch": 0.6767676767676768,
"grad_norm": 2.4222313987284623,
"learning_rate": 8.376143340478153e-06,
"loss": 0.9031,
"step": 268
},
{
"epoch": 0.6792929292929293,
"grad_norm": 2.1388997365568665,
"learning_rate": 8.35983759786608e-06,
"loss": 0.8744,
"step": 269
},
{
"epoch": 0.6818181818181818,
"grad_norm": 2.0727775382982743,
"learning_rate": 8.34346644326739e-06,
"loss": 0.8674,
"step": 270
},
{
"epoch": 0.6843434343434344,
"grad_norm": 1.925670858682619,
"learning_rate": 8.327030195408723e-06,
"loss": 0.8733,
"step": 271
},
{
"epoch": 0.6868686868686869,
"grad_norm": 1.9068865675896571,
"learning_rate": 8.310529174284004e-06,
"loss": 0.8998,
"step": 272
},
{
"epoch": 0.6893939393939394,
"grad_norm": 1.961892999999656,
"learning_rate": 8.293963701148215e-06,
"loss": 0.8285,
"step": 273
},
{
"epoch": 0.6919191919191919,
"grad_norm": 2.151487477045568,
"learning_rate": 8.277334098511147e-06,
"loss": 0.8789,
"step": 274
},
{
"epoch": 0.6944444444444444,
"grad_norm": 2.1206541612242216,
"learning_rate": 8.260640690131108e-06,
"loss": 0.9662,
"step": 275
},
{
"epoch": 0.696969696969697,
"grad_norm": 2.108614871486297,
"learning_rate": 8.243883801008632e-06,
"loss": 0.8475,
"step": 276
},
{
"epoch": 0.6994949494949495,
"grad_norm": 2.3758021548155663,
"learning_rate": 8.227063757380141e-06,
"loss": 0.9644,
"step": 277
},
{
"epoch": 0.702020202020202,
"grad_norm": 1.8968879049693967,
"learning_rate": 8.210180886711603e-06,
"loss": 0.8401,
"step": 278
},
{
"epoch": 0.7045454545454546,
"grad_norm": 2.069928978154748,
"learning_rate": 8.193235517692154e-06,
"loss": 0.8528,
"step": 279
},
{
"epoch": 0.7070707070707071,
"grad_norm": 1.7679958716225839,
"learning_rate": 8.176227980227693e-06,
"loss": 0.7431,
"step": 280
},
{
"epoch": 0.7095959595959596,
"grad_norm": 1.8371038119799812,
"learning_rate": 8.159158605434468e-06,
"loss": 0.8477,
"step": 281
},
{
"epoch": 0.7121212121212122,
"grad_norm": 2.0033292734118446,
"learning_rate": 8.142027725632622e-06,
"loss": 0.8595,
"step": 282
},
{
"epoch": 0.7146464646464646,
"grad_norm": 1.9666826549049625,
"learning_rate": 8.12483567433973e-06,
"loss": 0.8781,
"step": 283
},
{
"epoch": 0.7171717171717171,
"grad_norm": 2.0488854959000977,
"learning_rate": 8.107582786264299e-06,
"loss": 0.8168,
"step": 284
},
{
"epoch": 0.7196969696969697,
"grad_norm": 1.9832328693747412,
"learning_rate": 8.09026939729925e-06,
"loss": 0.8,
"step": 285
},
{
"epoch": 0.7222222222222222,
"grad_norm": 1.9572452802521947,
"learning_rate": 8.072895844515398e-06,
"loss": 0.8413,
"step": 286
},
{
"epoch": 0.7247474747474747,
"grad_norm": 1.7749354609626558,
"learning_rate": 8.055462466154862e-06,
"loss": 0.7075,
"step": 287
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.8371349797784025,
"learning_rate": 8.037969601624495e-06,
"loss": 0.7253,
"step": 288
},
{
"epoch": 0.7297979797979798,
"grad_norm": 1.8035197194891128,
"learning_rate": 8.020417591489279e-06,
"loss": 0.8174,
"step": 289
},
{
"epoch": 0.7323232323232324,
"grad_norm": 1.6306578456897418,
"learning_rate": 8.002806777465685e-06,
"loss": 0.738,
"step": 290
},
{
"epoch": 0.7348484848484849,
"grad_norm": 1.73807382690707,
"learning_rate": 7.985137502415027e-06,
"loss": 0.8181,
"step": 291
},
{
"epoch": 0.7373737373737373,
"grad_norm": 1.9090998993125434,
"learning_rate": 7.967410110336782e-06,
"loss": 0.7976,
"step": 292
},
{
"epoch": 0.73989898989899,
"grad_norm": 2.25275759204153,
"learning_rate": 7.9496249463619e-06,
"loss": 0.9196,
"step": 293
},
{
"epoch": 0.7424242424242424,
"grad_norm": 1.8106675829750696,
"learning_rate": 7.931782356746076e-06,
"loss": 0.7942,
"step": 294
},
{
"epoch": 0.7449494949494949,
"grad_norm": 1.5696903389204402,
"learning_rate": 7.913882688863015e-06,
"loss": 0.6422,
"step": 295
},
{
"epoch": 0.7474747474747475,
"grad_norm": 1.7649118446474967,
"learning_rate": 7.895926291197667e-06,
"loss": 0.7771,
"step": 296
},
{
"epoch": 0.75,
"grad_norm": 1.7680880977912667,
"learning_rate": 7.877913513339444e-06,
"loss": 0.81,
"step": 297
},
{
"epoch": 0.7525252525252525,
"grad_norm": 1.7662825615080433,
"learning_rate": 7.859844705975405e-06,
"loss": 0.8059,
"step": 298
},
{
"epoch": 0.7550505050505051,
"grad_norm": 2.062970191528715,
"learning_rate": 7.841720220883446e-06,
"loss": 0.9093,
"step": 299
},
{
"epoch": 0.7575757575757576,
"grad_norm": 2.028494596085333,
"learning_rate": 7.823540410925434e-06,
"loss": 0.8106,
"step": 300
},
{
"epoch": 0.76010101010101,
"grad_norm": 1.950977762384559,
"learning_rate": 7.80530563004035e-06,
"loss": 0.9017,
"step": 301
},
{
"epoch": 0.7626262626262627,
"grad_norm": 1.9040681093562302,
"learning_rate": 7.787016233237387e-06,
"loss": 0.8162,
"step": 302
},
{
"epoch": 0.7651515151515151,
"grad_norm": 2.5554666858923705,
"learning_rate": 7.768672576589046e-06,
"loss": 0.6554,
"step": 303
},
{
"epoch": 0.7676767676767676,
"grad_norm": 2.174576840851388,
"learning_rate": 7.750275017224208e-06,
"loss": 0.7403,
"step": 304
},
{
"epoch": 0.7702020202020202,
"grad_norm": 2.275541516995631,
"learning_rate": 7.731823913321162e-06,
"loss": 0.8125,
"step": 305
},
{
"epoch": 0.7727272727272727,
"grad_norm": 1.8953811386555475,
"learning_rate": 7.713319624100657e-06,
"loss": 0.8217,
"step": 306
},
{
"epoch": 0.7752525252525253,
"grad_norm": 1.9513924952808503,
"learning_rate": 7.69476250981889e-06,
"loss": 0.8352,
"step": 307
},
{
"epoch": 0.7777777777777778,
"grad_norm": 1.839984818748542,
"learning_rate": 7.676152931760496e-06,
"loss": 0.6912,
"step": 308
},
{
"epoch": 0.7803030303030303,
"grad_norm": 2.0007926943035814,
"learning_rate": 7.657491252231525e-06,
"loss": 0.8684,
"step": 309
},
{
"epoch": 0.7828282828282829,
"grad_norm": 1.73885712167699,
"learning_rate": 7.638777834552372e-06,
"loss": 0.8178,
"step": 310
},
{
"epoch": 0.7853535353535354,
"grad_norm": 1.7839085635872234,
"learning_rate": 7.620013043050713e-06,
"loss": 0.7378,
"step": 311
},
{
"epoch": 0.7878787878787878,
"grad_norm": 2.1524413742699524,
"learning_rate": 7.601197243054411e-06,
"loss": 0.8404,
"step": 312
},
{
"epoch": 0.7904040404040404,
"grad_norm": 2.2372700737848725,
"learning_rate": 7.582330800884405e-06,
"loss": 0.8999,
"step": 313
},
{
"epoch": 0.7929292929292929,
"grad_norm": 5.349986251635676,
"learning_rate": 7.563414083847573e-06,
"loss": 0.8751,
"step": 314
},
{
"epoch": 0.7954545454545454,
"grad_norm": 1.978351063182721,
"learning_rate": 7.544447460229587e-06,
"loss": 0.8098,
"step": 315
},
{
"epoch": 0.797979797979798,
"grad_norm": 1.9358727476086006,
"learning_rate": 7.525431299287737e-06,
"loss": 0.8489,
"step": 316
},
{
"epoch": 0.8005050505050505,
"grad_norm": 1.8443699582247326,
"learning_rate": 7.506365971243746e-06,
"loss": 0.7742,
"step": 317
},
{
"epoch": 0.803030303030303,
"grad_norm": 2.1270044970514643,
"learning_rate": 7.4872518472765594e-06,
"loss": 0.8629,
"step": 318
},
{
"epoch": 0.8055555555555556,
"grad_norm": 1.8897818831419784,
"learning_rate": 7.4680892995151264e-06,
"loss": 0.8344,
"step": 319
},
{
"epoch": 0.8080808080808081,
"grad_norm": 1.7533284139089222,
"learning_rate": 7.4488787010311425e-06,
"loss": 0.7815,
"step": 320
},
{
"epoch": 0.8106060606060606,
"grad_norm": 1.8217064441842095,
"learning_rate": 7.429620425831795e-06,
"loss": 0.8229,
"step": 321
},
{
"epoch": 0.8131313131313131,
"grad_norm": 1.8443774034643523,
"learning_rate": 7.4103148488524824e-06,
"loss": 0.8327,
"step": 322
},
{
"epoch": 0.8156565656565656,
"grad_norm": 1.71061861511042,
"learning_rate": 7.390962345949506e-06,
"loss": 0.8333,
"step": 323
},
{
"epoch": 0.8181818181818182,
"grad_norm": 1.9249282530467713,
"learning_rate": 7.371563293892761e-06,
"loss": 0.8619,
"step": 324
},
{
"epoch": 0.8207070707070707,
"grad_norm": 1.9078528629692102,
"learning_rate": 7.3521180703584025e-06,
"loss": 0.7976,
"step": 325
},
{
"epoch": 0.8232323232323232,
"grad_norm": 1.9553632923518558,
"learning_rate": 7.3326270539214826e-06,
"loss": 0.8674,
"step": 326
},
{
"epoch": 0.8257575757575758,
"grad_norm": 1.8649422226230008,
"learning_rate": 7.3130906240485886e-06,
"loss": 0.8757,
"step": 327
},
{
"epoch": 0.8282828282828283,
"grad_norm": 2.1503617282081366,
"learning_rate": 7.293509161090453e-06,
"loss": 0.8237,
"step": 328
},
{
"epoch": 0.8308080808080808,
"grad_norm": 1.7726773517694598,
"learning_rate": 7.273883046274547e-06,
"loss": 0.8622,
"step": 329
},
{
"epoch": 0.8333333333333334,
"grad_norm": 1.8618494109928494,
"learning_rate": 7.2542126616976596e-06,
"loss": 0.7441,
"step": 330
},
{
"epoch": 0.8358585858585859,
"grad_norm": 1.8445449110851162,
"learning_rate": 7.234498390318461e-06,
"loss": 0.8222,
"step": 331
},
{
"epoch": 0.8383838383838383,
"grad_norm": 1.9965907584719484,
"learning_rate": 7.214740615950041e-06,
"loss": 0.9287,
"step": 332
},
{
"epoch": 0.8409090909090909,
"grad_norm": 1.7235368892533713,
"learning_rate": 7.194939723252442e-06,
"loss": 0.7969,
"step": 333
},
{
"epoch": 0.8434343434343434,
"grad_norm": 1.963468754530328,
"learning_rate": 7.175096097725169e-06,
"loss": 0.8491,
"step": 334
},
{
"epoch": 0.8459595959595959,
"grad_norm": 1.7529060619384105,
"learning_rate": 7.155210125699683e-06,
"loss": 0.7452,
"step": 335
},
{
"epoch": 0.8484848484848485,
"grad_norm": 1.9094288990757897,
"learning_rate": 7.135282194331881e-06,
"loss": 0.8763,
"step": 336
},
{
"epoch": 0.851010101010101,
"grad_norm": 2.0615037709551767,
"learning_rate": 7.1153126915945535e-06,
"loss": 0.9062,
"step": 337
},
{
"epoch": 0.8535353535353535,
"grad_norm": 1.8985209620402874,
"learning_rate": 7.095302006269842e-06,
"loss": 0.7929,
"step": 338
},
{
"epoch": 0.8560606060606061,
"grad_norm": 1.6863709302006293,
"learning_rate": 7.07525052794166e-06,
"loss": 0.8013,
"step": 339
},
{
"epoch": 0.8585858585858586,
"grad_norm": 2.1003775551534556,
"learning_rate": 7.05515864698811e-06,
"loss": 0.8575,
"step": 340
},
{
"epoch": 0.8611111111111112,
"grad_norm": 1.7564985063360334,
"learning_rate": 7.035026754573888e-06,
"loss": 0.7298,
"step": 341
},
{
"epoch": 0.8636363636363636,
"grad_norm": 1.7926910561007185,
"learning_rate": 7.014855242642662e-06,
"loss": 0.7733,
"step": 342
},
{
"epoch": 0.8661616161616161,
"grad_norm": 1.8244531408739597,
"learning_rate": 6.994644503909449e-06,
"loss": 0.8262,
"step": 343
},
{
"epoch": 0.8686868686868687,
"grad_norm": 2.115750819763352,
"learning_rate": 6.974394931852957e-06,
"loss": 0.897,
"step": 344
},
{
"epoch": 0.8712121212121212,
"grad_norm": 1.8678684874487301,
"learning_rate": 6.954106920707939e-06,
"loss": 0.8675,
"step": 345
},
{
"epoch": 0.8737373737373737,
"grad_norm": 1.85583492582409,
"learning_rate": 6.933780865457508e-06,
"loss": 0.7939,
"step": 346
},
{
"epoch": 0.8762626262626263,
"grad_norm": 1.9773792302096544,
"learning_rate": 6.913417161825449e-06,
"loss": 0.8055,
"step": 347
},
{
"epoch": 0.8787878787878788,
"grad_norm": 1.8773933567758432,
"learning_rate": 6.893016206268518e-06,
"loss": 0.7558,
"step": 348
},
{
"epoch": 0.8813131313131313,
"grad_norm": 1.8741310548444627,
"learning_rate": 6.872578395968717e-06,
"loss": 0.8972,
"step": 349
},
{
"epoch": 0.8838383838383839,
"grad_norm": 2.0570232185809534,
"learning_rate": 6.85210412882557e-06,
"loss": 0.8892,
"step": 350
},
{
"epoch": 0.8863636363636364,
"grad_norm": 1.8334034813197582,
"learning_rate": 6.831593803448366e-06,
"loss": 0.8222,
"step": 351
},
{
"epoch": 0.8888888888888888,
"grad_norm": 2.0414631036488697,
"learning_rate": 6.811047819148413e-06,
"loss": 0.8271,
"step": 352
},
{
"epoch": 0.8914141414141414,
"grad_norm": 2.1395166290453798,
"learning_rate": 6.7904665759312475e-06,
"loss": 0.868,
"step": 353
},
{
"epoch": 0.8939393939393939,
"grad_norm": 2.1713852135930654,
"learning_rate": 6.769850474488859e-06,
"loss": 0.7536,
"step": 354
},
{
"epoch": 0.8964646464646465,
"grad_norm": 1.8552852467473009,
"learning_rate": 6.74919991619188e-06,
"loss": 0.8107,
"step": 355
},
{
"epoch": 0.898989898989899,
"grad_norm": 2.2131805499276904,
"learning_rate": 6.728515303081782e-06,
"loss": 0.9007,
"step": 356
},
{
"epoch": 0.9015151515151515,
"grad_norm": 1.8262108057888577,
"learning_rate": 6.70779703786304e-06,
"loss": 0.7663,
"step": 357
},
{
"epoch": 0.9040404040404041,
"grad_norm": 1.7654934724925795,
"learning_rate": 6.687045523895292e-06,
"loss": 0.8482,
"step": 358
},
{
"epoch": 0.9065656565656566,
"grad_norm": 2.0222716520146604,
"learning_rate": 6.666261165185496e-06,
"loss": 0.8425,
"step": 359
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.8146911931025558,
"learning_rate": 6.64544436638005e-06,
"loss": 0.7418,
"step": 360
},
{
"epoch": 0.9116161616161617,
"grad_norm": 2.0507487893390586,
"learning_rate": 6.6245955327569285e-06,
"loss": 0.9247,
"step": 361
},
{
"epoch": 0.9141414141414141,
"grad_norm": 1.8107811580495732,
"learning_rate": 6.603715070217779e-06,
"loss": 0.8117,
"step": 362
},
{
"epoch": 0.9166666666666666,
"grad_norm": 1.8109962374127375,
"learning_rate": 6.58280338528003e-06,
"loss": 0.856,
"step": 363
},
{
"epoch": 0.9191919191919192,
"grad_norm": 1.625858444932743,
"learning_rate": 6.561860885068972e-06,
"loss": 0.7155,
"step": 364
},
{
"epoch": 0.9217171717171717,
"grad_norm": 2.42542998546532,
"learning_rate": 6.540887977309829e-06,
"loss": 0.8522,
"step": 365
},
{
"epoch": 0.9242424242424242,
"grad_norm": 1.7089973860276078,
"learning_rate": 6.519885070319827e-06,
"loss": 0.7429,
"step": 366
},
{
"epoch": 0.9267676767676768,
"grad_norm": 1.8479090384035146,
"learning_rate": 6.498852573000236e-06,
"loss": 0.8447,
"step": 367
},
{
"epoch": 0.9292929292929293,
"grad_norm": 1.86953053668735,
"learning_rate": 6.477790894828422e-06,
"loss": 0.7887,
"step": 368
},
{
"epoch": 0.9318181818181818,
"grad_norm": 1.6296062073831334,
"learning_rate": 6.456700445849857e-06,
"loss": 0.7601,
"step": 369
},
{
"epoch": 0.9343434343434344,
"grad_norm": 1.8624127469307328,
"learning_rate": 6.435581636670154e-06,
"loss": 0.8909,
"step": 370
},
{
"epoch": 0.9368686868686869,
"grad_norm": 1.833395549417114,
"learning_rate": 6.414434878447061e-06,
"loss": 0.8493,
"step": 371
},
{
"epoch": 0.9393939393939394,
"grad_norm": 2.0178488016114864,
"learning_rate": 6.393260582882462e-06,
"loss": 0.8411,
"step": 372
},
{
"epoch": 0.9419191919191919,
"grad_norm": 1.914850188815976,
"learning_rate": 6.372059162214358e-06,
"loss": 0.8661,
"step": 373
},
{
"epoch": 0.9444444444444444,
"grad_norm": 1.9879270438336352,
"learning_rate": 6.350831029208844e-06,
"loss": 0.8128,
"step": 374
},
{
"epoch": 0.946969696969697,
"grad_norm": 2.6253123532004854,
"learning_rate": 6.329576597152072e-06,
"loss": 0.8627,
"step": 375
},
{
"epoch": 0.9494949494949495,
"grad_norm": 1.791873041866805,
"learning_rate": 6.308296279842204e-06,
"loss": 0.7766,
"step": 376
},
{
"epoch": 0.952020202020202,
"grad_norm": 1.7079757746619704,
"learning_rate": 6.28699049158136e-06,
"loss": 0.7453,
"step": 377
},
{
"epoch": 0.9545454545454546,
"grad_norm": 1.845568322512042,
"learning_rate": 6.265659647167542e-06,
"loss": 0.8093,
"step": 378
},
{
"epoch": 0.9570707070707071,
"grad_norm": 2.0015555278085726,
"learning_rate": 6.244304161886574e-06,
"loss": 0.8598,
"step": 379
},
{
"epoch": 0.9595959595959596,
"grad_norm": 1.8143209529259592,
"learning_rate": 6.222924451504001e-06,
"loss": 0.873,
"step": 380
},
{
"epoch": 0.9621212121212122,
"grad_norm": 1.6213521262139028,
"learning_rate": 6.2015209322570025e-06,
"loss": 0.7611,
"step": 381
},
{
"epoch": 0.9646464646464646,
"grad_norm": 1.6897764914341882,
"learning_rate": 6.180094020846291e-06,
"loss": 0.7779,
"step": 382
},
{
"epoch": 0.9671717171717171,
"grad_norm": 1.9367905211851992,
"learning_rate": 6.158644134427994e-06,
"loss": 0.8889,
"step": 383
},
{
"epoch": 0.9696969696969697,
"grad_norm": 1.806193962773895,
"learning_rate": 6.1371716906055336e-06,
"loss": 0.7863,
"step": 384
},
{
"epoch": 0.9722222222222222,
"grad_norm": 2.215869828808879,
"learning_rate": 6.1156771074214995e-06,
"loss": 0.8672,
"step": 385
},
{
"epoch": 0.9747474747474747,
"grad_norm": 1.9204499491761957,
"learning_rate": 6.094160803349508e-06,
"loss": 0.8485,
"step": 386
},
{
"epoch": 0.9772727272727273,
"grad_norm": 1.7621513716925183,
"learning_rate": 6.0726231972860535e-06,
"loss": 0.8573,
"step": 387
},
{
"epoch": 0.9797979797979798,
"grad_norm": 1.7565462800448088,
"learning_rate": 6.051064708542357e-06,
"loss": 0.8873,
"step": 388
},
{
"epoch": 0.9823232323232324,
"grad_norm": 3.1233477508371545,
"learning_rate": 6.029485756836195e-06,
"loss": 0.7988,
"step": 389
},
{
"epoch": 0.9848484848484849,
"grad_norm": 2.4834044663399504,
"learning_rate": 6.00788676228374e-06,
"loss": 0.9911,
"step": 390
},
{
"epoch": 0.9873737373737373,
"grad_norm": 1.8777459555278433,
"learning_rate": 5.986268145391369e-06,
"loss": 0.797,
"step": 391
},
{
"epoch": 0.98989898989899,
"grad_norm": 1.9272215318033203,
"learning_rate": 5.964630327047485e-06,
"loss": 0.8664,
"step": 392
},
{
"epoch": 0.9924242424242424,
"grad_norm": 1.7816565298428428,
"learning_rate": 5.9429737285143185e-06,
"loss": 0.8341,
"step": 393
},
{
"epoch": 0.9949494949494949,
"grad_norm": 1.8921034636140552,
"learning_rate": 5.921298771419731e-06,
"loss": 0.8737,
"step": 394
},
{
"epoch": 0.9974747474747475,
"grad_norm": 1.910658574213566,
"learning_rate": 5.8996058777489985e-06,
"loss": 0.8056,
"step": 395
},
{
"epoch": 1.0,
"grad_norm": 1.6982767713892397,
"learning_rate": 5.877895469836604e-06,
"loss": 0.7788,
"step": 396
},
{
"epoch": 1.0025252525252526,
"grad_norm": 2.0151819880247155,
"learning_rate": 5.85616797035801e-06,
"loss": 0.7555,
"step": 397
},
{
"epoch": 1.005050505050505,
"grad_norm": 1.8702264848272523,
"learning_rate": 5.8344238023214305e-06,
"loss": 0.697,
"step": 398
},
{
"epoch": 1.0075757575757576,
"grad_norm": 1.84888782343721,
"learning_rate": 5.8126633890595984e-06,
"loss": 0.6753,
"step": 399
},
{
"epoch": 1.0101010101010102,
"grad_norm": 1.9760780939231746,
"learning_rate": 5.790887154221521e-06,
"loss": 0.6932,
"step": 400
},
{
"epoch": 1.0126262626262625,
"grad_norm": 1.7434719776213805,
"learning_rate": 5.76909552176423e-06,
"loss": 0.6968,
"step": 401
},
{
"epoch": 1.0151515151515151,
"grad_norm": 1.939053013906518,
"learning_rate": 5.747288915944533e-06,
"loss": 0.6622,
"step": 402
},
{
"epoch": 1.0176767676767677,
"grad_norm": 1.98379651663197,
"learning_rate": 5.725467761310751e-06,
"loss": 0.6291,
"step": 403
},
{
"epoch": 1.02020202020202,
"grad_norm": 2.8308774274700683,
"learning_rate": 5.703632482694453e-06,
"loss": 0.717,
"step": 404
},
{
"epoch": 1.0227272727272727,
"grad_norm": 1.9314151274448212,
"learning_rate": 5.681783505202182e-06,
"loss": 0.6728,
"step": 405
},
{
"epoch": 1.0252525252525253,
"grad_norm": 1.661620173801216,
"learning_rate": 5.659921254207183e-06,
"loss": 0.6101,
"step": 406
},
{
"epoch": 1.0277777777777777,
"grad_norm": 2.02879334784291,
"learning_rate": 5.638046155341121e-06,
"loss": 0.6745,
"step": 407
},
{
"epoch": 1.0303030303030303,
"grad_norm": 1.8956630365787561,
"learning_rate": 5.616158634485793e-06,
"loss": 0.6528,
"step": 408
},
{
"epoch": 1.0328282828282829,
"grad_norm": 1.9529747368382415,
"learning_rate": 5.59425911776484e-06,
"loss": 0.6699,
"step": 409
},
{
"epoch": 1.0353535353535352,
"grad_norm": 1.6901717477408587,
"learning_rate": 5.572348031535442e-06,
"loss": 0.6979,
"step": 410
},
{
"epoch": 1.0378787878787878,
"grad_norm": 1.6337522869276964,
"learning_rate": 5.5504258023800286e-06,
"loss": 0.6081,
"step": 411
},
{
"epoch": 1.0404040404040404,
"grad_norm": 2.7856355643266753,
"learning_rate": 5.528492857097966e-06,
"loss": 0.6752,
"step": 412
},
{
"epoch": 1.0429292929292928,
"grad_norm": 2.147691396873091,
"learning_rate": 5.506549622697251e-06,
"loss": 0.7083,
"step": 413
},
{
"epoch": 1.0454545454545454,
"grad_norm": 1.890326421007081,
"learning_rate": 5.484596526386198e-06,
"loss": 0.7217,
"step": 414
},
{
"epoch": 1.047979797979798,
"grad_norm": 1.9354947081443337,
"learning_rate": 5.46263399556512e-06,
"loss": 0.7037,
"step": 415
},
{
"epoch": 1.0505050505050506,
"grad_norm": 2.2483738951676897,
"learning_rate": 5.44066245781801e-06,
"loss": 0.6724,
"step": 416
},
{
"epoch": 1.053030303030303,
"grad_norm": 2.5449624996362274,
"learning_rate": 5.418682340904211e-06,
"loss": 0.6882,
"step": 417
},
{
"epoch": 1.0555555555555556,
"grad_norm": 1.694702876681095,
"learning_rate": 5.396694072750099e-06,
"loss": 0.5977,
"step": 418
},
{
"epoch": 1.0580808080808082,
"grad_norm": 1.6399354114541165,
"learning_rate": 5.374698081440737e-06,
"loss": 0.6976,
"step": 419
},
{
"epoch": 1.0606060606060606,
"grad_norm": 1.8612106562079718,
"learning_rate": 5.352694795211555e-06,
"loss": 0.6719,
"step": 420
},
{
"epoch": 1.0631313131313131,
"grad_norm": 1.7307696435188145,
"learning_rate": 5.330684642440003e-06,
"loss": 0.6285,
"step": 421
},
{
"epoch": 1.0656565656565657,
"grad_norm": 2.4691548308604236,
"learning_rate": 5.308668051637213e-06,
"loss": 0.7327,
"step": 422
},
{
"epoch": 1.0681818181818181,
"grad_norm": 1.8077017073271224,
"learning_rate": 5.28664545143966e-06,
"loss": 0.7054,
"step": 423
},
{
"epoch": 1.0707070707070707,
"grad_norm": 1.7065000267250496,
"learning_rate": 5.2646172706008154e-06,
"loss": 0.7195,
"step": 424
},
{
"epoch": 1.0732323232323233,
"grad_norm": 1.9619543394731633,
"learning_rate": 5.242583937982798e-06,
"loss": 0.7328,
"step": 425
},
{
"epoch": 1.0757575757575757,
"grad_norm": 1.693517608989433,
"learning_rate": 5.220545882548024e-06,
"loss": 0.671,
"step": 426
},
{
"epoch": 1.0782828282828283,
"grad_norm": 1.9237914420729643,
"learning_rate": 5.198503533350859e-06,
"loss": 0.7175,
"step": 427
},
{
"epoch": 1.0808080808080809,
"grad_norm": 1.7394825016163988,
"learning_rate": 5.176457319529264e-06,
"loss": 0.6614,
"step": 428
},
{
"epoch": 1.0833333333333333,
"grad_norm": 1.665040555481496,
"learning_rate": 5.154407670296434e-06,
"loss": 0.6207,
"step": 429
},
{
"epoch": 1.0858585858585859,
"grad_norm": 1.9144929503047374,
"learning_rate": 5.132355014932455e-06,
"loss": 0.653,
"step": 430
},
{
"epoch": 1.0883838383838385,
"grad_norm": 2.019822960039854,
"learning_rate": 5.1102997827759324e-06,
"loss": 0.6457,
"step": 431
},
{
"epoch": 1.0909090909090908,
"grad_norm": 2.7788025514050623,
"learning_rate": 5.088242403215644e-06,
"loss": 0.802,
"step": 432
},
{
"epoch": 1.0934343434343434,
"grad_norm": 1.9708878396751732,
"learning_rate": 5.06618330568217e-06,
"loss": 0.7458,
"step": 433
},
{
"epoch": 1.095959595959596,
"grad_norm": 1.9581748488896618,
"learning_rate": 5.0441229196395416e-06,
"loss": 0.6966,
"step": 434
},
{
"epoch": 1.0984848484848484,
"grad_norm": 1.7245127313943982,
"learning_rate": 5.022061674576871e-06,
"loss": 0.6045,
"step": 435
},
{
"epoch": 1.101010101010101,
"grad_norm": 1.83316729599805,
"learning_rate": 5e-06,
"loss": 0.6681,
"step": 436
},
{
"epoch": 1.1035353535353536,
"grad_norm": 2.036141294067019,
"learning_rate": 4.97793832542313e-06,
"loss": 0.7294,
"step": 437
},
{
"epoch": 1.106060606060606,
"grad_norm": 2.1530005369504552,
"learning_rate": 4.955877080360462e-06,
"loss": 0.6774,
"step": 438
},
{
"epoch": 1.1085858585858586,
"grad_norm": 1.7223929410113543,
"learning_rate": 4.933816694317832e-06,
"loss": 0.6727,
"step": 439
},
{
"epoch": 1.1111111111111112,
"grad_norm": 1.8176287723669216,
"learning_rate": 4.911757596784358e-06,
"loss": 0.6739,
"step": 440
},
{
"epoch": 1.1136363636363635,
"grad_norm": 1.8275228102234706,
"learning_rate": 4.889700217224068e-06,
"loss": 0.7391,
"step": 441
},
{
"epoch": 1.1161616161616161,
"grad_norm": 1.8199747771104646,
"learning_rate": 4.867644985067548e-06,
"loss": 0.6942,
"step": 442
},
{
"epoch": 1.1186868686868687,
"grad_norm": 1.6763484984193686,
"learning_rate": 4.845592329703568e-06,
"loss": 0.634,
"step": 443
},
{
"epoch": 1.121212121212121,
"grad_norm": 1.831762311688415,
"learning_rate": 4.823542680470738e-06,
"loss": 0.6618,
"step": 444
},
{
"epoch": 1.1237373737373737,
"grad_norm": 1.7825984242405961,
"learning_rate": 4.801496466649143e-06,
"loss": 0.6576,
"step": 445
},
{
"epoch": 1.1262626262626263,
"grad_norm": 2.0226073897654695,
"learning_rate": 4.779454117451978e-06,
"loss": 0.694,
"step": 446
},
{
"epoch": 1.128787878787879,
"grad_norm": 1.661129371317442,
"learning_rate": 4.757416062017203e-06,
"loss": 0.6314,
"step": 447
},
{
"epoch": 1.1313131313131313,
"grad_norm": 2.3421784999535142,
"learning_rate": 4.7353827293991845e-06,
"loss": 0.7612,
"step": 448
},
{
"epoch": 1.1338383838383839,
"grad_norm": 1.680068360070798,
"learning_rate": 4.713354548560342e-06,
"loss": 0.6484,
"step": 449
},
{
"epoch": 1.1363636363636362,
"grad_norm": 1.7981146017286478,
"learning_rate": 4.691331948362789e-06,
"loss": 0.6893,
"step": 450
},
{
"epoch": 1.1388888888888888,
"grad_norm": 1.9252682229603164,
"learning_rate": 4.669315357559999e-06,
"loss": 0.6768,
"step": 451
},
{
"epoch": 1.1414141414141414,
"grad_norm": 2.0267347569333793,
"learning_rate": 4.647305204788445e-06,
"loss": 0.7787,
"step": 452
},
{
"epoch": 1.143939393939394,
"grad_norm": 1.8024146135884664,
"learning_rate": 4.625301918559264e-06,
"loss": 0.6817,
"step": 453
},
{
"epoch": 1.1464646464646464,
"grad_norm": 2.105549421601638,
"learning_rate": 4.603305927249902e-06,
"loss": 0.7381,
"step": 454
},
{
"epoch": 1.148989898989899,
"grad_norm": 1.5988279877052975,
"learning_rate": 4.5813176590957896e-06,
"loss": 0.6529,
"step": 455
},
{
"epoch": 1.1515151515151516,
"grad_norm": 3.19661858368343,
"learning_rate": 4.559337542181993e-06,
"loss": 0.5975,
"step": 456
},
{
"epoch": 1.154040404040404,
"grad_norm": 1.6181855725854595,
"learning_rate": 4.537366004434882e-06,
"loss": 0.6894,
"step": 457
},
{
"epoch": 1.1565656565656566,
"grad_norm": 1.7620224863908522,
"learning_rate": 4.5154034736138035e-06,
"loss": 0.7141,
"step": 458
},
{
"epoch": 1.1590909090909092,
"grad_norm": 2.2322609251857672,
"learning_rate": 4.49345037730275e-06,
"loss": 0.7007,
"step": 459
},
{
"epoch": 1.1616161616161615,
"grad_norm": 1.9528868021945258,
"learning_rate": 4.471507142902036e-06,
"loss": 0.6728,
"step": 460
},
{
"epoch": 1.1641414141414141,
"grad_norm": 1.5891676961221326,
"learning_rate": 4.449574197619973e-06,
"loss": 0.5974,
"step": 461
},
{
"epoch": 1.1666666666666667,
"grad_norm": 1.630153654711676,
"learning_rate": 4.427651968464559e-06,
"loss": 0.6072,
"step": 462
},
{
"epoch": 1.1691919191919191,
"grad_norm": 1.8365837107533884,
"learning_rate": 4.4057408822351625e-06,
"loss": 0.7207,
"step": 463
},
{
"epoch": 1.1717171717171717,
"grad_norm": 2.0660037606553954,
"learning_rate": 4.383841365514208e-06,
"loss": 0.6881,
"step": 464
},
{
"epoch": 1.1742424242424243,
"grad_norm": 1.548087949689816,
"learning_rate": 4.3619538446588804e-06,
"loss": 0.5739,
"step": 465
},
{
"epoch": 1.1767676767676767,
"grad_norm": 1.960031461373245,
"learning_rate": 4.340078745792818e-06,
"loss": 0.6825,
"step": 466
},
{
"epoch": 1.1792929292929293,
"grad_norm": 1.5803392889258359,
"learning_rate": 4.3182164947978215e-06,
"loss": 0.6669,
"step": 467
},
{
"epoch": 1.1818181818181819,
"grad_norm": 1.7287418038659104,
"learning_rate": 4.296367517305548e-06,
"loss": 0.6497,
"step": 468
},
{
"epoch": 1.1843434343434343,
"grad_norm": 1.7273792266910883,
"learning_rate": 4.274532238689248e-06,
"loss": 0.7003,
"step": 469
},
{
"epoch": 1.1868686868686869,
"grad_norm": 1.7545603399346041,
"learning_rate": 4.252711084055468e-06,
"loss": 0.653,
"step": 470
},
{
"epoch": 1.1893939393939394,
"grad_norm": 1.5430286005004747,
"learning_rate": 4.230904478235772e-06,
"loss": 0.5536,
"step": 471
},
{
"epoch": 1.1919191919191918,
"grad_norm": 2.7579983804870345,
"learning_rate": 4.209112845778481e-06,
"loss": 0.6275,
"step": 472
},
{
"epoch": 1.1944444444444444,
"grad_norm": 1.6373617698150473,
"learning_rate": 4.187336610940402e-06,
"loss": 0.6636,
"step": 473
},
{
"epoch": 1.196969696969697,
"grad_norm": 1.445317861340389,
"learning_rate": 4.165576197678571e-06,
"loss": 0.6443,
"step": 474
},
{
"epoch": 1.1994949494949494,
"grad_norm": 1.6807094693125937,
"learning_rate": 4.143832029641992e-06,
"loss": 0.6689,
"step": 475
},
{
"epoch": 1.202020202020202,
"grad_norm": 1.9526685990978059,
"learning_rate": 4.122104530163397e-06,
"loss": 0.7311,
"step": 476
},
{
"epoch": 1.2045454545454546,
"grad_norm": 1.5445379031033923,
"learning_rate": 4.100394122251002e-06,
"loss": 0.5916,
"step": 477
},
{
"epoch": 1.2070707070707072,
"grad_norm": 1.818230967267352,
"learning_rate": 4.0787012285802695e-06,
"loss": 0.742,
"step": 478
},
{
"epoch": 1.2095959595959596,
"grad_norm": 1.5522760322551075,
"learning_rate": 4.057026271485682e-06,
"loss": 0.5363,
"step": 479
},
{
"epoch": 1.2121212121212122,
"grad_norm": 2.06159425782242,
"learning_rate": 4.035369672952516e-06,
"loss": 0.6938,
"step": 480
},
{
"epoch": 1.2146464646464645,
"grad_norm": 2.004330399756548,
"learning_rate": 4.013731854608633e-06,
"loss": 0.7032,
"step": 481
},
{
"epoch": 1.2171717171717171,
"grad_norm": 1.868645867065866,
"learning_rate": 3.992113237716261e-06,
"loss": 0.6987,
"step": 482
},
{
"epoch": 1.2196969696969697,
"grad_norm": 1.6968848461806507,
"learning_rate": 3.9705142431638065e-06,
"loss": 0.6324,
"step": 483
},
{
"epoch": 1.2222222222222223,
"grad_norm": 1.9130899105972559,
"learning_rate": 3.948935291457645e-06,
"loss": 0.6817,
"step": 484
},
{
"epoch": 1.2247474747474747,
"grad_norm": 1.8392221262693993,
"learning_rate": 3.927376802713948e-06,
"loss": 0.6865,
"step": 485
},
{
"epoch": 1.2272727272727273,
"grad_norm": 1.9747299875665696,
"learning_rate": 3.905839196650494e-06,
"loss": 0.6938,
"step": 486
},
{
"epoch": 1.22979797979798,
"grad_norm": 1.9355488368581049,
"learning_rate": 3.884322892578503e-06,
"loss": 0.7514,
"step": 487
},
{
"epoch": 1.2323232323232323,
"grad_norm": 1.7715900482316334,
"learning_rate": 3.862828309394469e-06,
"loss": 0.6616,
"step": 488
},
{
"epoch": 1.2348484848484849,
"grad_norm": 2.1728552826181766,
"learning_rate": 3.841355865572009e-06,
"loss": 0.6806,
"step": 489
},
{
"epoch": 1.2373737373737375,
"grad_norm": 2.197194696198873,
"learning_rate": 3.8199059791537105e-06,
"loss": 0.7416,
"step": 490
},
{
"epoch": 1.2398989898989898,
"grad_norm": 1.7576488788486417,
"learning_rate": 3.7984790677429988e-06,
"loss": 0.7056,
"step": 491
},
{
"epoch": 1.2424242424242424,
"grad_norm": 2.00153591162574,
"learning_rate": 3.777075548496001e-06,
"loss": 0.678,
"step": 492
},
{
"epoch": 1.244949494949495,
"grad_norm": 1.6840649250655566,
"learning_rate": 3.755695838113427e-06,
"loss": 0.61,
"step": 493
},
{
"epoch": 1.2474747474747474,
"grad_norm": 1.6993135647865107,
"learning_rate": 3.7343403528324574e-06,
"loss": 0.643,
"step": 494
},
{
"epoch": 1.25,
"grad_norm": 1.9522911881118852,
"learning_rate": 3.713009508418643e-06,
"loss": 0.729,
"step": 495
},
{
"epoch": 1.2525252525252526,
"grad_norm": 1.8582890266190994,
"learning_rate": 3.6917037201577977e-06,
"loss": 0.6329,
"step": 496
},
{
"epoch": 1.255050505050505,
"grad_norm": 2.212756976633153,
"learning_rate": 3.6704234028479296e-06,
"loss": 0.6245,
"step": 497
},
{
"epoch": 1.2575757575757576,
"grad_norm": 2.005966833059071,
"learning_rate": 3.649168970791157e-06,
"loss": 0.6293,
"step": 498
},
{
"epoch": 1.2601010101010102,
"grad_norm": 1.5087645705397317,
"learning_rate": 3.6279408377856445e-06,
"loss": 0.6346,
"step": 499
},
{
"epoch": 1.2626262626262625,
"grad_norm": 2.149662935207517,
"learning_rate": 3.6067394171175397e-06,
"loss": 0.7184,
"step": 500
},
{
"epoch": 1.2626262626262625,
"eval_loss": 0.7672516107559204,
"eval_runtime": 2.0292,
"eval_samples_per_second": 15.769,
"eval_steps_per_second": 3.942,
"step": 500
},
{
"epoch": 1.2651515151515151,
"grad_norm": 1.8951250364648924,
"learning_rate": 3.5855651215529397e-06,
"loss": 0.7794,
"step": 501
},
{
"epoch": 1.2676767676767677,
"grad_norm": 1.6368252921929487,
"learning_rate": 3.564418363329848e-06,
"loss": 0.6315,
"step": 502
},
{
"epoch": 1.2702020202020203,
"grad_norm": 1.813482568571102,
"learning_rate": 3.5432995541501445e-06,
"loss": 0.6947,
"step": 503
},
{
"epoch": 1.2727272727272727,
"grad_norm": 2.3173998961345443,
"learning_rate": 3.5222091051715803e-06,
"loss": 0.7438,
"step": 504
},
{
"epoch": 1.2752525252525253,
"grad_norm": 1.8270637577921853,
"learning_rate": 3.501147426999764e-06,
"loss": 0.6922,
"step": 505
},
{
"epoch": 1.2777777777777777,
"grad_norm": 1.6289384596285112,
"learning_rate": 3.480114929680176e-06,
"loss": 0.6127,
"step": 506
},
{
"epoch": 1.2803030303030303,
"grad_norm": 1.9992734679654756,
"learning_rate": 3.4591120226901724e-06,
"loss": 0.6332,
"step": 507
},
{
"epoch": 1.2828282828282829,
"grad_norm": 1.6361784212565884,
"learning_rate": 3.4381391149310294e-06,
"loss": 0.6304,
"step": 508
},
{
"epoch": 1.2853535353535355,
"grad_norm": 1.7148841762554012,
"learning_rate": 3.417196614719972e-06,
"loss": 0.6726,
"step": 509
},
{
"epoch": 1.2878787878787878,
"grad_norm": 1.8287678394828801,
"learning_rate": 3.3962849297822225e-06,
"loss": 0.6734,
"step": 510
},
{
"epoch": 1.2904040404040404,
"grad_norm": 1.5632710434516484,
"learning_rate": 3.375404467243073e-06,
"loss": 0.6736,
"step": 511
},
{
"epoch": 1.2929292929292928,
"grad_norm": 2.6392219084368986,
"learning_rate": 3.35455563361995e-06,
"loss": 0.8411,
"step": 512
},
{
"epoch": 1.2954545454545454,
"grad_norm": 2.027430629804721,
"learning_rate": 3.333738834814506e-06,
"loss": 0.7028,
"step": 513
},
{
"epoch": 1.297979797979798,
"grad_norm": 1.9296910996214989,
"learning_rate": 3.3129544761047093e-06,
"loss": 0.6179,
"step": 514
},
{
"epoch": 1.3005050505050506,
"grad_norm": 1.7926415760895449,
"learning_rate": 3.292202962136962e-06,
"loss": 0.6375,
"step": 515
},
{
"epoch": 1.303030303030303,
"grad_norm": 1.8438249685756931,
"learning_rate": 3.271484696918218e-06,
"loss": 0.7365,
"step": 516
},
{
"epoch": 1.3055555555555556,
"grad_norm": 1.6905841642540418,
"learning_rate": 3.250800083808121e-06,
"loss": 0.6461,
"step": 517
},
{
"epoch": 1.308080808080808,
"grad_norm": 1.6350096615621776,
"learning_rate": 3.2301495255111426e-06,
"loss": 0.632,
"step": 518
},
{
"epoch": 1.3106060606060606,
"grad_norm": 1.6914285562196705,
"learning_rate": 3.2095334240687524e-06,
"loss": 0.7002,
"step": 519
},
{
"epoch": 1.3131313131313131,
"grad_norm": 1.7717194264063816,
"learning_rate": 3.1889521808515888e-06,
"loss": 0.7291,
"step": 520
},
{
"epoch": 1.3156565656565657,
"grad_norm": 1.8177330071407203,
"learning_rate": 3.168406196551635e-06,
"loss": 0.7159,
"step": 521
},
{
"epoch": 1.3181818181818181,
"grad_norm": 1.8893730024155937,
"learning_rate": 3.1478958711744324e-06,
"loss": 0.6586,
"step": 522
},
{
"epoch": 1.3207070707070707,
"grad_norm": 1.8614328973570116,
"learning_rate": 3.127421604031284e-06,
"loss": 0.7007,
"step": 523
},
{
"epoch": 1.3232323232323233,
"grad_norm": 1.8121478977506655,
"learning_rate": 3.1069837937314846e-06,
"loss": 0.6598,
"step": 524
},
{
"epoch": 1.3257575757575757,
"grad_norm": 1.7164037295955057,
"learning_rate": 3.0865828381745515e-06,
"loss": 0.6357,
"step": 525
},
{
"epoch": 1.3282828282828283,
"grad_norm": 1.8242041926310077,
"learning_rate": 3.0662191345424925e-06,
"loss": 0.6808,
"step": 526
},
{
"epoch": 1.3308080808080809,
"grad_norm": 1.5945895230095066,
"learning_rate": 3.045893079292063e-06,
"loss": 0.59,
"step": 527
},
{
"epoch": 1.3333333333333333,
"grad_norm": 1.7127301788900566,
"learning_rate": 3.0256050681470446e-06,
"loss": 0.6102,
"step": 528
},
{
"epoch": 1.3358585858585859,
"grad_norm": 1.6213847656730418,
"learning_rate": 3.005355496090553e-06,
"loss": 0.5986,
"step": 529
},
{
"epoch": 1.3383838383838385,
"grad_norm": 1.803345468640096,
"learning_rate": 2.9851447573573383e-06,
"loss": 0.6574,
"step": 530
},
{
"epoch": 1.3409090909090908,
"grad_norm": 1.948769116789314,
"learning_rate": 2.964973245426115e-06,
"loss": 0.7144,
"step": 531
},
{
"epoch": 1.3434343434343434,
"grad_norm": 2.1530786770684553,
"learning_rate": 2.9448413530118912e-06,
"loss": 0.7315,
"step": 532
},
{
"epoch": 1.345959595959596,
"grad_norm": 1.951303591556974,
"learning_rate": 2.9247494720583415e-06,
"loss": 0.6916,
"step": 533
},
{
"epoch": 1.3484848484848486,
"grad_norm": 1.5465134830650122,
"learning_rate": 2.904697993730159e-06,
"loss": 0.5869,
"step": 534
},
{
"epoch": 1.351010101010101,
"grad_norm": 2.007369432946237,
"learning_rate": 2.8846873084054478e-06,
"loss": 0.7287,
"step": 535
},
{
"epoch": 1.3535353535353536,
"grad_norm": 2.268634919376844,
"learning_rate": 2.8647178056681197e-06,
"loss": 0.6766,
"step": 536
},
{
"epoch": 1.356060606060606,
"grad_norm": 1.64407065544787,
"learning_rate": 2.8447898743003166e-06,
"loss": 0.5837,
"step": 537
},
{
"epoch": 1.3585858585858586,
"grad_norm": 1.880029236568523,
"learning_rate": 2.8249039022748315e-06,
"loss": 0.7076,
"step": 538
},
{
"epoch": 1.3611111111111112,
"grad_norm": 1.6753610025488719,
"learning_rate": 2.8050602767475595e-06,
"loss": 0.6846,
"step": 539
},
{
"epoch": 1.3636363636363638,
"grad_norm": 1.902000454798254,
"learning_rate": 2.785259384049959e-06,
"loss": 0.6646,
"step": 540
},
{
"epoch": 1.3661616161616161,
"grad_norm": 1.7934078666570006,
"learning_rate": 2.7655016096815395e-06,
"loss": 0.6899,
"step": 541
},
{
"epoch": 1.3686868686868687,
"grad_norm": 1.8057809326009877,
"learning_rate": 2.745787338302341e-06,
"loss": 0.6852,
"step": 542
},
{
"epoch": 1.371212121212121,
"grad_norm": 1.780947262728089,
"learning_rate": 2.726116953725454e-06,
"loss": 0.6935,
"step": 543
},
{
"epoch": 1.3737373737373737,
"grad_norm": 4.3530717407122275,
"learning_rate": 2.706490838909547e-06,
"loss": 0.714,
"step": 544
},
{
"epoch": 1.3762626262626263,
"grad_norm": 1.9411555437226586,
"learning_rate": 2.686909375951413e-06,
"loss": 0.6778,
"step": 545
},
{
"epoch": 1.378787878787879,
"grad_norm": 1.8610555614788364,
"learning_rate": 2.6673729460785174e-06,
"loss": 0.6211,
"step": 546
},
{
"epoch": 1.3813131313131313,
"grad_norm": 2.6184556708787126,
"learning_rate": 2.647881929641598e-06,
"loss": 0.6866,
"step": 547
},
{
"epoch": 1.3838383838383839,
"grad_norm": 1.6281107230784941,
"learning_rate": 2.628436706107238e-06,
"loss": 0.6197,
"step": 548
},
{
"epoch": 1.3863636363636362,
"grad_norm": 1.6595343137304726,
"learning_rate": 2.609037654050497e-06,
"loss": 0.6615,
"step": 549
},
{
"epoch": 1.3888888888888888,
"grad_norm": 1.6673402222678864,
"learning_rate": 2.5896851511475184e-06,
"loss": 0.5901,
"step": 550
},
{
"epoch": 1.3914141414141414,
"grad_norm": 2.302491758651808,
"learning_rate": 2.5703795741682053e-06,
"loss": 0.7658,
"step": 551
},
{
"epoch": 1.393939393939394,
"grad_norm": 1.6296237893429244,
"learning_rate": 2.5511212989688587e-06,
"loss": 0.666,
"step": 552
},
{
"epoch": 1.3964646464646464,
"grad_norm": 2.067659562978043,
"learning_rate": 2.5319107004848752e-06,
"loss": 0.6324,
"step": 553
},
{
"epoch": 1.398989898989899,
"grad_norm": 1.6066377017160183,
"learning_rate": 2.5127481527234397e-06,
"loss": 0.6672,
"step": 554
},
{
"epoch": 1.4015151515151514,
"grad_norm": 1.8400516884572484,
"learning_rate": 2.493634028756255e-06,
"loss": 0.7568,
"step": 555
},
{
"epoch": 1.404040404040404,
"grad_norm": 1.8971795842420967,
"learning_rate": 2.4745687007122636e-06,
"loss": 0.6081,
"step": 556
},
{
"epoch": 1.4065656565656566,
"grad_norm": 2.074344175960706,
"learning_rate": 2.455552539770414e-06,
"loss": 0.6835,
"step": 557
},
{
"epoch": 1.4090909090909092,
"grad_norm": 1.9985985778861037,
"learning_rate": 2.436585916152426e-06,
"loss": 0.7305,
"step": 558
},
{
"epoch": 1.4116161616161615,
"grad_norm": 1.7811956768872474,
"learning_rate": 2.4176691991155966e-06,
"loss": 0.7483,
"step": 559
},
{
"epoch": 1.4141414141414141,
"grad_norm": 1.6287671284457417,
"learning_rate": 2.3988027569455895e-06,
"loss": 0.6604,
"step": 560
},
{
"epoch": 1.4166666666666667,
"grad_norm": 1.5932467520799263,
"learning_rate": 2.379986956949289e-06,
"loss": 0.6696,
"step": 561
},
{
"epoch": 1.4191919191919191,
"grad_norm": 1.6031603536441323,
"learning_rate": 2.361222165447628e-06,
"loss": 0.5734,
"step": 562
},
{
"epoch": 1.4217171717171717,
"grad_norm": 1.8339186169542514,
"learning_rate": 2.3425087477684767e-06,
"loss": 0.6683,
"step": 563
},
{
"epoch": 1.4242424242424243,
"grad_norm": 1.7649111644578288,
"learning_rate": 2.323847068239504e-06,
"loss": 0.6564,
"step": 564
},
{
"epoch": 1.4267676767676767,
"grad_norm": 1.7321766945109096,
"learning_rate": 2.305237490181112e-06,
"loss": 0.6365,
"step": 565
},
{
"epoch": 1.4292929292929293,
"grad_norm": 2.0190477280634074,
"learning_rate": 2.2866803758993446e-06,
"loss": 0.7339,
"step": 566
},
{
"epoch": 1.4318181818181819,
"grad_norm": 1.6794056984497103,
"learning_rate": 2.2681760866788397e-06,
"loss": 0.5837,
"step": 567
},
{
"epoch": 1.4343434343434343,
"grad_norm": 1.7014245507418744,
"learning_rate": 2.2497249827757933e-06,
"loss": 0.661,
"step": 568
},
{
"epoch": 1.4368686868686869,
"grad_norm": 1.8691256550293922,
"learning_rate": 2.231327423410954e-06,
"loss": 0.6835,
"step": 569
},
{
"epoch": 1.4393939393939394,
"grad_norm": 1.6339075207343308,
"learning_rate": 2.2129837667626147e-06,
"loss": 0.5841,
"step": 570
},
{
"epoch": 1.441919191919192,
"grad_norm": 1.5860685183372798,
"learning_rate": 2.1946943699596516e-06,
"loss": 0.605,
"step": 571
},
{
"epoch": 1.4444444444444444,
"grad_norm": 1.8572941946426866,
"learning_rate": 2.176459589074566e-06,
"loss": 0.6841,
"step": 572
},
{
"epoch": 1.446969696969697,
"grad_norm": 1.7861127437673787,
"learning_rate": 2.158279779116555e-06,
"loss": 0.694,
"step": 573
},
{
"epoch": 1.4494949494949494,
"grad_norm": 1.7305571112075897,
"learning_rate": 2.1401552940245962e-06,
"loss": 0.6743,
"step": 574
},
{
"epoch": 1.452020202020202,
"grad_norm": 1.5967045155709891,
"learning_rate": 2.122086486660559e-06,
"loss": 0.6094,
"step": 575
},
{
"epoch": 1.4545454545454546,
"grad_norm": 1.8577075339501332,
"learning_rate": 2.1040737088023323e-06,
"loss": 0.7651,
"step": 576
},
{
"epoch": 1.4570707070707072,
"grad_norm": 1.726189282621056,
"learning_rate": 2.086117311136987e-06,
"loss": 0.6793,
"step": 577
},
{
"epoch": 1.4595959595959596,
"grad_norm": 1.690766865913275,
"learning_rate": 2.068217643253925e-06,
"loss": 0.6881,
"step": 578
},
{
"epoch": 1.4621212121212122,
"grad_norm": 1.624207744774946,
"learning_rate": 2.0503750536381016e-06,
"loss": 0.6569,
"step": 579
},
{
"epoch": 1.4646464646464645,
"grad_norm": 1.7904386347463785,
"learning_rate": 2.0325898896632178e-06,
"loss": 0.6764,
"step": 580
},
{
"epoch": 1.4671717171717171,
"grad_norm": 1.7125150031996876,
"learning_rate": 2.0148624975849755e-06,
"loss": 0.6754,
"step": 581
},
{
"epoch": 1.4696969696969697,
"grad_norm": 1.7187348343360536,
"learning_rate": 1.997193222534316e-06,
"loss": 0.7046,
"step": 582
},
{
"epoch": 1.4722222222222223,
"grad_norm": 1.917931828878889,
"learning_rate": 1.9795824085107217e-06,
"loss": 0.7151,
"step": 583
},
{
"epoch": 1.4747474747474747,
"grad_norm": 1.5192199933440467,
"learning_rate": 1.962030398375506e-06,
"loss": 0.5798,
"step": 584
},
{
"epoch": 1.4772727272727273,
"grad_norm": 2.0512930069007593,
"learning_rate": 1.9445375338451405e-06,
"loss": 0.6894,
"step": 585
},
{
"epoch": 1.4797979797979797,
"grad_norm": 1.8832475332645238,
"learning_rate": 1.927104155484602e-06,
"loss": 0.689,
"step": 586
},
{
"epoch": 1.4823232323232323,
"grad_norm": 1.782529987774134,
"learning_rate": 1.9097306027007495e-06,
"loss": 0.7041,
"step": 587
},
{
"epoch": 1.4848484848484849,
"grad_norm": 1.9555053069883206,
"learning_rate": 1.8924172137357038e-06,
"loss": 0.6622,
"step": 588
},
{
"epoch": 1.4873737373737375,
"grad_norm": 1.7053197030867797,
"learning_rate": 1.8751643256602714e-06,
"loss": 0.6673,
"step": 589
},
{
"epoch": 1.4898989898989898,
"grad_norm": 2.2735630376797835,
"learning_rate": 1.8579722743673773e-06,
"loss": 0.66,
"step": 590
},
{
"epoch": 1.4924242424242424,
"grad_norm": 1.738268204762897,
"learning_rate": 1.840841394565534e-06,
"loss": 0.6586,
"step": 591
},
{
"epoch": 1.494949494949495,
"grad_norm": 1.649342879222012,
"learning_rate": 1.8237720197723075e-06,
"loss": 0.6976,
"step": 592
},
{
"epoch": 1.4974747474747474,
"grad_norm": 1.9338125901586305,
"learning_rate": 1.806764482307848e-06,
"loss": 0.6995,
"step": 593
},
{
"epoch": 1.5,
"grad_norm": 1.5470373216575508,
"learning_rate": 1.789819113288397e-06,
"loss": 0.5431,
"step": 594
},
{
"epoch": 1.5025252525252526,
"grad_norm": 1.9810574766528408,
"learning_rate": 1.772936242619862e-06,
"loss": 0.6299,
"step": 595
},
{
"epoch": 1.5050505050505052,
"grad_norm": 1.8273017497040147,
"learning_rate": 1.75611619899137e-06,
"loss": 0.7285,
"step": 596
},
{
"epoch": 1.5075757575757576,
"grad_norm": 2.2389952418401324,
"learning_rate": 1.7393593098688933e-06,
"loss": 0.6912,
"step": 597
},
{
"epoch": 1.51010101010101,
"grad_norm": 2.1167128184663837,
"learning_rate": 1.7226659014888548e-06,
"loss": 0.5674,
"step": 598
},
{
"epoch": 1.5126262626262625,
"grad_norm": 1.5189372519313946,
"learning_rate": 1.706036298851787e-06,
"loss": 0.6164,
"step": 599
},
{
"epoch": 1.5151515151515151,
"grad_norm": 2.0517897654347412,
"learning_rate": 1.689470825715998e-06,
"loss": 0.7586,
"step": 600
},
{
"epoch": 1.5176767676767677,
"grad_norm": 1.6854349752110218,
"learning_rate": 1.672969804591279e-06,
"loss": 0.6682,
"step": 601
},
{
"epoch": 1.5202020202020203,
"grad_norm": 1.890320205166573,
"learning_rate": 1.6565335567326112e-06,
"loss": 0.7209,
"step": 602
},
{
"epoch": 1.5227272727272727,
"grad_norm": 1.7829138138501177,
"learning_rate": 1.6401624021339218e-06,
"loss": 0.6788,
"step": 603
},
{
"epoch": 1.5252525252525253,
"grad_norm": 1.8864040831252018,
"learning_rate": 1.6238566595218475e-06,
"loss": 0.691,
"step": 604
},
{
"epoch": 1.5277777777777777,
"grad_norm": 1.706813930915375,
"learning_rate": 1.6076166463495384e-06,
"loss": 0.6563,
"step": 605
},
{
"epoch": 1.5303030303030303,
"grad_norm": 1.8669005685652453,
"learning_rate": 1.591442678790467e-06,
"loss": 0.6403,
"step": 606
},
{
"epoch": 1.5328282828282829,
"grad_norm": 1.517294796063533,
"learning_rate": 1.575335071732278e-06,
"loss": 0.5987,
"step": 607
},
{
"epoch": 1.5353535353535355,
"grad_norm": 1.6407289000278769,
"learning_rate": 1.5592941387706562e-06,
"loss": 0.637,
"step": 608
},
{
"epoch": 1.5378787878787878,
"grad_norm": 1.6128958774877917,
"learning_rate": 1.5433201922032266e-06,
"loss": 0.619,
"step": 609
},
{
"epoch": 1.5404040404040404,
"grad_norm": 1.860507402064171,
"learning_rate": 1.5274135430234654e-06,
"loss": 0.6624,
"step": 610
},
{
"epoch": 1.5429292929292928,
"grad_norm": 1.7193759916923494,
"learning_rate": 1.5115745009146521e-06,
"loss": 0.6791,
"step": 611
},
{
"epoch": 1.5454545454545454,
"grad_norm": 1.4718917168427323,
"learning_rate": 1.4958033742438348e-06,
"loss": 0.5304,
"step": 612
},
{
"epoch": 1.547979797979798,
"grad_norm": 1.8013812172501598,
"learning_rate": 1.4801004700558358e-06,
"loss": 0.7007,
"step": 613
},
{
"epoch": 1.5505050505050506,
"grad_norm": 1.6866327135314982,
"learning_rate": 1.4644660940672628e-06,
"loss": 0.7416,
"step": 614
},
{
"epoch": 1.553030303030303,
"grad_norm": 1.51261281151324,
"learning_rate": 1.448900550660564e-06,
"loss": 0.6064,
"step": 615
},
{
"epoch": 1.5555555555555556,
"grad_norm": 1.5622715563882266,
"learning_rate": 1.4334041428781003e-06,
"loss": 0.594,
"step": 616
},
{
"epoch": 1.558080808080808,
"grad_norm": 1.8019263250670579,
"learning_rate": 1.4179771724162428e-06,
"loss": 0.6374,
"step": 617
},
{
"epoch": 1.5606060606060606,
"grad_norm": 1.825688860602252,
"learning_rate": 1.4026199396195078e-06,
"loss": 0.697,
"step": 618
},
{
"epoch": 1.5631313131313131,
"grad_norm": 1.7526908861748036,
"learning_rate": 1.3873327434746975e-06,
"loss": 0.6335,
"step": 619
},
{
"epoch": 1.5656565656565657,
"grad_norm": 1.697579840706317,
"learning_rate": 1.3721158816050872e-06,
"loss": 0.6855,
"step": 620
},
{
"epoch": 1.5681818181818183,
"grad_norm": 1.7940704919312578,
"learning_rate": 1.3569696502646274e-06,
"loss": 0.6542,
"step": 621
},
{
"epoch": 1.5707070707070707,
"grad_norm": 1.7049660777299587,
"learning_rate": 1.3418943443321807e-06,
"loss": 0.611,
"step": 622
},
{
"epoch": 1.573232323232323,
"grad_norm": 1.4831000798463816,
"learning_rate": 1.326890257305774e-06,
"loss": 0.5863,
"step": 623
},
{
"epoch": 1.5757575757575757,
"grad_norm": 1.7387915508538903,
"learning_rate": 1.3119576812968893e-06,
"loss": 0.6183,
"step": 624
},
{
"epoch": 1.5782828282828283,
"grad_norm": 1.6617812103928422,
"learning_rate": 1.2970969070247742e-06,
"loss": 0.6581,
"step": 625
},
{
"epoch": 1.5808080808080809,
"grad_norm": 1.5443549815199022,
"learning_rate": 1.282308223810786e-06,
"loss": 0.5937,
"step": 626
},
{
"epoch": 1.5833333333333335,
"grad_norm": 1.7206885547433117,
"learning_rate": 1.2675919195727537e-06,
"loss": 0.6497,
"step": 627
},
{
"epoch": 1.5858585858585859,
"grad_norm": 1.641246412713275,
"learning_rate": 1.252948280819375e-06,
"loss": 0.6634,
"step": 628
},
{
"epoch": 1.5883838383838382,
"grad_norm": 1.658941492636846,
"learning_rate": 1.2383775926446367e-06,
"loss": 0.589,
"step": 629
},
{
"epoch": 1.5909090909090908,
"grad_norm": 1.5853192673433159,
"learning_rate": 1.2238801387222716e-06,
"loss": 0.6252,
"step": 630
},
{
"epoch": 1.5934343434343434,
"grad_norm": 1.6646772150567526,
"learning_rate": 1.2094562013002237e-06,
"loss": 0.6597,
"step": 631
},
{
"epoch": 1.595959595959596,
"grad_norm": 2.0083218895497685,
"learning_rate": 1.1951060611951615e-06,
"loss": 0.7277,
"step": 632
},
{
"epoch": 1.5984848484848486,
"grad_norm": 2.0275628557504826,
"learning_rate": 1.1808299977870087e-06,
"loss": 0.6469,
"step": 633
},
{
"epoch": 1.601010101010101,
"grad_norm": 1.8159491851063516,
"learning_rate": 1.1666282890135083e-06,
"loss": 0.6836,
"step": 634
},
{
"epoch": 1.6035353535353534,
"grad_norm": 1.4670418065728978,
"learning_rate": 1.1525012113648004e-06,
"loss": 0.6143,
"step": 635
},
{
"epoch": 1.606060606060606,
"grad_norm": 1.6568607880578878,
"learning_rate": 1.1384490398780563e-06,
"loss": 0.6267,
"step": 636
},
{
"epoch": 1.6085858585858586,
"grad_norm": 1.89040441717663,
"learning_rate": 1.1244720481321058e-06,
"loss": 0.7109,
"step": 637
},
{
"epoch": 1.6111111111111112,
"grad_norm": 1.7316246669693305,
"learning_rate": 1.1105705082421303e-06,
"loss": 0.6787,
"step": 638
},
{
"epoch": 1.6136363636363638,
"grad_norm": 1.5718455684033728,
"learning_rate": 1.0967446908543417e-06,
"loss": 0.6459,
"step": 639
},
{
"epoch": 1.6161616161616161,
"grad_norm": 1.6321561282478754,
"learning_rate": 1.0829948651407374e-06,
"loss": 0.6678,
"step": 640
},
{
"epoch": 1.6186868686868687,
"grad_norm": 1.8318481430425226,
"learning_rate": 1.069321298793843e-06,
"loss": 0.6666,
"step": 641
},
{
"epoch": 1.621212121212121,
"grad_norm": 1.6333997342164235,
"learning_rate": 1.0557242580215066e-06,
"loss": 0.6853,
"step": 642
},
{
"epoch": 1.6237373737373737,
"grad_norm": 1.6163309463871554,
"learning_rate": 1.0422040075417157e-06,
"loss": 0.6852,
"step": 643
},
{
"epoch": 1.6262626262626263,
"grad_norm": 1.6322852310416047,
"learning_rate": 1.0287608105774456e-06,
"loss": 0.6145,
"step": 644
},
{
"epoch": 1.628787878787879,
"grad_norm": 1.7668701556839317,
"learning_rate": 1.0153949288515301e-06,
"loss": 0.7112,
"step": 645
},
{
"epoch": 1.6313131313131313,
"grad_norm": 2.0171547072042744,
"learning_rate": 1.002106622581569e-06,
"loss": 0.7314,
"step": 646
},
{
"epoch": 1.6338383838383839,
"grad_norm": 1.64868407848908,
"learning_rate": 9.888961504748613e-07,
"loss": 0.692,
"step": 647
},
{
"epoch": 1.6363636363636362,
"grad_norm": 1.5344002959052583,
"learning_rate": 9.757637697233723e-07,
"loss": 0.638,
"step": 648
},
{
"epoch": 1.6388888888888888,
"grad_norm": 1.664905994309316,
"learning_rate": 9.627097359987153e-07,
"loss": 0.6118,
"step": 649
},
{
"epoch": 1.6414141414141414,
"grad_norm": 2.3728427557483496,
"learning_rate": 9.497343034471896e-07,
"loss": 0.6701,
"step": 650
},
{
"epoch": 1.643939393939394,
"grad_norm": 1.6805849400740638,
"learning_rate": 9.368377246848176e-07,
"loss": 0.6732,
"step": 651
},
{
"epoch": 1.6464646464646466,
"grad_norm": 1.8244295966470996,
"learning_rate": 9.240202507924412e-07,
"loss": 0.6367,
"step": 652
},
{
"epoch": 1.648989898989899,
"grad_norm": 1.7964300030600315,
"learning_rate": 9.112821313108155e-07,
"loss": 0.5869,
"step": 653
},
{
"epoch": 1.6515151515151514,
"grad_norm": 1.5376235904587343,
"learning_rate": 8.986236142357707e-07,
"loss": 0.622,
"step": 654
},
{
"epoch": 1.654040404040404,
"grad_norm": 1.931394367647239,
"learning_rate": 8.860449460133696e-07,
"loss": 0.7682,
"step": 655
},
{
"epoch": 1.6565656565656566,
"grad_norm": 1.7593814077117897,
"learning_rate": 8.735463715351139e-07,
"loss": 0.7115,
"step": 656
},
{
"epoch": 1.6590909090909092,
"grad_norm": 1.8874340722561462,
"learning_rate": 8.611281341331768e-07,
"loss": 0.7355,
"step": 657
},
{
"epoch": 1.6616161616161618,
"grad_norm": 1.851442452550926,
"learning_rate": 8.487904755756676e-07,
"loss": 0.6798,
"step": 658
},
{
"epoch": 1.6641414141414141,
"grad_norm": 1.8201116026297284,
"learning_rate": 8.365336360619214e-07,
"loss": 0.6086,
"step": 659
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.5891962208788368,
"learning_rate": 8.243578542178227e-07,
"loss": 0.5905,
"step": 660
},
{
"epoch": 1.6691919191919191,
"grad_norm": 1.9794096135920143,
"learning_rate": 8.122633670911617e-07,
"loss": 0.6325,
"step": 661
},
{
"epoch": 1.6717171717171717,
"grad_norm": 1.678006921233951,
"learning_rate": 8.002504101470204e-07,
"loss": 0.6384,
"step": 662
},
{
"epoch": 1.6742424242424243,
"grad_norm": 1.471103490165062,
"learning_rate": 7.883192172631837e-07,
"loss": 0.6832,
"step": 663
},
{
"epoch": 1.676767676767677,
"grad_norm": 1.8904234330788816,
"learning_rate": 7.764700207255904e-07,
"loss": 0.6889,
"step": 664
},
{
"epoch": 1.6792929292929293,
"grad_norm": 1.6089769360522201,
"learning_rate": 7.647030512238074e-07,
"loss": 0.6078,
"step": 665
},
{
"epoch": 1.6818181818181817,
"grad_norm": 1.6538394654953832,
"learning_rate": 7.530185378465459e-07,
"loss": 0.6231,
"step": 666
},
{
"epoch": 1.6843434343434343,
"grad_norm": 1.7735177025840247,
"learning_rate": 7.414167080771867e-07,
"loss": 0.7194,
"step": 667
},
{
"epoch": 1.6868686868686869,
"grad_norm": 1.776911043707877,
"learning_rate": 7.298977877893688e-07,
"loss": 0.7593,
"step": 668
},
{
"epoch": 1.6893939393939394,
"grad_norm": 1.8809283081489958,
"learning_rate": 7.184620012425781e-07,
"loss": 0.6949,
"step": 669
},
{
"epoch": 1.691919191919192,
"grad_norm": 1.691369175357559,
"learning_rate": 7.071095710777925e-07,
"loss": 0.6971,
"step": 670
},
{
"epoch": 1.6944444444444444,
"grad_norm": 1.7467042677875488,
"learning_rate": 6.958407183131339e-07,
"loss": 0.6266,
"step": 671
},
{
"epoch": 1.696969696969697,
"grad_norm": 1.7461977137758284,
"learning_rate": 6.846556623395795e-07,
"loss": 0.6396,
"step": 672
},
{
"epoch": 1.6994949494949494,
"grad_norm": 1.8871606618510632,
"learning_rate": 6.735546209166822e-07,
"loss": 0.6317,
"step": 673
},
{
"epoch": 1.702020202020202,
"grad_norm": 1.805799206249301,
"learning_rate": 6.625378101683317e-07,
"loss": 0.6691,
"step": 674
},
{
"epoch": 1.7045454545454546,
"grad_norm": 1.833839380971258,
"learning_rate": 6.516054445785469e-07,
"loss": 0.7439,
"step": 675
},
{
"epoch": 1.7070707070707072,
"grad_norm": 1.9307619774472617,
"learning_rate": 6.40757736987307e-07,
"loss": 0.687,
"step": 676
},
{
"epoch": 1.7095959595959596,
"grad_norm": 1.8723106541411934,
"learning_rate": 6.299948985863963e-07,
"loss": 0.715,
"step": 677
},
{
"epoch": 1.7121212121212122,
"grad_norm": 2.121689633959957,
"learning_rate": 6.193171389152996e-07,
"loss": 0.701,
"step": 678
},
{
"epoch": 1.7146464646464645,
"grad_norm": 1.8335347326692133,
"learning_rate": 6.087246658571222e-07,
"loss": 0.6758,
"step": 679
},
{
"epoch": 1.7171717171717171,
"grad_norm": 1.7572632332503981,
"learning_rate": 5.982176856345445e-07,
"loss": 0.6283,
"step": 680
},
{
"epoch": 1.7196969696969697,
"grad_norm": 1.568503910441181,
"learning_rate": 5.877964028057976e-07,
"loss": 0.6363,
"step": 681
},
{
"epoch": 1.7222222222222223,
"grad_norm": 1.7328553043839632,
"learning_rate": 5.774610202606939e-07,
"loss": 0.6757,
"step": 682
},
{
"epoch": 1.7247474747474747,
"grad_norm": 1.8391143201108455,
"learning_rate": 5.672117392166688e-07,
"loss": 0.6749,
"step": 683
},
{
"epoch": 1.7272727272727273,
"grad_norm": 1.7170748574553403,
"learning_rate": 5.570487592148666e-07,
"loss": 0.7472,
"step": 684
},
{
"epoch": 1.7297979797979797,
"grad_norm": 1.5212264919185174,
"learning_rate": 5.469722781162495e-07,
"loss": 0.6439,
"step": 685
},
{
"epoch": 1.7323232323232323,
"grad_norm": 1.5392364391539088,
"learning_rate": 5.369824920977567e-07,
"loss": 0.647,
"step": 686
},
{
"epoch": 1.7348484848484849,
"grad_norm": 1.5922121294405573,
"learning_rate": 5.270795956484753e-07,
"loss": 0.6434,
"step": 687
},
{
"epoch": 1.7373737373737375,
"grad_norm": 1.8686546612153003,
"learning_rate": 5.172637815658583e-07,
"loss": 0.6517,
"step": 688
},
{
"epoch": 1.73989898989899,
"grad_norm": 1.8287474874246104,
"learning_rate": 5.075352409519679e-07,
"loss": 0.6253,
"step": 689
},
{
"epoch": 1.7424242424242424,
"grad_norm": 1.5360837393086406,
"learning_rate": 4.978941632097612e-07,
"loss": 0.7039,
"step": 690
},
{
"epoch": 1.7449494949494948,
"grad_norm": 1.587459125503549,
"learning_rate": 4.883407360393944e-07,
"loss": 0.6075,
"step": 691
},
{
"epoch": 1.7474747474747474,
"grad_norm": 1.6286942331459477,
"learning_rate": 4.788751454345763e-07,
"loss": 0.6148,
"step": 692
},
{
"epoch": 1.75,
"grad_norm": 1.5365048261320746,
"learning_rate": 4.6949757567893937e-07,
"loss": 0.6136,
"step": 693
},
{
"epoch": 1.7525252525252526,
"grad_norm": 1.626817667745233,
"learning_rate": 4.60208209342462e-07,
"loss": 0.6388,
"step": 694
},
{
"epoch": 1.7550505050505052,
"grad_norm": 1.680062811715941,
"learning_rate": 4.5100722727790427e-07,
"loss": 0.6834,
"step": 695
},
{
"epoch": 1.7575757575757576,
"grad_norm": 1.7225271859341769,
"learning_rate": 4.4189480861729137e-07,
"loss": 0.6509,
"step": 696
},
{
"epoch": 1.76010101010101,
"grad_norm": 1.4620723106872977,
"learning_rate": 4.3287113076842634e-07,
"loss": 0.6488,
"step": 697
},
{
"epoch": 1.7626262626262625,
"grad_norm": 2.617171730417706,
"learning_rate": 4.239363694114368e-07,
"loss": 0.7771,
"step": 698
},
{
"epoch": 1.7651515151515151,
"grad_norm": 1.6752015204347148,
"learning_rate": 4.150906984953529e-07,
"loss": 0.615,
"step": 699
},
{
"epoch": 1.7676767676767677,
"grad_norm": 1.6616100845650428,
"learning_rate": 4.0633429023472004e-07,
"loss": 0.6133,
"step": 700
},
{
"epoch": 1.7702020202020203,
"grad_norm": 1.6603423449473003,
"learning_rate": 3.9766731510624714e-07,
"loss": 0.7097,
"step": 701
},
{
"epoch": 1.7727272727272727,
"grad_norm": 1.5118570870316996,
"learning_rate": 3.890899418454913e-07,
"loss": 0.6034,
"step": 702
},
{
"epoch": 1.7752525252525253,
"grad_norm": 1.7094243504648734,
"learning_rate": 3.8060233744356634e-07,
"loss": 0.5538,
"step": 703
},
{
"epoch": 1.7777777777777777,
"grad_norm": 1.5311870307016724,
"learning_rate": 3.72204667143895e-07,
"loss": 0.6701,
"step": 704
},
{
"epoch": 1.7803030303030303,
"grad_norm": 1.7713843721868494,
"learning_rate": 3.6389709443899136e-07,
"loss": 0.6517,
"step": 705
},
{
"epoch": 1.7828282828282829,
"grad_norm": 1.589395718472918,
"learning_rate": 3.556797810672785e-07,
"loss": 0.6638,
"step": 706
},
{
"epoch": 1.7853535353535355,
"grad_norm": 1.6229737802024795,
"learning_rate": 3.4755288700993983e-07,
"loss": 0.6783,
"step": 707
},
{
"epoch": 1.7878787878787878,
"grad_norm": 1.8341157891180788,
"learning_rate": 3.395165704878023e-07,
"loss": 0.7036,
"step": 708
},
{
"epoch": 1.7904040404040404,
"grad_norm": 2.0607351593597287,
"learning_rate": 3.3157098795825846e-07,
"loss": 0.7358,
"step": 709
},
{
"epoch": 1.7929292929292928,
"grad_norm": 1.550890845191415,
"learning_rate": 3.237162941122185e-07,
"loss": 0.6055,
"step": 710
},
{
"epoch": 1.7954545454545454,
"grad_norm": 2.164619601436228,
"learning_rate": 3.1595264187110067e-07,
"loss": 0.7836,
"step": 711
},
{
"epoch": 1.797979797979798,
"grad_norm": 1.7723285053604223,
"learning_rate": 3.082801823838527e-07,
"loss": 0.6534,
"step": 712
},
{
"epoch": 1.8005050505050506,
"grad_norm": 1.830934213111903,
"learning_rate": 3.006990650240088e-07,
"loss": 0.7375,
"step": 713
},
{
"epoch": 1.803030303030303,
"grad_norm": 1.6488879997206332,
"learning_rate": 2.932094373867811e-07,
"loss": 0.6124,
"step": 714
},
{
"epoch": 1.8055555555555556,
"grad_norm": 1.8236978463332314,
"learning_rate": 2.858114452861904e-07,
"loss": 0.6679,
"step": 715
},
{
"epoch": 1.808080808080808,
"grad_norm": 2.562080433701544,
"learning_rate": 2.785052327522214e-07,
"loss": 0.5712,
"step": 716
},
{
"epoch": 1.8106060606060606,
"grad_norm": 1.7427268876983995,
"learning_rate": 2.712909420280219e-07,
"loss": 0.6602,
"step": 717
},
{
"epoch": 1.8131313131313131,
"grad_norm": 1.7035981137137926,
"learning_rate": 2.6416871356713224e-07,
"loss": 0.6651,
"step": 718
},
{
"epoch": 1.8156565656565657,
"grad_norm": 2.3967631004055243,
"learning_rate": 2.5713868603075453e-07,
"loss": 0.7528,
"step": 719
},
{
"epoch": 1.8181818181818183,
"grad_norm": 1.6393322597045499,
"learning_rate": 2.5020099628504603e-07,
"loss": 0.6272,
"step": 720
},
{
"epoch": 1.8207070707070707,
"grad_norm": 2.4840414676188134,
"learning_rate": 2.433557793984609e-07,
"loss": 0.8089,
"step": 721
},
{
"epoch": 1.823232323232323,
"grad_norm": 2.8094025014663937,
"learning_rate": 2.3660316863911682e-07,
"loss": 0.6738,
"step": 722
},
{
"epoch": 1.8257575757575757,
"grad_norm": 1.727211405487572,
"learning_rate": 2.2994329547220474e-07,
"loss": 0.6053,
"step": 723
},
{
"epoch": 1.8282828282828283,
"grad_norm": 1.6360597190266526,
"learning_rate": 2.2337628955742263e-07,
"loss": 0.6629,
"step": 724
},
{
"epoch": 1.8308080808080809,
"grad_norm": 1.5931640078580842,
"learning_rate": 2.1690227874645752e-07,
"loss": 0.6324,
"step": 725
},
{
"epoch": 1.8333333333333335,
"grad_norm": 1.7470522314432502,
"learning_rate": 2.1052138908049303e-07,
"loss": 0.6707,
"step": 726
},
{
"epoch": 1.8358585858585859,
"grad_norm": 2.009939600818105,
"learning_rate": 2.0423374478775848e-07,
"loss": 0.6979,
"step": 727
},
{
"epoch": 1.8383838383838382,
"grad_norm": 1.494758075687815,
"learning_rate": 1.9803946828110376e-07,
"loss": 0.5562,
"step": 728
},
{
"epoch": 1.8409090909090908,
"grad_norm": 1.6535534753312173,
"learning_rate": 1.9193868015562445e-07,
"loss": 0.699,
"step": 729
},
{
"epoch": 1.8434343434343434,
"grad_norm": 1.6377714391913847,
"learning_rate": 1.8593149918630927e-07,
"loss": 0.6825,
"step": 730
},
{
"epoch": 1.845959595959596,
"grad_norm": 1.5002198827012776,
"learning_rate": 1.8001804232572695e-07,
"loss": 0.6078,
"step": 731
},
{
"epoch": 1.8484848484848486,
"grad_norm": 2.0157271352901454,
"learning_rate": 1.7419842470175196e-07,
"loss": 0.7527,
"step": 732
},
{
"epoch": 1.851010101010101,
"grad_norm": 1.8478124155934152,
"learning_rate": 1.6847275961532296e-07,
"loss": 0.637,
"step": 733
},
{
"epoch": 1.8535353535353534,
"grad_norm": 1.7833387298929877,
"learning_rate": 1.6284115853823445e-07,
"loss": 0.6452,
"step": 734
},
{
"epoch": 1.856060606060606,
"grad_norm": 1.7242464414571361,
"learning_rate": 1.573037311109682e-07,
"loss": 0.6787,
"step": 735
},
{
"epoch": 1.8585858585858586,
"grad_norm": 1.584303803719777,
"learning_rate": 1.5186058514055912e-07,
"loss": 0.6399,
"step": 736
},
{
"epoch": 1.8611111111111112,
"grad_norm": 1.867844258831577,
"learning_rate": 1.465118265984966e-07,
"loss": 0.7207,
"step": 737
},
{
"epoch": 1.8636363636363638,
"grad_norm": 1.7006874043721387,
"learning_rate": 1.4125755961865827e-07,
"loss": 0.6939,
"step": 738
},
{
"epoch": 1.8661616161616161,
"grad_norm": 1.5545410765896368,
"learning_rate": 1.3609788649528776e-07,
"loss": 0.5153,
"step": 739
},
{
"epoch": 1.8686868686868687,
"grad_norm": 2.016874173855951,
"learning_rate": 1.3103290768099796e-07,
"loss": 0.6773,
"step": 740
},
{
"epoch": 1.871212121212121,
"grad_norm": 1.8026270892247167,
"learning_rate": 1.2606272178482036e-07,
"loss": 0.7393,
"step": 741
},
{
"epoch": 1.8737373737373737,
"grad_norm": 1.6278032330639467,
"learning_rate": 1.2118742557027885e-07,
"loss": 0.6084,
"step": 742
},
{
"epoch": 1.8762626262626263,
"grad_norm": 1.9865167332691045,
"learning_rate": 1.1640711395351446e-07,
"loss": 0.6447,
"step": 743
},
{
"epoch": 1.878787878787879,
"grad_norm": 1.3052243447855199,
"learning_rate": 1.1172188000142803e-07,
"loss": 0.5455,
"step": 744
},
{
"epoch": 1.8813131313131313,
"grad_norm": 1.4543324434489775,
"learning_rate": 1.0713181492987667e-07,
"loss": 0.5873,
"step": 745
},
{
"epoch": 1.8838383838383839,
"grad_norm": 1.538979209208704,
"learning_rate": 1.026370081018907e-07,
"loss": 0.6703,
"step": 746
},
{
"epoch": 1.8863636363636362,
"grad_norm": 1.9586388936279335,
"learning_rate": 9.823754702594058e-08,
"loss": 0.6137,
"step": 747
},
{
"epoch": 1.8888888888888888,
"grad_norm": 1.4629597481835024,
"learning_rate": 9.393351735422773e-08,
"loss": 0.5984,
"step": 748
},
{
"epoch": 1.8914141414141414,
"grad_norm": 1.728888439406922,
"learning_rate": 8.972500288101981e-08,
"loss": 0.643,
"step": 749
},
{
"epoch": 1.893939393939394,
"grad_norm": 2.076289946741432,
"learning_rate": 8.561208554101863e-08,
"loss": 0.6686,
"step": 750
},
{
"epoch": 1.8964646464646466,
"grad_norm": 1.476138042132342,
"learning_rate": 8.159484540776642e-08,
"loss": 0.5421,
"step": 751
},
{
"epoch": 1.898989898989899,
"grad_norm": 1.9205287463572696,
"learning_rate": 7.76733606920832e-08,
"loss": 0.7926,
"step": 752
},
{
"epoch": 1.9015151515151514,
"grad_norm": 1.8996412273203316,
"learning_rate": 7.384770774054861e-08,
"loss": 0.6625,
"step": 753
},
{
"epoch": 1.904040404040404,
"grad_norm": 1.393367488809864,
"learning_rate": 7.011796103401192e-08,
"loss": 0.5729,
"step": 754
},
{
"epoch": 1.9065656565656566,
"grad_norm": 1.6892968343582289,
"learning_rate": 6.648419318614485e-08,
"loss": 0.6934,
"step": 755
},
{
"epoch": 1.9090909090909092,
"grad_norm": 1.609424760586097,
"learning_rate": 6.294647494202444e-08,
"loss": 0.6779,
"step": 756
},
{
"epoch": 1.9116161616161618,
"grad_norm": 1.9571740198025411,
"learning_rate": 5.950487517675962e-08,
"loss": 0.772,
"step": 757
},
{
"epoch": 1.9141414141414141,
"grad_norm": 1.642699644259931,
"learning_rate": 5.615946089414737e-08,
"loss": 0.6663,
"step": 758
},
{
"epoch": 1.9166666666666665,
"grad_norm": 1.8319605271279282,
"learning_rate": 5.291029722537144e-08,
"loss": 0.6907,
"step": 759
},
{
"epoch": 1.9191919191919191,
"grad_norm": 1.7632674284572274,
"learning_rate": 4.975744742772848e-08,
"loss": 0.6367,
"step": 760
},
{
"epoch": 1.9217171717171717,
"grad_norm": 1.723855363998726,
"learning_rate": 4.670097288340281e-08,
"loss": 0.6615,
"step": 761
},
{
"epoch": 1.9242424242424243,
"grad_norm": 1.9109994208491565,
"learning_rate": 4.37409330982691e-08,
"loss": 0.6901,
"step": 762
},
{
"epoch": 1.926767676767677,
"grad_norm": 1.9009399329930168,
"learning_rate": 4.087738570073163e-08,
"loss": 0.7073,
"step": 763
},
{
"epoch": 1.9292929292929293,
"grad_norm": 1.6854909639741686,
"learning_rate": 3.8110386440605164e-08,
"loss": 0.693,
"step": 764
},
{
"epoch": 1.9318181818181817,
"grad_norm": 1.5492157178346968,
"learning_rate": 3.5439989188029156e-08,
"loss": 0.6209,
"step": 765
},
{
"epoch": 1.9343434343434343,
"grad_norm": 1.7666030301094504,
"learning_rate": 3.2866245932418606e-08,
"loss": 0.7276,
"step": 766
},
{
"epoch": 1.9368686868686869,
"grad_norm": 1.716501143505071,
"learning_rate": 3.038920678145041e-08,
"loss": 0.6594,
"step": 767
},
{
"epoch": 1.9393939393939394,
"grad_norm": 1.7227648578440493,
"learning_rate": 2.8008919960090253e-08,
"loss": 0.6714,
"step": 768
},
{
"epoch": 1.941919191919192,
"grad_norm": 1.5873000115447409,
"learning_rate": 2.5725431809652257e-08,
"loss": 0.6212,
"step": 769
},
{
"epoch": 1.9444444444444444,
"grad_norm": 1.6050904435827118,
"learning_rate": 2.3538786786896918e-08,
"loss": 0.6106,
"step": 770
},
{
"epoch": 1.946969696969697,
"grad_norm": 1.9109643583523885,
"learning_rate": 2.144902746316624e-08,
"loss": 0.6878,
"step": 771
},
{
"epoch": 1.9494949494949494,
"grad_norm": 1.7031845535623353,
"learning_rate": 1.9456194523554404e-08,
"loss": 0.6611,
"step": 772
},
{
"epoch": 1.952020202020202,
"grad_norm": 1.490726633174169,
"learning_rate": 1.756032676611674e-08,
"loss": 0.5565,
"step": 773
},
{
"epoch": 1.9545454545454546,
"grad_norm": 1.6161673943575936,
"learning_rate": 1.576146110111032e-08,
"loss": 0.6317,
"step": 774
},
{
"epoch": 1.9570707070707072,
"grad_norm": 1.7333272031946578,
"learning_rate": 1.4059632550281754e-08,
"loss": 0.6815,
"step": 775
},
{
"epoch": 1.9595959595959596,
"grad_norm": 1.8769225687363733,
"learning_rate": 1.2454874246181081e-08,
"loss": 0.7319,
"step": 776
},
{
"epoch": 1.9621212121212122,
"grad_norm": 2.10268374837673,
"learning_rate": 1.0947217431516721e-08,
"loss": 0.6993,
"step": 777
},
{
"epoch": 1.9646464646464645,
"grad_norm": 1.4837652016193354,
"learning_rate": 9.536691458548741e-09,
"loss": 0.5972,
"step": 778
},
{
"epoch": 1.9671717171717171,
"grad_norm": 1.921818751086187,
"learning_rate": 8.223323788517645e-09,
"loss": 0.6495,
"step": 779
},
{
"epoch": 1.9696969696969697,
"grad_norm": 1.7947445790078773,
"learning_rate": 7.007139991108136e-09,
"loss": 0.672,
"step": 780
},
{
"epoch": 1.9722222222222223,
"grad_norm": 1.593596211954236,
"learning_rate": 5.888163743951736e-09,
"loss": 0.6506,
"step": 781
},
{
"epoch": 1.9747474747474747,
"grad_norm": 1.55674251700751,
"learning_rate": 4.866416832167153e-09,
"loss": 0.6867,
"step": 782
},
{
"epoch": 1.9772727272727273,
"grad_norm": 1.8118989097227192,
"learning_rate": 3.941919147934514e-09,
"loss": 0.7436,
"step": 783
},
{
"epoch": 1.9797979797979797,
"grad_norm": 1.8881279353561946,
"learning_rate": 3.1146886901090024e-09,
"loss": 0.6568,
"step": 784
},
{
"epoch": 1.9823232323232323,
"grad_norm": 1.7615143888597569,
"learning_rate": 2.38474156387003e-09,
"loss": 0.6423,
"step": 785
},
{
"epoch": 1.9848484848484849,
"grad_norm": 1.650419083880146,
"learning_rate": 1.7520919804075997e-09,
"loss": 0.6436,
"step": 786
},
{
"epoch": 1.9873737373737375,
"grad_norm": 2.214382209906457,
"learning_rate": 1.216752256646969e-09,
"loss": 0.75,
"step": 787
},
{
"epoch": 1.98989898989899,
"grad_norm": 1.5516208861299512,
"learning_rate": 7.787328150071771e-10,
"loss": 0.5567,
"step": 788
},
{
"epoch": 1.9924242424242424,
"grad_norm": 1.8285321320858845,
"learning_rate": 4.3804218319731804e-10,
"loss": 0.7586,
"step": 789
},
{
"epoch": 1.9949494949494948,
"grad_norm": 1.7954800825290096,
"learning_rate": 1.9468699405444936e-10,
"loss": 0.6822,
"step": 790
},
{
"epoch": 1.9974747474747474,
"grad_norm": 1.7726696588579232,
"learning_rate": 4.867198540980944e-11,
"loss": 0.5786,
"step": 791
},
{
"epoch": 2.0,
"grad_norm": 1.6526377626407076,
"learning_rate": 0.0,
"loss": 0.6341,
"step": 792
},
{
"epoch": 2.0,
"step": 792,
"total_flos": 29590009675776.0,
"train_loss": 0.7852752542104384,
"train_runtime": 1619.8348,
"train_samples_per_second": 3.907,
"train_steps_per_second": 0.489
}
],
"logging_steps": 1,
"max_steps": 792,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 29590009675776.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}