{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9719353662981411,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0004859676831490706,
"grad_norm": 2.4448609352111816,
"learning_rate": 8e-05,
"loss": 1.9202,
"step": 1
},
{
"epoch": 0.0009719353662981412,
"grad_norm": 13.270967483520508,
"learning_rate": 8e-05,
"loss": 2.279,
"step": 2
},
{
"epoch": 0.0014579030494472117,
"grad_norm": 3.2046079635620117,
"learning_rate": 8e-05,
"loss": 2.0192,
"step": 3
},
{
"epoch": 0.0019438707325962824,
"grad_norm": 2.4486401081085205,
"learning_rate": 8e-05,
"loss": 1.9482,
"step": 4
},
{
"epoch": 0.0024298384157453528,
"grad_norm": 1.5665167570114136,
"learning_rate": 8e-05,
"loss": 1.802,
"step": 5
},
{
"epoch": 0.0029158060988944234,
"grad_norm": 1.5493930578231812,
"learning_rate": 8e-05,
"loss": 1.9056,
"step": 6
},
{
"epoch": 0.003401773782043494,
"grad_norm": 1.8967368602752686,
"learning_rate": 8e-05,
"loss": 1.8006,
"step": 7
},
{
"epoch": 0.0038877414651925647,
"grad_norm": 1.4563767910003662,
"learning_rate": 8e-05,
"loss": 1.8623,
"step": 8
},
{
"epoch": 0.004373709148341635,
"grad_norm": 1.0435065031051636,
"learning_rate": 8e-05,
"loss": 1.8765,
"step": 9
},
{
"epoch": 0.0048596768314907056,
"grad_norm": 1.1731218099594116,
"learning_rate": 8e-05,
"loss": 1.8238,
"step": 10
},
{
"epoch": 0.005345644514639776,
"grad_norm": 0.9924207329750061,
"learning_rate": 8e-05,
"loss": 1.8125,
"step": 11
},
{
"epoch": 0.005831612197788847,
"grad_norm": 1.0741186141967773,
"learning_rate": 8e-05,
"loss": 1.8727,
"step": 12
},
{
"epoch": 0.0063175798809379175,
"grad_norm": 1.0654770135879517,
"learning_rate": 8e-05,
"loss": 1.9239,
"step": 13
},
{
"epoch": 0.006803547564086988,
"grad_norm": 1.0058425664901733,
"learning_rate": 8e-05,
"loss": 1.7879,
"step": 14
},
{
"epoch": 0.007289515247236059,
"grad_norm": 1.0155013799667358,
"learning_rate": 8e-05,
"loss": 1.8111,
"step": 15
},
{
"epoch": 0.007775482930385129,
"grad_norm": 1.0371928215026855,
"learning_rate": 8e-05,
"loss": 1.8491,
"step": 16
},
{
"epoch": 0.0082614506135342,
"grad_norm": 0.9706669449806213,
"learning_rate": 8e-05,
"loss": 1.9443,
"step": 17
},
{
"epoch": 0.00874741829668327,
"grad_norm": 0.9941697716712952,
"learning_rate": 8e-05,
"loss": 1.8283,
"step": 18
},
{
"epoch": 0.00923338597983234,
"grad_norm": 0.9171883463859558,
"learning_rate": 8e-05,
"loss": 1.8007,
"step": 19
},
{
"epoch": 0.009719353662981411,
"grad_norm": 0.9293717741966248,
"learning_rate": 8e-05,
"loss": 1.9136,
"step": 20
},
{
"epoch": 0.010205321346130482,
"grad_norm": 0.9232255220413208,
"learning_rate": 8e-05,
"loss": 1.9064,
"step": 21
},
{
"epoch": 0.010691289029279552,
"grad_norm": 1.367170810699463,
"learning_rate": 8e-05,
"loss": 1.7385,
"step": 22
},
{
"epoch": 0.011177256712428623,
"grad_norm": 0.9535369873046875,
"learning_rate": 8e-05,
"loss": 1.9121,
"step": 23
},
{
"epoch": 0.011663224395577694,
"grad_norm": 0.9003153443336487,
"learning_rate": 8e-05,
"loss": 1.906,
"step": 24
},
{
"epoch": 0.012149192078726764,
"grad_norm": 0.874940812587738,
"learning_rate": 8e-05,
"loss": 1.8489,
"step": 25
},
{
"epoch": 0.012635159761875835,
"grad_norm": 0.8550360202789307,
"learning_rate": 8e-05,
"loss": 1.7969,
"step": 26
},
{
"epoch": 0.013121127445024906,
"grad_norm": 0.8408151268959045,
"learning_rate": 8e-05,
"loss": 1.8315,
"step": 27
},
{
"epoch": 0.013607095128173976,
"grad_norm": 0.8486356735229492,
"learning_rate": 8e-05,
"loss": 1.8715,
"step": 28
},
{
"epoch": 0.014093062811323047,
"grad_norm": 0.8953609466552734,
"learning_rate": 8e-05,
"loss": 1.9213,
"step": 29
},
{
"epoch": 0.014579030494472118,
"grad_norm": 0.8564465045928955,
"learning_rate": 8e-05,
"loss": 1.7525,
"step": 30
},
{
"epoch": 0.015064998177621188,
"grad_norm": 0.8466960787773132,
"learning_rate": 8e-05,
"loss": 1.8447,
"step": 31
},
{
"epoch": 0.015550965860770259,
"grad_norm": 0.8281689286231995,
"learning_rate": 8e-05,
"loss": 1.7945,
"step": 32
},
{
"epoch": 0.01603693354391933,
"grad_norm": 0.8964352011680603,
"learning_rate": 8e-05,
"loss": 1.7437,
"step": 33
},
{
"epoch": 0.0165229012270684,
"grad_norm": 0.8936882019042969,
"learning_rate": 8e-05,
"loss": 1.8412,
"step": 34
},
{
"epoch": 0.01700886891021747,
"grad_norm": 0.9289807677268982,
"learning_rate": 8e-05,
"loss": 1.7929,
"step": 35
},
{
"epoch": 0.01749483659336654,
"grad_norm": 0.8268332481384277,
"learning_rate": 8e-05,
"loss": 1.7912,
"step": 36
},
{
"epoch": 0.017980804276515612,
"grad_norm": 0.8690956234931946,
"learning_rate": 8e-05,
"loss": 1.9121,
"step": 37
},
{
"epoch": 0.01846677195966468,
"grad_norm": 0.8100050091743469,
"learning_rate": 8e-05,
"loss": 1.8061,
"step": 38
},
{
"epoch": 0.018952739642813753,
"grad_norm": 0.8319576382637024,
"learning_rate": 8e-05,
"loss": 1.726,
"step": 39
},
{
"epoch": 0.019438707325962822,
"grad_norm": 0.8179341554641724,
"learning_rate": 8e-05,
"loss": 1.7377,
"step": 40
},
{
"epoch": 0.019924675009111895,
"grad_norm": 0.8315586447715759,
"learning_rate": 8e-05,
"loss": 1.8591,
"step": 41
},
{
"epoch": 0.020410642692260964,
"grad_norm": 0.8326808214187622,
"learning_rate": 8e-05,
"loss": 1.854,
"step": 42
},
{
"epoch": 0.020896610375410036,
"grad_norm": 0.8463872671127319,
"learning_rate": 8e-05,
"loss": 1.7272,
"step": 43
},
{
"epoch": 0.021382578058559105,
"grad_norm": 0.8183313012123108,
"learning_rate": 8e-05,
"loss": 1.7174,
"step": 44
},
{
"epoch": 0.021868545741708177,
"grad_norm": 0.8056971430778503,
"learning_rate": 8e-05,
"loss": 1.7288,
"step": 45
},
{
"epoch": 0.022354513424857246,
"grad_norm": 0.8192340135574341,
"learning_rate": 8e-05,
"loss": 1.8293,
"step": 46
},
{
"epoch": 0.02284048110800632,
"grad_norm": 0.8588706851005554,
"learning_rate": 8e-05,
"loss": 1.8596,
"step": 47
},
{
"epoch": 0.023326448791155387,
"grad_norm": 0.7989048361778259,
"learning_rate": 8e-05,
"loss": 1.7926,
"step": 48
},
{
"epoch": 0.02381241647430446,
"grad_norm": 0.8248831033706665,
"learning_rate": 8e-05,
"loss": 1.837,
"step": 49
},
{
"epoch": 0.02429838415745353,
"grad_norm": 0.8250545859336853,
"learning_rate": 8e-05,
"loss": 1.7824,
"step": 50
},
{
"epoch": 0.0247843518406026,
"grad_norm": 0.8194087743759155,
"learning_rate": 8e-05,
"loss": 1.745,
"step": 51
},
{
"epoch": 0.02527031952375167,
"grad_norm": 0.8470503091812134,
"learning_rate": 8e-05,
"loss": 1.9178,
"step": 52
},
{
"epoch": 0.025756287206900742,
"grad_norm": 0.899069607257843,
"learning_rate": 8e-05,
"loss": 1.9354,
"step": 53
},
{
"epoch": 0.02624225489004981,
"grad_norm": 0.761150598526001,
"learning_rate": 8e-05,
"loss": 1.751,
"step": 54
},
{
"epoch": 0.026728222573198884,
"grad_norm": 0.806092381477356,
"learning_rate": 8e-05,
"loss": 1.8163,
"step": 55
},
{
"epoch": 0.027214190256347952,
"grad_norm": 0.8461784720420837,
"learning_rate": 8e-05,
"loss": 1.8842,
"step": 56
},
{
"epoch": 0.027700157939497025,
"grad_norm": 0.7875487804412842,
"learning_rate": 8e-05,
"loss": 1.819,
"step": 57
},
{
"epoch": 0.028186125622646094,
"grad_norm": 0.786284863948822,
"learning_rate": 8e-05,
"loss": 1.6985,
"step": 58
},
{
"epoch": 0.028672093305795166,
"grad_norm": 0.818300724029541,
"learning_rate": 8e-05,
"loss": 1.6373,
"step": 59
},
{
"epoch": 0.029158060988944235,
"grad_norm": 0.7953868508338928,
"learning_rate": 8e-05,
"loss": 1.8081,
"step": 60
},
{
"epoch": 0.029644028672093307,
"grad_norm": 0.7782135605812073,
"learning_rate": 8e-05,
"loss": 1.744,
"step": 61
},
{
"epoch": 0.030129996355242376,
"grad_norm": 0.7963123321533203,
"learning_rate": 8e-05,
"loss": 1.7847,
"step": 62
},
{
"epoch": 0.030615964038391445,
"grad_norm": 0.7954518795013428,
"learning_rate": 8e-05,
"loss": 1.7755,
"step": 63
},
{
"epoch": 0.031101931721540518,
"grad_norm": 0.8115093111991882,
"learning_rate": 8e-05,
"loss": 1.7906,
"step": 64
},
{
"epoch": 0.03158789940468959,
"grad_norm": 0.7762129902839661,
"learning_rate": 8e-05,
"loss": 1.8315,
"step": 65
},
{
"epoch": 0.03207386708783866,
"grad_norm": 0.8169989585876465,
"learning_rate": 8e-05,
"loss": 1.8314,
"step": 66
},
{
"epoch": 0.03255983477098773,
"grad_norm": 0.7835465669631958,
"learning_rate": 8e-05,
"loss": 1.7605,
"step": 67
},
{
"epoch": 0.0330458024541368,
"grad_norm": 0.824303925037384,
"learning_rate": 8e-05,
"loss": 1.9146,
"step": 68
},
{
"epoch": 0.03353177013728587,
"grad_norm": 0.771300196647644,
"learning_rate": 8e-05,
"loss": 1.7004,
"step": 69
},
{
"epoch": 0.03401773782043494,
"grad_norm": 0.7965942621231079,
"learning_rate": 8e-05,
"loss": 1.831,
"step": 70
},
{
"epoch": 0.03450370550358401,
"grad_norm": 0.7493419647216797,
"learning_rate": 8e-05,
"loss": 1.7244,
"step": 71
},
{
"epoch": 0.03498967318673308,
"grad_norm": 0.786291778087616,
"learning_rate": 8e-05,
"loss": 1.7396,
"step": 72
},
{
"epoch": 0.035475640869882155,
"grad_norm": 0.7498940825462341,
"learning_rate": 8e-05,
"loss": 1.63,
"step": 73
},
{
"epoch": 0.035961608553031224,
"grad_norm": 0.7617571949958801,
"learning_rate": 8e-05,
"loss": 1.7174,
"step": 74
},
{
"epoch": 0.03644757623618029,
"grad_norm": 0.7914626002311707,
"learning_rate": 8e-05,
"loss": 1.7682,
"step": 75
},
{
"epoch": 0.03693354391932936,
"grad_norm": 0.7942573428153992,
"learning_rate": 8e-05,
"loss": 1.7419,
"step": 76
},
{
"epoch": 0.03741951160247844,
"grad_norm": 0.7614278197288513,
"learning_rate": 8e-05,
"loss": 1.792,
"step": 77
},
{
"epoch": 0.03790547928562751,
"grad_norm": 0.7638015151023865,
"learning_rate": 8e-05,
"loss": 1.7609,
"step": 78
},
{
"epoch": 0.038391446968776576,
"grad_norm": 0.7628077864646912,
"learning_rate": 8e-05,
"loss": 1.8112,
"step": 79
},
{
"epoch": 0.038877414651925644,
"grad_norm": 0.775968074798584,
"learning_rate": 8e-05,
"loss": 1.8125,
"step": 80
},
{
"epoch": 0.03936338233507472,
"grad_norm": 0.7894865870475769,
"learning_rate": 8e-05,
"loss": 1.7243,
"step": 81
},
{
"epoch": 0.03984935001822379,
"grad_norm": 0.7538908123970032,
"learning_rate": 8e-05,
"loss": 1.7582,
"step": 82
},
{
"epoch": 0.04033531770137286,
"grad_norm": 0.7814928293228149,
"learning_rate": 8e-05,
"loss": 1.7734,
"step": 83
},
{
"epoch": 0.04082128538452193,
"grad_norm": 0.7374563217163086,
"learning_rate": 8e-05,
"loss": 1.6297,
"step": 84
},
{
"epoch": 0.041307253067671,
"grad_norm": 0.7953599095344543,
"learning_rate": 8e-05,
"loss": 1.8084,
"step": 85
},
{
"epoch": 0.04179322075082007,
"grad_norm": 0.7836169004440308,
"learning_rate": 8e-05,
"loss": 1.7694,
"step": 86
},
{
"epoch": 0.04227918843396914,
"grad_norm": 0.78363436460495,
"learning_rate": 8e-05,
"loss": 1.7396,
"step": 87
},
{
"epoch": 0.04276515611711821,
"grad_norm": 0.7557580471038818,
"learning_rate": 8e-05,
"loss": 1.6918,
"step": 88
},
{
"epoch": 0.043251123800267285,
"grad_norm": 0.8616353869438171,
"learning_rate": 8e-05,
"loss": 1.7818,
"step": 89
},
{
"epoch": 0.043737091483416354,
"grad_norm": 0.8253611326217651,
"learning_rate": 8e-05,
"loss": 1.7881,
"step": 90
},
{
"epoch": 0.04422305916656542,
"grad_norm": 0.8060818910598755,
"learning_rate": 8e-05,
"loss": 1.6931,
"step": 91
},
{
"epoch": 0.04470902684971449,
"grad_norm": 0.7820587754249573,
"learning_rate": 8e-05,
"loss": 1.8662,
"step": 92
},
{
"epoch": 0.04519499453286357,
"grad_norm": 0.7535014748573303,
"learning_rate": 8e-05,
"loss": 1.6988,
"step": 93
},
{
"epoch": 0.04568096221601264,
"grad_norm": 0.7786268591880798,
"learning_rate": 8e-05,
"loss": 1.8558,
"step": 94
},
{
"epoch": 0.046166929899161706,
"grad_norm": 0.7799100279808044,
"learning_rate": 8e-05,
"loss": 1.7975,
"step": 95
},
{
"epoch": 0.046652897582310775,
"grad_norm": 0.7639046907424927,
"learning_rate": 8e-05,
"loss": 1.8287,
"step": 96
},
{
"epoch": 0.047138865265459844,
"grad_norm": 0.7496750354766846,
"learning_rate": 8e-05,
"loss": 1.7675,
"step": 97
},
{
"epoch": 0.04762483294860892,
"grad_norm": 0.7450771331787109,
"learning_rate": 8e-05,
"loss": 1.7679,
"step": 98
},
{
"epoch": 0.04811080063175799,
"grad_norm": 0.7535659670829773,
"learning_rate": 8e-05,
"loss": 1.7457,
"step": 99
},
{
"epoch": 0.04859676831490706,
"grad_norm": 0.7719403505325317,
"learning_rate": 8e-05,
"loss": 1.6904,
"step": 100
},
{
"epoch": 0.049082735998056126,
"grad_norm": 0.7801598906517029,
"learning_rate": 8e-05,
"loss": 1.8374,
"step": 101
},
{
"epoch": 0.0495687036812052,
"grad_norm": 0.8033794164657593,
"learning_rate": 8e-05,
"loss": 1.832,
"step": 102
},
{
"epoch": 0.05005467136435427,
"grad_norm": 0.7571967840194702,
"learning_rate": 8e-05,
"loss": 1.7705,
"step": 103
},
{
"epoch": 0.05054063904750334,
"grad_norm": 0.8226978182792664,
"learning_rate": 8e-05,
"loss": 1.8415,
"step": 104
},
{
"epoch": 0.05102660673065241,
"grad_norm": 0.7520501017570496,
"learning_rate": 8e-05,
"loss": 1.7823,
"step": 105
},
{
"epoch": 0.051512574413801485,
"grad_norm": 0.781470537185669,
"learning_rate": 8e-05,
"loss": 1.719,
"step": 106
},
{
"epoch": 0.051998542096950554,
"grad_norm": 0.7554371356964111,
"learning_rate": 8e-05,
"loss": 1.7176,
"step": 107
},
{
"epoch": 0.05248450978009962,
"grad_norm": 0.7618365287780762,
"learning_rate": 8e-05,
"loss": 1.8165,
"step": 108
},
{
"epoch": 0.05297047746324869,
"grad_norm": 0.7600571513175964,
"learning_rate": 8e-05,
"loss": 1.7371,
"step": 109
},
{
"epoch": 0.05345644514639777,
"grad_norm": 0.7518073916435242,
"learning_rate": 8e-05,
"loss": 1.744,
"step": 110
},
{
"epoch": 0.053942412829546836,
"grad_norm": 0.7937865853309631,
"learning_rate": 8e-05,
"loss": 1.8204,
"step": 111
},
{
"epoch": 0.054428380512695905,
"grad_norm": 0.7794619798660278,
"learning_rate": 8e-05,
"loss": 1.8048,
"step": 112
},
{
"epoch": 0.054914348195844974,
"grad_norm": 0.7729494571685791,
"learning_rate": 8e-05,
"loss": 1.7184,
"step": 113
},
{
"epoch": 0.05540031587899405,
"grad_norm": 0.764567494392395,
"learning_rate": 8e-05,
"loss": 1.746,
"step": 114
},
{
"epoch": 0.05588628356214312,
"grad_norm": 0.807798445224762,
"learning_rate": 8e-05,
"loss": 1.8141,
"step": 115
},
{
"epoch": 0.05637225124529219,
"grad_norm": 0.772461473941803,
"learning_rate": 8e-05,
"loss": 1.6878,
"step": 116
},
{
"epoch": 0.056858218928441256,
"grad_norm": 0.7937839031219482,
"learning_rate": 8e-05,
"loss": 1.8105,
"step": 117
},
{
"epoch": 0.05734418661159033,
"grad_norm": 0.736148476600647,
"learning_rate": 8e-05,
"loss": 1.6692,
"step": 118
},
{
"epoch": 0.0578301542947394,
"grad_norm": 0.8022646903991699,
"learning_rate": 8e-05,
"loss": 1.786,
"step": 119
},
{
"epoch": 0.05831612197788847,
"grad_norm": 0.7743873000144958,
"learning_rate": 8e-05,
"loss": 1.804,
"step": 120
},
{
"epoch": 0.05880208966103754,
"grad_norm": 0.79759281873703,
"learning_rate": 8e-05,
"loss": 1.7613,
"step": 121
},
{
"epoch": 0.059288057344186615,
"grad_norm": 0.7736479640007019,
"learning_rate": 8e-05,
"loss": 1.7923,
"step": 122
},
{
"epoch": 0.059774025027335684,
"grad_norm": 0.7620419263839722,
"learning_rate": 8e-05,
"loss": 1.7459,
"step": 123
},
{
"epoch": 0.06025999271048475,
"grad_norm": 0.7272644639015198,
"learning_rate": 8e-05,
"loss": 1.7041,
"step": 124
},
{
"epoch": 0.06074596039363382,
"grad_norm": 0.743613600730896,
"learning_rate": 8e-05,
"loss": 1.7518,
"step": 125
},
{
"epoch": 0.06123192807678289,
"grad_norm": 0.7796114087104797,
"learning_rate": 8e-05,
"loss": 1.7195,
"step": 126
},
{
"epoch": 0.061717895759931966,
"grad_norm": 0.747711718082428,
"learning_rate": 8e-05,
"loss": 1.6905,
"step": 127
},
{
"epoch": 0.062203863443081035,
"grad_norm": 0.7622174024581909,
"learning_rate": 8e-05,
"loss": 1.732,
"step": 128
},
{
"epoch": 0.06268983112623011,
"grad_norm": 0.7554340362548828,
"learning_rate": 8e-05,
"loss": 1.6803,
"step": 129
},
{
"epoch": 0.06317579880937918,
"grad_norm": 0.7275816202163696,
"learning_rate": 8e-05,
"loss": 1.6863,
"step": 130
},
{
"epoch": 0.06366176649252825,
"grad_norm": 0.7357078790664673,
"learning_rate": 8e-05,
"loss": 1.8095,
"step": 131
},
{
"epoch": 0.06414773417567732,
"grad_norm": 0.7402067184448242,
"learning_rate": 8e-05,
"loss": 1.7661,
"step": 132
},
{
"epoch": 0.06463370185882639,
"grad_norm": 0.7280262112617493,
"learning_rate": 8e-05,
"loss": 1.7219,
"step": 133
},
{
"epoch": 0.06511966954197546,
"grad_norm": 0.7843252420425415,
"learning_rate": 8e-05,
"loss": 1.9391,
"step": 134
},
{
"epoch": 0.06560563722512452,
"grad_norm": 0.7665972709655762,
"learning_rate": 8e-05,
"loss": 1.8084,
"step": 135
},
{
"epoch": 0.0660916049082736,
"grad_norm": 0.7905083298683167,
"learning_rate": 8e-05,
"loss": 1.8191,
"step": 136
},
{
"epoch": 0.06657757259142268,
"grad_norm": 0.8167449235916138,
"learning_rate": 8e-05,
"loss": 1.8108,
"step": 137
},
{
"epoch": 0.06706354027457175,
"grad_norm": 0.7527084946632385,
"learning_rate": 8e-05,
"loss": 1.8011,
"step": 138
},
{
"epoch": 0.06754950795772081,
"grad_norm": 0.7817428708076477,
"learning_rate": 8e-05,
"loss": 1.6976,
"step": 139
},
{
"epoch": 0.06803547564086988,
"grad_norm": 0.7485918402671814,
"learning_rate": 8e-05,
"loss": 1.7659,
"step": 140
},
{
"epoch": 0.06852144332401895,
"grad_norm": 0.7752904295921326,
"learning_rate": 8e-05,
"loss": 1.7426,
"step": 141
},
{
"epoch": 0.06900741100716802,
"grad_norm": 0.7750376462936401,
"learning_rate": 8e-05,
"loss": 1.8314,
"step": 142
},
{
"epoch": 0.06949337869031709,
"grad_norm": 0.8052763938903809,
"learning_rate": 8e-05,
"loss": 1.7471,
"step": 143
},
{
"epoch": 0.06997934637346616,
"grad_norm": 0.7564165592193604,
"learning_rate": 8e-05,
"loss": 1.8404,
"step": 144
},
{
"epoch": 0.07046531405661524,
"grad_norm": 0.8227714896202087,
"learning_rate": 8e-05,
"loss": 1.7559,
"step": 145
},
{
"epoch": 0.07095128173976431,
"grad_norm": 0.746425986289978,
"learning_rate": 8e-05,
"loss": 1.7172,
"step": 146
},
{
"epoch": 0.07143724942291338,
"grad_norm": 0.8286752700805664,
"learning_rate": 8e-05,
"loss": 1.7673,
"step": 147
},
{
"epoch": 0.07192321710606245,
"grad_norm": 0.7334216237068176,
"learning_rate": 8e-05,
"loss": 1.7783,
"step": 148
},
{
"epoch": 0.07240918478921152,
"grad_norm": 0.8212454319000244,
"learning_rate": 8e-05,
"loss": 1.7831,
"step": 149
},
{
"epoch": 0.07289515247236059,
"grad_norm": 0.776168704032898,
"learning_rate": 8e-05,
"loss": 1.6956,
"step": 150
},
{
"epoch": 0.07338112015550965,
"grad_norm": 0.7995423078536987,
"learning_rate": 8e-05,
"loss": 1.7718,
"step": 151
},
{
"epoch": 0.07386708783865872,
"grad_norm": 0.8106943964958191,
"learning_rate": 8e-05,
"loss": 1.8241,
"step": 152
},
{
"epoch": 0.0743530555218078,
"grad_norm": 0.7627594470977783,
"learning_rate": 8e-05,
"loss": 1.7804,
"step": 153
},
{
"epoch": 0.07483902320495688,
"grad_norm": 0.7455682754516602,
"learning_rate": 8e-05,
"loss": 1.7426,
"step": 154
},
{
"epoch": 0.07532499088810594,
"grad_norm": 0.7307602763175964,
"learning_rate": 8e-05,
"loss": 1.7283,
"step": 155
},
{
"epoch": 0.07581095857125501,
"grad_norm": 0.784433126449585,
"learning_rate": 8e-05,
"loss": 1.9063,
"step": 156
},
{
"epoch": 0.07629692625440408,
"grad_norm": 0.7390819191932678,
"learning_rate": 8e-05,
"loss": 1.7333,
"step": 157
},
{
"epoch": 0.07678289393755315,
"grad_norm": 0.7457150816917419,
"learning_rate": 8e-05,
"loss": 1.7533,
"step": 158
},
{
"epoch": 0.07726886162070222,
"grad_norm": 0.7638875842094421,
"learning_rate": 8e-05,
"loss": 1.8004,
"step": 159
},
{
"epoch": 0.07775482930385129,
"grad_norm": 0.7380759716033936,
"learning_rate": 8e-05,
"loss": 1.6145,
"step": 160
},
{
"epoch": 0.07824079698700036,
"grad_norm": 0.7439815998077393,
"learning_rate": 8e-05,
"loss": 1.7438,
"step": 161
},
{
"epoch": 0.07872676467014944,
"grad_norm": 0.7513895034790039,
"learning_rate": 8e-05,
"loss": 1.7399,
"step": 162
},
{
"epoch": 0.07921273235329851,
"grad_norm": 0.7387180328369141,
"learning_rate": 8e-05,
"loss": 1.7396,
"step": 163
},
{
"epoch": 0.07969870003644758,
"grad_norm": 0.7378660440444946,
"learning_rate": 8e-05,
"loss": 1.7938,
"step": 164
},
{
"epoch": 0.08018466771959665,
"grad_norm": 0.7654342651367188,
"learning_rate": 8e-05,
"loss": 1.8052,
"step": 165
},
{
"epoch": 0.08067063540274572,
"grad_norm": 0.7595049142837524,
"learning_rate": 8e-05,
"loss": 1.7566,
"step": 166
},
{
"epoch": 0.08115660308589479,
"grad_norm": 0.7756885886192322,
"learning_rate": 8e-05,
"loss": 1.8195,
"step": 167
},
{
"epoch": 0.08164257076904385,
"grad_norm": 0.720808744430542,
"learning_rate": 8e-05,
"loss": 1.8151,
"step": 168
},
{
"epoch": 0.08212853845219292,
"grad_norm": 0.742946982383728,
"learning_rate": 8e-05,
"loss": 1.7991,
"step": 169
},
{
"epoch": 0.082614506135342,
"grad_norm": 0.7688474655151367,
"learning_rate": 8e-05,
"loss": 1.8719,
"step": 170
},
{
"epoch": 0.08310047381849107,
"grad_norm": 0.7337490916252136,
"learning_rate": 8e-05,
"loss": 1.7147,
"step": 171
},
{
"epoch": 0.08358644150164014,
"grad_norm": 0.7888994812965393,
"learning_rate": 8e-05,
"loss": 1.8068,
"step": 172
},
{
"epoch": 0.08407240918478921,
"grad_norm": 0.8796278238296509,
"learning_rate": 8e-05,
"loss": 1.781,
"step": 173
},
{
"epoch": 0.08455837686793828,
"grad_norm": 0.7856938242912292,
"learning_rate": 8e-05,
"loss": 1.7965,
"step": 174
},
{
"epoch": 0.08504434455108735,
"grad_norm": 0.7243691682815552,
"learning_rate": 8e-05,
"loss": 1.7173,
"step": 175
},
{
"epoch": 0.08553031223423642,
"grad_norm": 0.7716972827911377,
"learning_rate": 8e-05,
"loss": 1.7147,
"step": 176
},
{
"epoch": 0.08601627991738549,
"grad_norm": 0.7403793931007385,
"learning_rate": 8e-05,
"loss": 1.8092,
"step": 177
},
{
"epoch": 0.08650224760053457,
"grad_norm": 0.7590733170509338,
"learning_rate": 8e-05,
"loss": 1.6741,
"step": 178
},
{
"epoch": 0.08698821528368364,
"grad_norm": 0.7034937739372253,
"learning_rate": 8e-05,
"loss": 1.6921,
"step": 179
},
{
"epoch": 0.08747418296683271,
"grad_norm": 0.7486933469772339,
"learning_rate": 8e-05,
"loss": 1.7373,
"step": 180
},
{
"epoch": 0.08796015064998178,
"grad_norm": 0.7233313918113708,
"learning_rate": 8e-05,
"loss": 1.6989,
"step": 181
},
{
"epoch": 0.08844611833313085,
"grad_norm": 0.7209268808364868,
"learning_rate": 8e-05,
"loss": 1.6732,
"step": 182
},
{
"epoch": 0.08893208601627992,
"grad_norm": 0.7451608180999756,
"learning_rate": 8e-05,
"loss": 1.9187,
"step": 183
},
{
"epoch": 0.08941805369942898,
"grad_norm": 0.7491340637207031,
"learning_rate": 8e-05,
"loss": 1.7786,
"step": 184
},
{
"epoch": 0.08990402138257805,
"grad_norm": 0.7303433418273926,
"learning_rate": 8e-05,
"loss": 1.6977,
"step": 185
},
{
"epoch": 0.09038998906572714,
"grad_norm": 0.7609615325927734,
"learning_rate": 8e-05,
"loss": 1.8273,
"step": 186
},
{
"epoch": 0.0908759567488762,
"grad_norm": 0.732149064540863,
"learning_rate": 8e-05,
"loss": 1.7655,
"step": 187
},
{
"epoch": 0.09136192443202527,
"grad_norm": 0.7594504356384277,
"learning_rate": 8e-05,
"loss": 1.8122,
"step": 188
},
{
"epoch": 0.09184789211517434,
"grad_norm": 0.7743573188781738,
"learning_rate": 8e-05,
"loss": 1.7085,
"step": 189
},
{
"epoch": 0.09233385979832341,
"grad_norm": 0.758564829826355,
"learning_rate": 8e-05,
"loss": 1.7353,
"step": 190
},
{
"epoch": 0.09281982748147248,
"grad_norm": 0.7762187719345093,
"learning_rate": 8e-05,
"loss": 1.7942,
"step": 191
},
{
"epoch": 0.09330579516462155,
"grad_norm": 0.7215401530265808,
"learning_rate": 8e-05,
"loss": 1.6615,
"step": 192
},
{
"epoch": 0.09379176284777062,
"grad_norm": 0.7193935513496399,
"learning_rate": 8e-05,
"loss": 1.7636,
"step": 193
},
{
"epoch": 0.09427773053091969,
"grad_norm": 0.7437982559204102,
"learning_rate": 8e-05,
"loss": 1.7284,
"step": 194
},
{
"epoch": 0.09476369821406877,
"grad_norm": 0.718082845211029,
"learning_rate": 8e-05,
"loss": 1.6973,
"step": 195
},
{
"epoch": 0.09524966589721784,
"grad_norm": 0.789637565612793,
"learning_rate": 8e-05,
"loss": 1.9072,
"step": 196
},
{
"epoch": 0.09573563358036691,
"grad_norm": 0.7061552405357361,
"learning_rate": 8e-05,
"loss": 1.6949,
"step": 197
},
{
"epoch": 0.09622160126351598,
"grad_norm": 0.7664032578468323,
"learning_rate": 8e-05,
"loss": 1.6626,
"step": 198
},
{
"epoch": 0.09670756894666505,
"grad_norm": 0.719813346862793,
"learning_rate": 8e-05,
"loss": 1.7327,
"step": 199
},
{
"epoch": 0.09719353662981411,
"grad_norm": 0.7477394342422485,
"learning_rate": 8e-05,
"loss": 1.7191,
"step": 200
},
{
"epoch": 0.09767950431296318,
"grad_norm": 0.7387209534645081,
"learning_rate": 8e-05,
"loss": 1.7389,
"step": 201
},
{
"epoch": 0.09816547199611225,
"grad_norm": 0.706491231918335,
"learning_rate": 8e-05,
"loss": 1.7395,
"step": 202
},
{
"epoch": 0.09865143967926134,
"grad_norm": 0.739989161491394,
"learning_rate": 8e-05,
"loss": 1.7539,
"step": 203
},
{
"epoch": 0.0991374073624104,
"grad_norm": 0.7250370979309082,
"learning_rate": 8e-05,
"loss": 1.7691,
"step": 204
},
{
"epoch": 0.09962337504555947,
"grad_norm": 0.7379734516143799,
"learning_rate": 8e-05,
"loss": 1.7048,
"step": 205
},
{
"epoch": 0.10010934272870854,
"grad_norm": 0.7315964698791504,
"learning_rate": 8e-05,
"loss": 1.734,
"step": 206
},
{
"epoch": 0.10059531041185761,
"grad_norm": 0.7972819209098816,
"learning_rate": 8e-05,
"loss": 1.6095,
"step": 207
},
{
"epoch": 0.10108127809500668,
"grad_norm": 0.721659779548645,
"learning_rate": 8e-05,
"loss": 1.7488,
"step": 208
},
{
"epoch": 0.10156724577815575,
"grad_norm": 0.72065269947052,
"learning_rate": 8e-05,
"loss": 1.7815,
"step": 209
},
{
"epoch": 0.10205321346130482,
"grad_norm": 0.7838714718818665,
"learning_rate": 8e-05,
"loss": 1.8119,
"step": 210
},
{
"epoch": 0.1025391811444539,
"grad_norm": 0.7384838461875916,
"learning_rate": 8e-05,
"loss": 1.7302,
"step": 211
},
{
"epoch": 0.10302514882760297,
"grad_norm": 0.776187539100647,
"learning_rate": 8e-05,
"loss": 1.8111,
"step": 212
},
{
"epoch": 0.10351111651075204,
"grad_norm": 0.7436544895172119,
"learning_rate": 8e-05,
"loss": 1.7778,
"step": 213
},
{
"epoch": 0.10399708419390111,
"grad_norm": 0.7787929177284241,
"learning_rate": 8e-05,
"loss": 1.7587,
"step": 214
},
{
"epoch": 0.10448305187705018,
"grad_norm": 0.7442957758903503,
"learning_rate": 8e-05,
"loss": 1.8277,
"step": 215
},
{
"epoch": 0.10496901956019924,
"grad_norm": 0.7331508994102478,
"learning_rate": 8e-05,
"loss": 1.7482,
"step": 216
},
{
"epoch": 0.10545498724334831,
"grad_norm": 0.7571154832839966,
"learning_rate": 8e-05,
"loss": 1.7545,
"step": 217
},
{
"epoch": 0.10594095492649738,
"grad_norm": 0.750484049320221,
"learning_rate": 8e-05,
"loss": 1.7816,
"step": 218
},
{
"epoch": 0.10642692260964647,
"grad_norm": 0.7651139497756958,
"learning_rate": 8e-05,
"loss": 1.6852,
"step": 219
},
{
"epoch": 0.10691289029279553,
"grad_norm": 0.7269699573516846,
"learning_rate": 8e-05,
"loss": 1.6331,
"step": 220
},
{
"epoch": 0.1073988579759446,
"grad_norm": 0.759168267250061,
"learning_rate": 8e-05,
"loss": 1.7688,
"step": 221
},
{
"epoch": 0.10788482565909367,
"grad_norm": 0.6969944834709167,
"learning_rate": 8e-05,
"loss": 1.7579,
"step": 222
},
{
"epoch": 0.10837079334224274,
"grad_norm": 0.7620792984962463,
"learning_rate": 8e-05,
"loss": 1.76,
"step": 223
},
{
"epoch": 0.10885676102539181,
"grad_norm": 0.708992600440979,
"learning_rate": 8e-05,
"loss": 1.719,
"step": 224
},
{
"epoch": 0.10934272870854088,
"grad_norm": 0.7534224390983582,
"learning_rate": 8e-05,
"loss": 1.699,
"step": 225
},
{
"epoch": 0.10982869639168995,
"grad_norm": 0.7195073366165161,
"learning_rate": 8e-05,
"loss": 1.674,
"step": 226
},
{
"epoch": 0.11031466407483902,
"grad_norm": 0.7106963396072388,
"learning_rate": 8e-05,
"loss": 1.7947,
"step": 227
},
{
"epoch": 0.1108006317579881,
"grad_norm": 0.7048214077949524,
"learning_rate": 8e-05,
"loss": 1.719,
"step": 228
},
{
"epoch": 0.11128659944113717,
"grad_norm": 0.7359198927879333,
"learning_rate": 8e-05,
"loss": 1.8299,
"step": 229
},
{
"epoch": 0.11177256712428624,
"grad_norm": 0.7068722248077393,
"learning_rate": 8e-05,
"loss": 1.8336,
"step": 230
},
{
"epoch": 0.1122585348074353,
"grad_norm": 0.6944065093994141,
"learning_rate": 8e-05,
"loss": 1.7053,
"step": 231
},
{
"epoch": 0.11274450249058438,
"grad_norm": 0.7156597375869751,
"learning_rate": 8e-05,
"loss": 1.7556,
"step": 232
},
{
"epoch": 0.11323047017373344,
"grad_norm": 0.7233269810676575,
"learning_rate": 8e-05,
"loss": 1.7028,
"step": 233
},
{
"epoch": 0.11371643785688251,
"grad_norm": 2.004643678665161,
"learning_rate": 8e-05,
"loss": 1.7929,
"step": 234
},
{
"epoch": 0.11420240554003158,
"grad_norm": 0.7343223690986633,
"learning_rate": 8e-05,
"loss": 1.7148,
"step": 235
},
{
"epoch": 0.11468837322318066,
"grad_norm": 0.7092347145080566,
"learning_rate": 8e-05,
"loss": 1.7046,
"step": 236
},
{
"epoch": 0.11517434090632973,
"grad_norm": 0.7329908609390259,
"learning_rate": 8e-05,
"loss": 1.7856,
"step": 237
},
{
"epoch": 0.1156603085894788,
"grad_norm": 0.7091159820556641,
"learning_rate": 8e-05,
"loss": 1.6764,
"step": 238
},
{
"epoch": 0.11614627627262787,
"grad_norm": 0.6997739672660828,
"learning_rate": 8e-05,
"loss": 1.662,
"step": 239
},
{
"epoch": 0.11663224395577694,
"grad_norm": 0.7721783518791199,
"learning_rate": 8e-05,
"loss": 1.7163,
"step": 240
},
{
"epoch": 0.11711821163892601,
"grad_norm": 0.7204678058624268,
"learning_rate": 8e-05,
"loss": 1.6845,
"step": 241
},
{
"epoch": 0.11760417932207508,
"grad_norm": 0.7557322382926941,
"learning_rate": 8e-05,
"loss": 1.8165,
"step": 242
},
{
"epoch": 0.11809014700522415,
"grad_norm": 0.7318572998046875,
"learning_rate": 8e-05,
"loss": 1.834,
"step": 243
},
{
"epoch": 0.11857611468837323,
"grad_norm": 0.7204866409301758,
"learning_rate": 8e-05,
"loss": 1.6916,
"step": 244
},
{
"epoch": 0.1190620823715223,
"grad_norm": 0.7328425645828247,
"learning_rate": 8e-05,
"loss": 1.7068,
"step": 245
},
{
"epoch": 0.11954805005467137,
"grad_norm": 0.733796238899231,
"learning_rate": 8e-05,
"loss": 1.7716,
"step": 246
},
{
"epoch": 0.12003401773782044,
"grad_norm": 0.7169679999351501,
"learning_rate": 8e-05,
"loss": 1.6219,
"step": 247
},
{
"epoch": 0.1205199854209695,
"grad_norm": 0.7298434972763062,
"learning_rate": 8e-05,
"loss": 1.7573,
"step": 248
},
{
"epoch": 0.12100595310411857,
"grad_norm": 0.709538459777832,
"learning_rate": 8e-05,
"loss": 1.6737,
"step": 249
},
{
"epoch": 0.12149192078726764,
"grad_norm": 0.7161252498626709,
"learning_rate": 8e-05,
"loss": 1.7395,
"step": 250
},
{
"epoch": 0.12197788847041671,
"grad_norm": 0.7367464303970337,
"learning_rate": 8e-05,
"loss": 1.7921,
"step": 251
},
{
"epoch": 0.12246385615356578,
"grad_norm": 0.7525370121002197,
"learning_rate": 8e-05,
"loss": 1.8733,
"step": 252
},
{
"epoch": 0.12294982383671486,
"grad_norm": 107.62161254882812,
"learning_rate": 8e-05,
"loss": 2.3949,
"step": 253
},
{
"epoch": 0.12343579151986393,
"grad_norm": 0.7206385135650635,
"learning_rate": 8e-05,
"loss": 1.7251,
"step": 254
},
{
"epoch": 0.123921759203013,
"grad_norm": 0.7572370767593384,
"learning_rate": 8e-05,
"loss": 1.7523,
"step": 255
},
{
"epoch": 0.12440772688616207,
"grad_norm": 0.7151662111282349,
"learning_rate": 8e-05,
"loss": 1.6784,
"step": 256
},
{
"epoch": 0.12489369456931114,
"grad_norm": 0.7240515947341919,
"learning_rate": 8e-05,
"loss": 1.726,
"step": 257
},
{
"epoch": 0.12537966225246022,
"grad_norm": 0.7299691438674927,
"learning_rate": 8e-05,
"loss": 1.6659,
"step": 258
},
{
"epoch": 0.1258656299356093,
"grad_norm": 0.7652246356010437,
"learning_rate": 8e-05,
"loss": 1.7918,
"step": 259
},
{
"epoch": 0.12635159761875836,
"grad_norm": 1.1573753356933594,
"learning_rate": 8e-05,
"loss": 1.7931,
"step": 260
},
{
"epoch": 0.12683756530190743,
"grad_norm": 0.7974875569343567,
"learning_rate": 8e-05,
"loss": 1.7741,
"step": 261
},
{
"epoch": 0.1273235329850565,
"grad_norm": 0.7178170084953308,
"learning_rate": 8e-05,
"loss": 1.8449,
"step": 262
},
{
"epoch": 0.12780950066820557,
"grad_norm": 0.743977427482605,
"learning_rate": 8e-05,
"loss": 1.6866,
"step": 263
},
{
"epoch": 0.12829546835135464,
"grad_norm": 0.7151938080787659,
"learning_rate": 8e-05,
"loss": 1.7498,
"step": 264
},
{
"epoch": 0.1287814360345037,
"grad_norm": 0.7639552354812622,
"learning_rate": 8e-05,
"loss": 1.7577,
"step": 265
},
{
"epoch": 0.12926740371765277,
"grad_norm": 1.4950189590454102,
"learning_rate": 8e-05,
"loss": 1.7232,
"step": 266
},
{
"epoch": 0.12975337140080184,
"grad_norm": 0.7332847118377686,
"learning_rate": 8e-05,
"loss": 1.7094,
"step": 267
},
{
"epoch": 0.1302393390839509,
"grad_norm": 1.2607755661010742,
"learning_rate": 8e-05,
"loss": 1.8076,
"step": 268
},
{
"epoch": 0.13072530676709998,
"grad_norm": 0.8469274044036865,
"learning_rate": 8e-05,
"loss": 1.8711,
"step": 269
},
{
"epoch": 0.13121127445024905,
"grad_norm": 23.890470504760742,
"learning_rate": 8e-05,
"loss": 1.8796,
"step": 270
},
{
"epoch": 0.13169724213339812,
"grad_norm": 0.890592098236084,
"learning_rate": 8e-05,
"loss": 1.7399,
"step": 271
},
{
"epoch": 0.1321832098165472,
"grad_norm": 0.7210285067558289,
"learning_rate": 8e-05,
"loss": 1.76,
"step": 272
},
{
"epoch": 0.13266917749969628,
"grad_norm": 4.838703632354736,
"learning_rate": 8e-05,
"loss": 1.8066,
"step": 273
},
{
"epoch": 0.13315514518284535,
"grad_norm": 0.9866505265235901,
"learning_rate": 8e-05,
"loss": 1.7026,
"step": 274
},
{
"epoch": 0.13364111286599442,
"grad_norm": 0.7707441449165344,
"learning_rate": 8e-05,
"loss": 1.7258,
"step": 275
},
{
"epoch": 0.1341270805491435,
"grad_norm": 0.8940787315368652,
"learning_rate": 8e-05,
"loss": 1.6558,
"step": 276
},
{
"epoch": 0.13461304823229256,
"grad_norm": 0.8925697803497314,
"learning_rate": 8e-05,
"loss": 1.7646,
"step": 277
},
{
"epoch": 0.13509901591544163,
"grad_norm": 0.8577701449394226,
"learning_rate": 8e-05,
"loss": 1.7418,
"step": 278
},
{
"epoch": 0.1355849835985907,
"grad_norm": 3.1491594314575195,
"learning_rate": 8e-05,
"loss": 1.7044,
"step": 279
},
{
"epoch": 0.13607095128173977,
"grad_norm": 0.8906698822975159,
"learning_rate": 8e-05,
"loss": 1.6898,
"step": 280
},
{
"epoch": 0.13655691896488883,
"grad_norm": 0.9891100525856018,
"learning_rate": 8e-05,
"loss": 1.81,
"step": 281
},
{
"epoch": 0.1370428866480379,
"grad_norm": 0.7962388396263123,
"learning_rate": 8e-05,
"loss": 1.6333,
"step": 282
},
{
"epoch": 0.13752885433118697,
"grad_norm": 0.8063424229621887,
"learning_rate": 8e-05,
"loss": 1.8258,
"step": 283
},
{
"epoch": 0.13801482201433604,
"grad_norm": 0.7605735063552856,
"learning_rate": 8e-05,
"loss": 1.6676,
"step": 284
},
{
"epoch": 0.1385007896974851,
"grad_norm": 0.9942009449005127,
"learning_rate": 8e-05,
"loss": 1.7106,
"step": 285
},
{
"epoch": 0.13898675738063418,
"grad_norm": 0.8328946232795715,
"learning_rate": 8e-05,
"loss": 1.7818,
"step": 286
},
{
"epoch": 0.13947272506378325,
"grad_norm": 0.7635605931282043,
"learning_rate": 8e-05,
"loss": 1.814,
"step": 287
},
{
"epoch": 0.13995869274693232,
"grad_norm": 1.255517840385437,
"learning_rate": 8e-05,
"loss": 1.8244,
"step": 288
},
{
"epoch": 0.14044466043008139,
"grad_norm": 0.7666721940040588,
"learning_rate": 8e-05,
"loss": 1.7657,
"step": 289
},
{
"epoch": 0.14093062811323048,
"grad_norm": 0.7664022445678711,
"learning_rate": 8e-05,
"loss": 1.8283,
"step": 290
},
{
"epoch": 0.14141659579637955,
"grad_norm": 0.7420341968536377,
"learning_rate": 8e-05,
"loss": 1.6567,
"step": 291
},
{
"epoch": 0.14190256347952862,
"grad_norm": 0.7334952354431152,
"learning_rate": 8e-05,
"loss": 1.7725,
"step": 292
},
{
"epoch": 0.1423885311626777,
"grad_norm": 7.276203632354736,
"learning_rate": 8e-05,
"loss": 1.7124,
"step": 293
},
{
"epoch": 0.14287449884582676,
"grad_norm": 0.8101209402084351,
"learning_rate": 8e-05,
"loss": 1.7844,
"step": 294
},
{
"epoch": 0.14336046652897583,
"grad_norm": 0.7334319949150085,
"learning_rate": 8e-05,
"loss": 1.7091,
"step": 295
},
{
"epoch": 0.1438464342121249,
"grad_norm": 0.7652103900909424,
"learning_rate": 8e-05,
"loss": 1.7839,
"step": 296
},
{
"epoch": 0.14433240189527397,
"grad_norm": 0.7974730730056763,
"learning_rate": 8e-05,
"loss": 1.8041,
"step": 297
},
{
"epoch": 0.14481836957842303,
"grad_norm": 0.8040671348571777,
"learning_rate": 8e-05,
"loss": 1.8061,
"step": 298
},
{
"epoch": 0.1453043372615721,
"grad_norm": 0.7175395488739014,
"learning_rate": 8e-05,
"loss": 1.6669,
"step": 299
},
{
"epoch": 0.14579030494472117,
"grad_norm": 0.7443219423294067,
"learning_rate": 8e-05,
"loss": 1.6525,
"step": 300
},
{
"epoch": 0.14627627262787024,
"grad_norm": 0.7299359440803528,
"learning_rate": 8e-05,
"loss": 1.7975,
"step": 301
},
{
"epoch": 0.1467622403110193,
"grad_norm": 0.7135804295539856,
"learning_rate": 8e-05,
"loss": 1.7,
"step": 302
},
{
"epoch": 0.14724820799416838,
"grad_norm": 0.7725481390953064,
"learning_rate": 8e-05,
"loss": 1.6892,
"step": 303
},
{
"epoch": 0.14773417567731745,
"grad_norm": 0.7773021459579468,
"learning_rate": 8e-05,
"loss": 1.6862,
"step": 304
},
{
"epoch": 0.14822014336046652,
"grad_norm": 0.7179283499717712,
"learning_rate": 8e-05,
"loss": 1.7691,
"step": 305
},
{
"epoch": 0.1487061110436156,
"grad_norm": 0.70677250623703,
"learning_rate": 8e-05,
"loss": 1.7237,
"step": 306
},
{
"epoch": 0.14919207872676468,
"grad_norm": 0.7373878955841064,
"learning_rate": 8e-05,
"loss": 1.6954,
"step": 307
},
{
"epoch": 0.14967804640991375,
"grad_norm": 0.7277619242668152,
"learning_rate": 8e-05,
"loss": 1.7182,
"step": 308
},
{
"epoch": 0.15016401409306282,
"grad_norm": 0.7144182324409485,
"learning_rate": 8e-05,
"loss": 1.747,
"step": 309
},
{
"epoch": 0.1506499817762119,
"grad_norm": 0.7347834706306458,
"learning_rate": 8e-05,
"loss": 1.8236,
"step": 310
},
{
"epoch": 0.15113594945936096,
"grad_norm": 0.7458093166351318,
"learning_rate": 8e-05,
"loss": 1.6981,
"step": 311
},
{
"epoch": 0.15162191714251003,
"grad_norm": 0.7428991198539734,
"learning_rate": 8e-05,
"loss": 1.7366,
"step": 312
},
{
"epoch": 0.1521078848256591,
"grad_norm": 0.8002575635910034,
"learning_rate": 8e-05,
"loss": 1.7735,
"step": 313
},
{
"epoch": 0.15259385250880816,
"grad_norm": 0.7258359789848328,
"learning_rate": 8e-05,
"loss": 1.7941,
"step": 314
},
{
"epoch": 0.15307982019195723,
"grad_norm": 0.7355910539627075,
"learning_rate": 8e-05,
"loss": 1.7213,
"step": 315
},
{
"epoch": 0.1535657878751063,
"grad_norm": 0.7196224331855774,
"learning_rate": 8e-05,
"loss": 1.7162,
"step": 316
},
{
"epoch": 0.15405175555825537,
"grad_norm": 0.749587893486023,
"learning_rate": 8e-05,
"loss": 1.7576,
"step": 317
},
{
"epoch": 0.15453772324140444,
"grad_norm": 0.692478597164154,
"learning_rate": 8e-05,
"loss": 1.6699,
"step": 318
},
{
"epoch": 0.1550236909245535,
"grad_norm": 0.6896519064903259,
"learning_rate": 8e-05,
"loss": 1.7547,
"step": 319
},
{
"epoch": 0.15550965860770258,
"grad_norm": 0.7503851652145386,
"learning_rate": 8e-05,
"loss": 1.8223,
"step": 320
},
{
"epoch": 0.15599562629085165,
"grad_norm": 0.70699542760849,
"learning_rate": 8e-05,
"loss": 1.7128,
"step": 321
},
{
"epoch": 0.15648159397400072,
"grad_norm": 0.7186034321784973,
"learning_rate": 8e-05,
"loss": 1.6912,
"step": 322
},
{
"epoch": 0.1569675616571498,
"grad_norm": 0.7471842169761658,
"learning_rate": 8e-05,
"loss": 1.8231,
"step": 323
},
{
"epoch": 0.15745352934029888,
"grad_norm": 0.735959529876709,
"learning_rate": 8e-05,
"loss": 1.7811,
"step": 324
},
{
"epoch": 0.15793949702344795,
"grad_norm": 0.7154443264007568,
"learning_rate": 8e-05,
"loss": 1.7168,
"step": 325
},
{
"epoch": 0.15842546470659702,
"grad_norm": 0.7040227055549622,
"learning_rate": 8e-05,
"loss": 1.6931,
"step": 326
},
{
"epoch": 0.1589114323897461,
"grad_norm": 0.7132288217544556,
"learning_rate": 8e-05,
"loss": 1.7103,
"step": 327
},
{
"epoch": 0.15939740007289516,
"grad_norm": 0.7234110236167908,
"learning_rate": 8e-05,
"loss": 1.8161,
"step": 328
},
{
"epoch": 0.15988336775604423,
"grad_norm": 0.7025232911109924,
"learning_rate": 8e-05,
"loss": 1.7208,
"step": 329
},
{
"epoch": 0.1603693354391933,
"grad_norm": 0.7414253950119019,
"learning_rate": 8e-05,
"loss": 1.7837,
"step": 330
},
{
"epoch": 0.16085530312234236,
"grad_norm": 0.7441409826278687,
"learning_rate": 8e-05,
"loss": 1.8675,
"step": 331
},
{
"epoch": 0.16134127080549143,
"grad_norm": 0.6885921955108643,
"learning_rate": 8e-05,
"loss": 1.7308,
"step": 332
},
{
"epoch": 0.1618272384886405,
"grad_norm": 0.7032135128974915,
"learning_rate": 8e-05,
"loss": 1.7341,
"step": 333
},
{
"epoch": 0.16231320617178957,
"grad_norm": 0.7779133915901184,
"learning_rate": 8e-05,
"loss": 1.7767,
"step": 334
},
{
"epoch": 0.16279917385493864,
"grad_norm": 0.7075231671333313,
"learning_rate": 8e-05,
"loss": 1.7358,
"step": 335
},
{
"epoch": 0.1632851415380877,
"grad_norm": 0.7046375274658203,
"learning_rate": 8e-05,
"loss": 1.7414,
"step": 336
},
{
"epoch": 0.16377110922123678,
"grad_norm": 0.706058144569397,
"learning_rate": 8e-05,
"loss": 1.6162,
"step": 337
},
{
"epoch": 0.16425707690438585,
"grad_norm": 0.7058039903640747,
"learning_rate": 8e-05,
"loss": 1.7132,
"step": 338
},
{
"epoch": 0.16474304458753494,
"grad_norm": 0.7231515645980835,
"learning_rate": 8e-05,
"loss": 1.8653,
"step": 339
},
{
"epoch": 0.165229012270684,
"grad_norm": 0.7352226972579956,
"learning_rate": 8e-05,
"loss": 1.8586,
"step": 340
},
{
"epoch": 0.16571497995383308,
"grad_norm": 0.7225251197814941,
"learning_rate": 8e-05,
"loss": 1.7833,
"step": 341
},
{
"epoch": 0.16620094763698215,
"grad_norm": 0.695045530796051,
"learning_rate": 8e-05,
"loss": 1.7219,
"step": 342
},
{
"epoch": 0.16668691532013122,
"grad_norm": 0.8524265885353088,
"learning_rate": 8e-05,
"loss": 1.6984,
"step": 343
},
{
"epoch": 0.1671728830032803,
"grad_norm": 0.7121042609214783,
"learning_rate": 8e-05,
"loss": 1.6768,
"step": 344
},
{
"epoch": 0.16765885068642936,
"grad_norm": 0.7287912964820862,
"learning_rate": 8e-05,
"loss": 1.8547,
"step": 345
},
{
"epoch": 0.16814481836957842,
"grad_norm": 0.7140096426010132,
"learning_rate": 8e-05,
"loss": 1.714,
"step": 346
},
{
"epoch": 0.1686307860527275,
"grad_norm": 0.7194855213165283,
"learning_rate": 8e-05,
"loss": 1.716,
"step": 347
},
{
"epoch": 0.16911675373587656,
"grad_norm": 0.6909961700439453,
"learning_rate": 8e-05,
"loss": 1.6747,
"step": 348
},
{
"epoch": 0.16960272141902563,
"grad_norm": 0.7281751036643982,
"learning_rate": 8e-05,
"loss": 1.8061,
"step": 349
},
{
"epoch": 0.1700886891021747,
"grad_norm": 0.6757460832595825,
"learning_rate": 8e-05,
"loss": 1.7147,
"step": 350
},
{
"epoch": 0.17057465678532377,
"grad_norm": 0.6912110447883606,
"learning_rate": 8e-05,
"loss": 1.6823,
"step": 351
},
{
"epoch": 0.17106062446847284,
"grad_norm": 0.7167717814445496,
"learning_rate": 8e-05,
"loss": 1.7973,
"step": 352
},
{
"epoch": 0.1715465921516219,
"grad_norm": 0.7021996974945068,
"learning_rate": 8e-05,
"loss": 1.5872,
"step": 353
},
{
"epoch": 0.17203255983477098,
"grad_norm": 0.7312988042831421,
"learning_rate": 8e-05,
"loss": 1.7814,
"step": 354
},
{
"epoch": 0.17251852751792005,
"grad_norm": 0.7231782078742981,
"learning_rate": 8e-05,
"loss": 1.7581,
"step": 355
},
{
"epoch": 0.17300449520106914,
"grad_norm": 0.7535229325294495,
"learning_rate": 8e-05,
"loss": 1.7441,
"step": 356
},
{
"epoch": 0.1734904628842182,
"grad_norm": 0.7396194338798523,
"learning_rate": 8e-05,
"loss": 1.7058,
"step": 357
},
{
"epoch": 0.17397643056736728,
"grad_norm": 0.7050851583480835,
"learning_rate": 8e-05,
"loss": 1.7296,
"step": 358
},
{
"epoch": 0.17446239825051635,
"grad_norm": 0.7502471804618835,
"learning_rate": 8e-05,
"loss": 1.7816,
"step": 359
},
{
"epoch": 0.17494836593366542,
"grad_norm": 0.6805384159088135,
"learning_rate": 8e-05,
"loss": 1.6441,
"step": 360
},
{
"epoch": 0.1754343336168145,
"grad_norm": 0.7362212538719177,
"learning_rate": 8e-05,
"loss": 1.7864,
"step": 361
},
{
"epoch": 0.17592030129996356,
"grad_norm": 0.6896436214447021,
"learning_rate": 8e-05,
"loss": 1.7107,
"step": 362
},
{
"epoch": 0.17640626898311262,
"grad_norm": 0.6927011609077454,
"learning_rate": 8e-05,
"loss": 1.7333,
"step": 363
},
{
"epoch": 0.1768922366662617,
"grad_norm": 0.6986691951751709,
"learning_rate": 8e-05,
"loss": 1.7215,
"step": 364
},
{
"epoch": 0.17737820434941076,
"grad_norm": 0.7191731333732605,
"learning_rate": 8e-05,
"loss": 1.7958,
"step": 365
},
{
"epoch": 0.17786417203255983,
"grad_norm": 0.6927951574325562,
"learning_rate": 8e-05,
"loss": 1.7467,
"step": 366
},
{
"epoch": 0.1783501397157089,
"grad_norm": 0.7061179280281067,
"learning_rate": 8e-05,
"loss": 1.7155,
"step": 367
},
{
"epoch": 0.17883610739885797,
"grad_norm": 0.7056461572647095,
"learning_rate": 8e-05,
"loss": 1.6537,
"step": 368
},
{
"epoch": 0.17932207508200704,
"grad_norm": 0.7132648825645447,
"learning_rate": 8e-05,
"loss": 1.7698,
"step": 369
},
{
"epoch": 0.1798080427651561,
"grad_norm": 0.7203075885772705,
"learning_rate": 8e-05,
"loss": 1.7617,
"step": 370
},
{
"epoch": 0.18029401044830518,
"grad_norm": 0.7411680221557617,
"learning_rate": 8e-05,
"loss": 1.739,
"step": 371
},
{
"epoch": 0.18077997813145427,
"grad_norm": 0.7177389860153198,
"learning_rate": 8e-05,
"loss": 1.7428,
"step": 372
},
{
"epoch": 0.18126594581460334,
"grad_norm": 0.6982012987136841,
"learning_rate": 8e-05,
"loss": 1.761,
"step": 373
},
{
"epoch": 0.1817519134977524,
"grad_norm": 0.7072455286979675,
"learning_rate": 8e-05,
"loss": 1.7343,
"step": 374
},
{
"epoch": 0.18223788118090148,
"grad_norm": 0.7454031109809875,
"learning_rate": 8e-05,
"loss": 1.6886,
"step": 375
},
{
"epoch": 0.18272384886405055,
"grad_norm": 1.4396610260009766,
"learning_rate": 8e-05,
"loss": 1.7045,
"step": 376
},
{
"epoch": 0.18320981654719962,
"grad_norm": 0.718212366104126,
"learning_rate": 8e-05,
"loss": 1.6866,
"step": 377
},
{
"epoch": 0.18369578423034869,
"grad_norm": 0.72218257188797,
"learning_rate": 8e-05,
"loss": 1.723,
"step": 378
},
{
"epoch": 0.18418175191349775,
"grad_norm": 0.7068594694137573,
"learning_rate": 8e-05,
"loss": 1.7095,
"step": 379
},
{
"epoch": 0.18466771959664682,
"grad_norm": 0.671427309513092,
"learning_rate": 8e-05,
"loss": 1.6764,
"step": 380
},
{
"epoch": 0.1851536872797959,
"grad_norm": 0.7173644304275513,
"learning_rate": 8e-05,
"loss": 1.7984,
"step": 381
},
{
"epoch": 0.18563965496294496,
"grad_norm": 0.7313031554222107,
"learning_rate": 8e-05,
"loss": 1.8175,
"step": 382
},
{
"epoch": 0.18612562264609403,
"grad_norm": 0.6869568228721619,
"learning_rate": 8e-05,
"loss": 1.7298,
"step": 383
},
{
"epoch": 0.1866115903292431,
"grad_norm": 0.7215043902397156,
"learning_rate": 8e-05,
"loss": 1.7022,
"step": 384
},
{
"epoch": 0.18709755801239217,
"grad_norm": 7.733117580413818,
"learning_rate": 8e-05,
"loss": 1.6847,
"step": 385
},
{
"epoch": 0.18758352569554124,
"grad_norm": 0.7839037179946899,
"learning_rate": 8e-05,
"loss": 1.7405,
"step": 386
},
{
"epoch": 0.1880694933786903,
"grad_norm": 0.7257310748100281,
"learning_rate": 8e-05,
"loss": 1.6304,
"step": 387
},
{
"epoch": 0.18855546106183937,
"grad_norm": 0.7267897725105286,
"learning_rate": 8e-05,
"loss": 1.6344,
"step": 388
},
{
"epoch": 0.18904142874498847,
"grad_norm": 0.7415399551391602,
"learning_rate": 8e-05,
"loss": 1.7804,
"step": 389
},
{
"epoch": 0.18952739642813754,
"grad_norm": 0.7067776918411255,
"learning_rate": 8e-05,
"loss": 1.7362,
"step": 390
},
{
"epoch": 0.1900133641112866,
"grad_norm": 0.7052979469299316,
"learning_rate": 8e-05,
"loss": 1.6074,
"step": 391
},
{
"epoch": 0.19049933179443568,
"grad_norm": 0.7144705057144165,
"learning_rate": 8e-05,
"loss": 1.6859,
"step": 392
},
{
"epoch": 0.19098529947758475,
"grad_norm": 0.7987439632415771,
"learning_rate": 8e-05,
"loss": 1.8387,
"step": 393
},
{
"epoch": 0.19147126716073382,
"grad_norm": 0.6927505731582642,
"learning_rate": 8e-05,
"loss": 1.6787,
"step": 394
},
{
"epoch": 0.19195723484388288,
"grad_norm": 0.7275456786155701,
"learning_rate": 8e-05,
"loss": 1.7561,
"step": 395
},
{
"epoch": 0.19244320252703195,
"grad_norm": 0.7200303673744202,
"learning_rate": 8e-05,
"loss": 1.8142,
"step": 396
},
{
"epoch": 0.19292917021018102,
"grad_norm": 0.76529461145401,
"learning_rate": 8e-05,
"loss": 1.8003,
"step": 397
},
{
"epoch": 0.1934151378933301,
"grad_norm": 0.7350556254386902,
"learning_rate": 8e-05,
"loss": 1.7114,
"step": 398
},
{
"epoch": 0.19390110557647916,
"grad_norm": 0.8515435457229614,
"learning_rate": 8e-05,
"loss": 1.7445,
"step": 399
},
{
"epoch": 0.19438707325962823,
"grad_norm": 1.9147905111312866,
"learning_rate": 8e-05,
"loss": 1.6894,
"step": 400
},
{
"epoch": 0.1948730409427773,
"grad_norm": 0.728226363658905,
"learning_rate": 8e-05,
"loss": 1.6702,
"step": 401
},
{
"epoch": 0.19535900862592637,
"grad_norm": 0.70070481300354,
"learning_rate": 8e-05,
"loss": 1.731,
"step": 402
},
{
"epoch": 0.19584497630907544,
"grad_norm": 1.173539161682129,
"learning_rate": 8e-05,
"loss": 1.7196,
"step": 403
},
{
"epoch": 0.1963309439922245,
"grad_norm": 0.7202131748199463,
"learning_rate": 8e-05,
"loss": 1.6708,
"step": 404
},
{
"epoch": 0.1968169116753736,
"grad_norm": 0.736707866191864,
"learning_rate": 8e-05,
"loss": 1.667,
"step": 405
},
{
"epoch": 0.19730287935852267,
"grad_norm": 0.7264196276664734,
"learning_rate": 8e-05,
"loss": 1.8187,
"step": 406
},
{
"epoch": 0.19778884704167174,
"grad_norm": 0.7067548036575317,
"learning_rate": 8e-05,
"loss": 1.7224,
"step": 407
},
{
"epoch": 0.1982748147248208,
"grad_norm": 0.6923950910568237,
"learning_rate": 8e-05,
"loss": 1.5963,
"step": 408
},
{
"epoch": 0.19876078240796988,
"grad_norm": 0.7020213603973389,
"learning_rate": 8e-05,
"loss": 1.6833,
"step": 409
},
{
"epoch": 0.19924675009111895,
"grad_norm": 0.6964422464370728,
"learning_rate": 8e-05,
"loss": 1.7069,
"step": 410
},
{
"epoch": 0.19973271777426801,
"grad_norm": 0.7008177042007446,
"learning_rate": 8e-05,
"loss": 1.7331,
"step": 411
},
{
"epoch": 0.20021868545741708,
"grad_norm": 0.7455174326896667,
"learning_rate": 8e-05,
"loss": 1.7389,
"step": 412
},
{
"epoch": 0.20070465314056615,
"grad_norm": 0.7137875556945801,
"learning_rate": 8e-05,
"loss": 1.6892,
"step": 413
},
{
"epoch": 0.20119062082371522,
"grad_norm": 0.7967577576637268,
"learning_rate": 8e-05,
"loss": 1.7151,
"step": 414
},
{
"epoch": 0.2016765885068643,
"grad_norm": 0.7129815816879272,
"learning_rate": 8e-05,
"loss": 1.691,
"step": 415
},
{
"epoch": 0.20216255619001336,
"grad_norm": 0.7160812616348267,
"learning_rate": 8e-05,
"loss": 1.8106,
"step": 416
},
{
"epoch": 0.20264852387316243,
"grad_norm": 0.7563639879226685,
"learning_rate": 8e-05,
"loss": 1.7355,
"step": 417
},
{
"epoch": 0.2031344915563115,
"grad_norm": 0.721718430519104,
"learning_rate": 8e-05,
"loss": 1.6833,
"step": 418
},
{
"epoch": 0.20362045923946057,
"grad_norm": 0.75996994972229,
"learning_rate": 8e-05,
"loss": 1.7793,
"step": 419
},
{
"epoch": 0.20410642692260964,
"grad_norm": 0.7527758479118347,
"learning_rate": 8e-05,
"loss": 1.8202,
"step": 420
},
{
"epoch": 0.2045923946057587,
"grad_norm": 0.7160975337028503,
"learning_rate": 8e-05,
"loss": 1.7051,
"step": 421
},
{
"epoch": 0.2050783622889078,
"grad_norm": 0.6943715810775757,
"learning_rate": 8e-05,
"loss": 1.6948,
"step": 422
},
{
"epoch": 0.20556432997205687,
"grad_norm": 0.734963059425354,
"learning_rate": 8e-05,
"loss": 1.7829,
"step": 423
},
{
"epoch": 0.20605029765520594,
"grad_norm": 0.6912250518798828,
"learning_rate": 8e-05,
"loss": 1.5947,
"step": 424
},
{
"epoch": 0.206536265338355,
"grad_norm": 0.7323968410491943,
"learning_rate": 8e-05,
"loss": 1.7622,
"step": 425
},
{
"epoch": 0.20702223302150408,
"grad_norm": 0.7110946774482727,
"learning_rate": 8e-05,
"loss": 1.74,
"step": 426
},
{
"epoch": 0.20750820070465315,
"grad_norm": 0.6980271935462952,
"learning_rate": 8e-05,
"loss": 1.7103,
"step": 427
},
{
"epoch": 0.20799416838780221,
"grad_norm": 0.6758553385734558,
"learning_rate": 8e-05,
"loss": 1.6979,
"step": 428
},
{
"epoch": 0.20848013607095128,
"grad_norm": 0.7173061966896057,
"learning_rate": 8e-05,
"loss": 1.655,
"step": 429
},
{
"epoch": 0.20896610375410035,
"grad_norm": 0.7417454719543457,
"learning_rate": 8e-05,
"loss": 1.691,
"step": 430
},
{
"epoch": 0.20945207143724942,
"grad_norm": 0.7320432662963867,
"learning_rate": 8e-05,
"loss": 1.6892,
"step": 431
},
{
"epoch": 0.2099380391203985,
"grad_norm": 0.7141425609588623,
"learning_rate": 8e-05,
"loss": 1.761,
"step": 432
},
{
"epoch": 0.21042400680354756,
"grad_norm": 0.7038617134094238,
"learning_rate": 8e-05,
"loss": 1.6483,
"step": 433
},
{
"epoch": 0.21090997448669663,
"grad_norm": 0.7042442560195923,
"learning_rate": 8e-05,
"loss": 1.6706,
"step": 434
},
{
"epoch": 0.2113959421698457,
"grad_norm": 0.7139825224876404,
"learning_rate": 8e-05,
"loss": 1.8819,
"step": 435
},
{
"epoch": 0.21188190985299477,
"grad_norm": 0.7329289317131042,
"learning_rate": 8e-05,
"loss": 1.707,
"step": 436
},
{
"epoch": 0.21236787753614383,
"grad_norm": 0.7252111434936523,
"learning_rate": 8e-05,
"loss": 1.795,
"step": 437
},
{
"epoch": 0.21285384521929293,
"grad_norm": 0.702537477016449,
"learning_rate": 8e-05,
"loss": 1.7212,
"step": 438
},
{
"epoch": 0.213339812902442,
"grad_norm": 0.6974433064460754,
"learning_rate": 8e-05,
"loss": 1.6236,
"step": 439
},
{
"epoch": 0.21382578058559107,
"grad_norm": 0.6817081570625305,
"learning_rate": 8e-05,
"loss": 1.7179,
"step": 440
},
{
"epoch": 0.21431174826874014,
"grad_norm": 0.6872859001159668,
"learning_rate": 8e-05,
"loss": 1.6819,
"step": 441
},
{
"epoch": 0.2147977159518892,
"grad_norm": 0.6921105980873108,
"learning_rate": 8e-05,
"loss": 1.6871,
"step": 442
},
{
"epoch": 0.21528368363503828,
"grad_norm": 0.711101233959198,
"learning_rate": 8e-05,
"loss": 1.7059,
"step": 443
},
{
"epoch": 0.21576965131818734,
"grad_norm": 0.7590940594673157,
"learning_rate": 8e-05,
"loss": 1.7404,
"step": 444
},
{
"epoch": 0.2162556190013364,
"grad_norm": 0.6970642805099487,
"learning_rate": 8e-05,
"loss": 1.6561,
"step": 445
},
{
"epoch": 0.21674158668448548,
"grad_norm": 0.7204115390777588,
"learning_rate": 8e-05,
"loss": 1.7421,
"step": 446
},
{
"epoch": 0.21722755436763455,
"grad_norm": 0.7247830033302307,
"learning_rate": 8e-05,
"loss": 1.7893,
"step": 447
},
{
"epoch": 0.21771352205078362,
"grad_norm": 0.6967059373855591,
"learning_rate": 8e-05,
"loss": 1.6915,
"step": 448
},
{
"epoch": 0.2181994897339327,
"grad_norm": 0.7237229943275452,
"learning_rate": 8e-05,
"loss": 1.7392,
"step": 449
},
{
"epoch": 0.21868545741708176,
"grad_norm": 0.701810359954834,
"learning_rate": 8e-05,
"loss": 1.6817,
"step": 450
},
{
"epoch": 0.21917142510023083,
"grad_norm": 1.5513955354690552,
"learning_rate": 8e-05,
"loss": 1.7033,
"step": 451
},
{
"epoch": 0.2196573927833799,
"grad_norm": 0.6946969032287598,
"learning_rate": 8e-05,
"loss": 1.7659,
"step": 452
},
{
"epoch": 0.22014336046652896,
"grad_norm": 0.7080284357070923,
"learning_rate": 8e-05,
"loss": 1.6997,
"step": 453
},
{
"epoch": 0.22062932814967803,
"grad_norm": 0.7313497066497803,
"learning_rate": 8e-05,
"loss": 1.774,
"step": 454
},
{
"epoch": 0.22111529583282713,
"grad_norm": 0.7222405672073364,
"learning_rate": 8e-05,
"loss": 1.7694,
"step": 455
},
{
"epoch": 0.2216012635159762,
"grad_norm": 0.7030674815177917,
"learning_rate": 8e-05,
"loss": 1.7607,
"step": 456
},
{
"epoch": 0.22208723119912527,
"grad_norm": 0.7265918254852295,
"learning_rate": 8e-05,
"loss": 1.6971,
"step": 457
},
{
"epoch": 0.22257319888227434,
"grad_norm": 0.6983145475387573,
"learning_rate": 8e-05,
"loss": 1.7995,
"step": 458
},
{
"epoch": 0.2230591665654234,
"grad_norm": 0.7484574913978577,
"learning_rate": 8e-05,
"loss": 1.7649,
"step": 459
},
{
"epoch": 0.22354513424857247,
"grad_norm": 0.7329131960868835,
"learning_rate": 8e-05,
"loss": 1.7882,
"step": 460
},
{
"epoch": 0.22403110193172154,
"grad_norm": 0.7029368877410889,
"learning_rate": 8e-05,
"loss": 1.7836,
"step": 461
},
{
"epoch": 0.2245170696148706,
"grad_norm": 0.6968295574188232,
"learning_rate": 8e-05,
"loss": 1.7168,
"step": 462
},
{
"epoch": 0.22500303729801968,
"grad_norm": 0.7136850357055664,
"learning_rate": 8e-05,
"loss": 1.7984,
"step": 463
},
{
"epoch": 0.22548900498116875,
"grad_norm": 0.683163583278656,
"learning_rate": 8e-05,
"loss": 1.693,
"step": 464
},
{
"epoch": 0.22597497266431782,
"grad_norm": 0.7015848755836487,
"learning_rate": 8e-05,
"loss": 1.7696,
"step": 465
},
{
"epoch": 0.2264609403474669,
"grad_norm": 0.7096793055534363,
"learning_rate": 8e-05,
"loss": 1.7443,
"step": 466
},
{
"epoch": 0.22694690803061596,
"grad_norm": 0.6917792558670044,
"learning_rate": 8e-05,
"loss": 1.5948,
"step": 467
},
{
"epoch": 0.22743287571376503,
"grad_norm": 0.6934563517570496,
"learning_rate": 8e-05,
"loss": 1.705,
"step": 468
},
{
"epoch": 0.2279188433969141,
"grad_norm": 0.6998134255409241,
"learning_rate": 8e-05,
"loss": 1.7117,
"step": 469
},
{
"epoch": 0.22840481108006316,
"grad_norm": 0.7344012260437012,
"learning_rate": 8e-05,
"loss": 1.8588,
"step": 470
},
{
"epoch": 0.22889077876321223,
"grad_norm": 0.6837858557701111,
"learning_rate": 8e-05,
"loss": 1.651,
"step": 471
},
{
"epoch": 0.22937674644636133,
"grad_norm": 0.7158222794532776,
"learning_rate": 8e-05,
"loss": 1.7681,
"step": 472
},
{
"epoch": 0.2298627141295104,
"grad_norm": 0.7105070948600769,
"learning_rate": 8e-05,
"loss": 1.6895,
"step": 473
},
{
"epoch": 0.23034868181265947,
"grad_norm": 0.7316089272499084,
"learning_rate": 8e-05,
"loss": 1.7805,
"step": 474
},
{
"epoch": 0.23083464949580854,
"grad_norm": 0.6881155371665955,
"learning_rate": 8e-05,
"loss": 1.7601,
"step": 475
},
{
"epoch": 0.2313206171789576,
"grad_norm": 0.7245110273361206,
"learning_rate": 8e-05,
"loss": 1.7934,
"step": 476
},
{
"epoch": 0.23180658486210667,
"grad_norm": 0.689852237701416,
"learning_rate": 8e-05,
"loss": 1.7212,
"step": 477
},
{
"epoch": 0.23229255254525574,
"grad_norm": 0.8751168251037598,
"learning_rate": 8e-05,
"loss": 1.7905,
"step": 478
},
{
"epoch": 0.2327785202284048,
"grad_norm": 0.7239623069763184,
"learning_rate": 8e-05,
"loss": 1.8312,
"step": 479
},
{
"epoch": 0.23326448791155388,
"grad_norm": 0.6992548108100891,
"learning_rate": 8e-05,
"loss": 1.7071,
"step": 480
},
{
"epoch": 0.23375045559470295,
"grad_norm": 0.6987637281417847,
"learning_rate": 8e-05,
"loss": 1.686,
"step": 481
},
{
"epoch": 0.23423642327785202,
"grad_norm": 0.7247014045715332,
"learning_rate": 8e-05,
"loss": 1.7061,
"step": 482
},
{
"epoch": 0.2347223909610011,
"grad_norm": 0.6816551685333252,
"learning_rate": 8e-05,
"loss": 1.5654,
"step": 483
},
{
"epoch": 0.23520835864415016,
"grad_norm": 0.7179449200630188,
"learning_rate": 8e-05,
"loss": 1.9025,
"step": 484
},
{
"epoch": 0.23569432632729923,
"grad_norm": 0.7383981347084045,
"learning_rate": 8e-05,
"loss": 1.7413,
"step": 485
},
{
"epoch": 0.2361802940104483,
"grad_norm": 0.7060659527778625,
"learning_rate": 8e-05,
"loss": 1.6927,
"step": 486
},
{
"epoch": 0.23666626169359736,
"grad_norm": 0.6815193891525269,
"learning_rate": 8e-05,
"loss": 1.7446,
"step": 487
},
{
"epoch": 0.23715222937674646,
"grad_norm": 0.6914201974868774,
"learning_rate": 8e-05,
"loss": 1.6724,
"step": 488
},
{
"epoch": 0.23763819705989553,
"grad_norm": 0.722079873085022,
"learning_rate": 8e-05,
"loss": 1.7868,
"step": 489
},
{
"epoch": 0.2381241647430446,
"grad_norm": 0.70040363073349,
"learning_rate": 8e-05,
"loss": 1.7564,
"step": 490
},
{
"epoch": 0.23861013242619367,
"grad_norm": 0.7107282280921936,
"learning_rate": 8e-05,
"loss": 1.684,
"step": 491
},
{
"epoch": 0.23909610010934274,
"grad_norm": 0.6916898488998413,
"learning_rate": 8e-05,
"loss": 1.6996,
"step": 492
},
{
"epoch": 0.2395820677924918,
"grad_norm": 0.7284011840820312,
"learning_rate": 8e-05,
"loss": 1.8056,
"step": 493
},
{
"epoch": 0.24006803547564087,
"grad_norm": 0.7089860439300537,
"learning_rate": 8e-05,
"loss": 1.7047,
"step": 494
},
{
"epoch": 0.24055400315878994,
"grad_norm": 0.6985988020896912,
"learning_rate": 8e-05,
"loss": 1.7654,
"step": 495
},
{
"epoch": 0.241039970841939,
"grad_norm": 0.6758917570114136,
"learning_rate": 8e-05,
"loss": 1.6573,
"step": 496
},
{
"epoch": 0.24152593852508808,
"grad_norm": 0.693466305732727,
"learning_rate": 8e-05,
"loss": 1.6511,
"step": 497
},
{
"epoch": 0.24201190620823715,
"grad_norm": 0.736146867275238,
"learning_rate": 8e-05,
"loss": 1.703,
"step": 498
},
{
"epoch": 0.24249787389138622,
"grad_norm": 0.7013412714004517,
"learning_rate": 8e-05,
"loss": 1.6233,
"step": 499
},
{
"epoch": 0.2429838415745353,
"grad_norm": 0.6862726211547852,
"learning_rate": 8e-05,
"loss": 1.6619,
"step": 500
},
{
"epoch": 0.24346980925768436,
"grad_norm": 0.7250792980194092,
"learning_rate": 8e-05,
"loss": 1.7591,
"step": 501
},
{
"epoch": 0.24395577694083342,
"grad_norm": 0.6944916248321533,
"learning_rate": 8e-05,
"loss": 1.7817,
"step": 502
},
{
"epoch": 0.2444417446239825,
"grad_norm": 0.6999439597129822,
"learning_rate": 8e-05,
"loss": 1.7159,
"step": 503
},
{
"epoch": 0.24492771230713156,
"grad_norm": 0.6603913903236389,
"learning_rate": 8e-05,
"loss": 1.6739,
"step": 504
},
{
"epoch": 0.24541367999028066,
"grad_norm": 0.7047970294952393,
"learning_rate": 8e-05,
"loss": 1.6637,
"step": 505
},
{
"epoch": 0.24589964767342973,
"grad_norm": 0.7362136840820312,
"learning_rate": 8e-05,
"loss": 1.8074,
"step": 506
},
{
"epoch": 0.2463856153565788,
"grad_norm": 0.6744579672813416,
"learning_rate": 8e-05,
"loss": 1.6935,
"step": 507
},
{
"epoch": 0.24687158303972787,
"grad_norm": 0.6960515379905701,
"learning_rate": 8e-05,
"loss": 1.7088,
"step": 508
},
{
"epoch": 0.24735755072287693,
"grad_norm": 0.7346571683883667,
"learning_rate": 8e-05,
"loss": 1.7757,
"step": 509
},
{
"epoch": 0.247843518406026,
"grad_norm": 0.7043801546096802,
"learning_rate": 8e-05,
"loss": 1.7567,
"step": 510
},
{
"epoch": 0.24832948608917507,
"grad_norm": 0.7161319851875305,
"learning_rate": 8e-05,
"loss": 1.7718,
"step": 511
},
{
"epoch": 0.24881545377232414,
"grad_norm": 0.6959817409515381,
"learning_rate": 8e-05,
"loss": 1.7595,
"step": 512
},
{
"epoch": 0.2493014214554732,
"grad_norm": 0.6952643990516663,
"learning_rate": 8e-05,
"loss": 1.58,
"step": 513
},
{
"epoch": 0.24978738913862228,
"grad_norm": 0.7130714058876038,
"learning_rate": 8e-05,
"loss": 1.7052,
"step": 514
},
{
"epoch": 0.2502733568217714,
"grad_norm": 0.6457836627960205,
"learning_rate": 8e-05,
"loss": 1.589,
"step": 515
},
{
"epoch": 0.25075932450492044,
"grad_norm": 0.6648083925247192,
"learning_rate": 8e-05,
"loss": 1.778,
"step": 516
},
{
"epoch": 0.2512452921880695,
"grad_norm": 0.7063839435577393,
"learning_rate": 8e-05,
"loss": 1.727,
"step": 517
},
{
"epoch": 0.2517312598712186,
"grad_norm": 0.6737691760063171,
"learning_rate": 8e-05,
"loss": 1.6741,
"step": 518
},
{
"epoch": 0.25221722755436765,
"grad_norm": 0.712895393371582,
"learning_rate": 8e-05,
"loss": 1.7191,
"step": 519
},
{
"epoch": 0.2527031952375167,
"grad_norm": 0.714028537273407,
"learning_rate": 8e-05,
"loss": 1.7196,
"step": 520
},
{
"epoch": 0.2531891629206658,
"grad_norm": 0.6951844692230225,
"learning_rate": 8e-05,
"loss": 1.7803,
"step": 521
},
{
"epoch": 0.25367513060381486,
"grad_norm": 0.7156295776367188,
"learning_rate": 8e-05,
"loss": 1.6458,
"step": 522
},
{
"epoch": 0.2541610982869639,
"grad_norm": 0.6805440187454224,
"learning_rate": 8e-05,
"loss": 1.7051,
"step": 523
},
{
"epoch": 0.254647065970113,
"grad_norm": 0.6859557032585144,
"learning_rate": 8e-05,
"loss": 1.7722,
"step": 524
},
{
"epoch": 0.25513303365326206,
"grad_norm": 0.7139891386032104,
"learning_rate": 8e-05,
"loss": 1.5721,
"step": 525
},
{
"epoch": 0.25561900133641113,
"grad_norm": 0.7454090118408203,
"learning_rate": 8e-05,
"loss": 1.8498,
"step": 526
},
{
"epoch": 0.2561049690195602,
"grad_norm": 0.7028915882110596,
"learning_rate": 8e-05,
"loss": 1.7139,
"step": 527
},
{
"epoch": 0.25659093670270927,
"grad_norm": 0.683081328868866,
"learning_rate": 8e-05,
"loss": 1.7758,
"step": 528
},
{
"epoch": 0.25707690438585834,
"grad_norm": 0.7279673218727112,
"learning_rate": 8e-05,
"loss": 1.6658,
"step": 529
},
{
"epoch": 0.2575628720690074,
"grad_norm": 0.6918593645095825,
"learning_rate": 8e-05,
"loss": 1.7601,
"step": 530
},
{
"epoch": 0.2580488397521565,
"grad_norm": 0.7218605279922485,
"learning_rate": 8e-05,
"loss": 1.7032,
"step": 531
},
{
"epoch": 0.25853480743530555,
"grad_norm": 0.7645944356918335,
"learning_rate": 8e-05,
"loss": 1.8349,
"step": 532
},
{
"epoch": 0.2590207751184546,
"grad_norm": 0.7302256226539612,
"learning_rate": 8e-05,
"loss": 1.7089,
"step": 533
},
{
"epoch": 0.2595067428016037,
"grad_norm": 0.7124837040901184,
"learning_rate": 8e-05,
"loss": 1.8135,
"step": 534
},
{
"epoch": 0.25999271048475275,
"grad_norm": 0.6755090355873108,
"learning_rate": 8e-05,
"loss": 1.7794,
"step": 535
},
{
"epoch": 0.2604786781679018,
"grad_norm": 0.6875449419021606,
"learning_rate": 8e-05,
"loss": 1.6895,
"step": 536
},
{
"epoch": 0.2609646458510509,
"grad_norm": 0.68622887134552,
"learning_rate": 8e-05,
"loss": 1.7283,
"step": 537
},
{
"epoch": 0.26145061353419996,
"grad_norm": 0.708236575126648,
"learning_rate": 8e-05,
"loss": 1.7276,
"step": 538
},
{
"epoch": 0.26193658121734903,
"grad_norm": 0.7129213213920593,
"learning_rate": 8e-05,
"loss": 1.8007,
"step": 539
},
{
"epoch": 0.2624225489004981,
"grad_norm": 0.7373605966567993,
"learning_rate": 8e-05,
"loss": 1.7766,
"step": 540
},
{
"epoch": 0.26290851658364717,
"grad_norm": 0.7104474306106567,
"learning_rate": 8e-05,
"loss": 1.7928,
"step": 541
},
{
"epoch": 0.26339448426679624,
"grad_norm": 0.6636412739753723,
"learning_rate": 8e-05,
"loss": 1.4502,
"step": 542
},
{
"epoch": 0.2638804519499453,
"grad_norm": 0.7120438814163208,
"learning_rate": 8e-05,
"loss": 1.7942,
"step": 543
},
{
"epoch": 0.2643664196330944,
"grad_norm": 0.6799701452255249,
"learning_rate": 8e-05,
"loss": 1.6017,
"step": 544
},
{
"epoch": 0.26485238731624344,
"grad_norm": 0.6921384930610657,
"learning_rate": 8e-05,
"loss": 1.7992,
"step": 545
},
{
"epoch": 0.26533835499939257,
"grad_norm": 0.6836612820625305,
"learning_rate": 8e-05,
"loss": 1.7116,
"step": 546
},
{
"epoch": 0.26582432268254164,
"grad_norm": 0.6928842663764954,
"learning_rate": 8e-05,
"loss": 1.6887,
"step": 547
},
{
"epoch": 0.2663102903656907,
"grad_norm": 0.6903363466262817,
"learning_rate": 8e-05,
"loss": 1.7396,
"step": 548
},
{
"epoch": 0.2667962580488398,
"grad_norm": 0.7098380923271179,
"learning_rate": 8e-05,
"loss": 1.7317,
"step": 549
},
{
"epoch": 0.26728222573198884,
"grad_norm": 0.720300018787384,
"learning_rate": 8e-05,
"loss": 1.7333,
"step": 550
},
{
"epoch": 0.2677681934151379,
"grad_norm": 0.6820052266120911,
"learning_rate": 8e-05,
"loss": 1.6491,
"step": 551
},
{
"epoch": 0.268254161098287,
"grad_norm": 0.6927011013031006,
"learning_rate": 8e-05,
"loss": 1.6107,
"step": 552
},
{
"epoch": 0.26874012878143605,
"grad_norm": 0.6916157603263855,
"learning_rate": 8e-05,
"loss": 1.7209,
"step": 553
},
{
"epoch": 0.2692260964645851,
"grad_norm": 0.7326366901397705,
"learning_rate": 8e-05,
"loss": 1.7567,
"step": 554
},
{
"epoch": 0.2697120641477342,
"grad_norm": 0.68709796667099,
"learning_rate": 8e-05,
"loss": 1.6627,
"step": 555
},
{
"epoch": 0.27019803183088326,
"grad_norm": 0.7288592457771301,
"learning_rate": 8e-05,
"loss": 1.8309,
"step": 556
},
{
"epoch": 0.2706839995140323,
"grad_norm": 0.6784886121749878,
"learning_rate": 8e-05,
"loss": 1.7597,
"step": 557
},
{
"epoch": 0.2711699671971814,
"grad_norm": 0.7501283288002014,
"learning_rate": 8e-05,
"loss": 1.7674,
"step": 558
},
{
"epoch": 0.27165593488033046,
"grad_norm": 0.7130365967750549,
"learning_rate": 8e-05,
"loss": 1.7476,
"step": 559
},
{
"epoch": 0.27214190256347953,
"grad_norm": 0.6928538680076599,
"learning_rate": 8e-05,
"loss": 1.7251,
"step": 560
},
{
"epoch": 0.2726278702466286,
"grad_norm": 0.6954242587089539,
"learning_rate": 8e-05,
"loss": 1.7667,
"step": 561
},
{
"epoch": 0.27311383792977767,
"grad_norm": 0.7214319109916687,
"learning_rate": 8e-05,
"loss": 1.658,
"step": 562
},
{
"epoch": 0.27359980561292674,
"grad_norm": 0.6760271191596985,
"learning_rate": 8e-05,
"loss": 1.7001,
"step": 563
},
{
"epoch": 0.2740857732960758,
"grad_norm": 0.7545631527900696,
"learning_rate": 8e-05,
"loss": 1.6607,
"step": 564
},
{
"epoch": 0.2745717409792249,
"grad_norm": 0.7361739277839661,
"learning_rate": 8e-05,
"loss": 1.717,
"step": 565
},
{
"epoch": 0.27505770866237395,
"grad_norm": 0.702701210975647,
"learning_rate": 8e-05,
"loss": 1.8446,
"step": 566
},
{
"epoch": 0.275543676345523,
"grad_norm": 0.7299447655677795,
"learning_rate": 8e-05,
"loss": 1.9285,
"step": 567
},
{
"epoch": 0.2760296440286721,
"grad_norm": 0.7114670872688293,
"learning_rate": 8e-05,
"loss": 1.7747,
"step": 568
},
{
"epoch": 0.27651561171182115,
"grad_norm": 0.6972919702529907,
"learning_rate": 8e-05,
"loss": 1.7866,
"step": 569
},
{
"epoch": 0.2770015793949702,
"grad_norm": 0.69215327501297,
"learning_rate": 8e-05,
"loss": 1.7224,
"step": 570
},
{
"epoch": 0.2774875470781193,
"grad_norm": 0.6653308868408203,
"learning_rate": 8e-05,
"loss": 1.6739,
"step": 571
},
{
"epoch": 0.27797351476126836,
"grad_norm": 0.7020081281661987,
"learning_rate": 8e-05,
"loss": 1.7776,
"step": 572
},
{
"epoch": 0.2784594824444174,
"grad_norm": 0.6915746927261353,
"learning_rate": 8e-05,
"loss": 1.7161,
"step": 573
},
{
"epoch": 0.2789454501275665,
"grad_norm": 0.7168675661087036,
"learning_rate": 8e-05,
"loss": 1.6718,
"step": 574
},
{
"epoch": 0.27943141781071557,
"grad_norm": 0.6929783225059509,
"learning_rate": 8e-05,
"loss": 1.6508,
"step": 575
},
{
"epoch": 0.27991738549386463,
"grad_norm": 0.6933654546737671,
"learning_rate": 8e-05,
"loss": 1.6531,
"step": 576
},
{
"epoch": 0.2804033531770137,
"grad_norm": 0.730681300163269,
"learning_rate": 8e-05,
"loss": 1.7288,
"step": 577
},
{
"epoch": 0.28088932086016277,
"grad_norm": 0.7689934372901917,
"learning_rate": 8e-05,
"loss": 1.7032,
"step": 578
},
{
"epoch": 0.2813752885433119,
"grad_norm": 0.7239090204238892,
"learning_rate": 8e-05,
"loss": 1.7474,
"step": 579
},
{
"epoch": 0.28186125622646097,
"grad_norm": 0.8013045787811279,
"learning_rate": 8e-05,
"loss": 1.7319,
"step": 580
},
{
"epoch": 0.28234722390961003,
"grad_norm": 0.7367481589317322,
"learning_rate": 8e-05,
"loss": 1.7359,
"step": 581
},
{
"epoch": 0.2828331915927591,
"grad_norm": 0.7651221752166748,
"learning_rate": 8e-05,
"loss": 1.6418,
"step": 582
},
{
"epoch": 0.2833191592759082,
"grad_norm": 0.7227338552474976,
"learning_rate": 8e-05,
"loss": 1.8029,
"step": 583
},
{
"epoch": 0.28380512695905724,
"grad_norm": 0.7584525346755981,
"learning_rate": 8e-05,
"loss": 1.7288,
"step": 584
},
{
"epoch": 0.2842910946422063,
"grad_norm": 0.7245039343833923,
"learning_rate": 8e-05,
"loss": 1.8557,
"step": 585
},
{
"epoch": 0.2847770623253554,
"grad_norm": 0.7978915572166443,
"learning_rate": 8e-05,
"loss": 1.7347,
"step": 586
},
{
"epoch": 0.28526303000850445,
"grad_norm": 0.7050460577011108,
"learning_rate": 8e-05,
"loss": 1.8232,
"step": 587
},
{
"epoch": 0.2857489976916535,
"grad_norm": 0.7757149338722229,
"learning_rate": 8e-05,
"loss": 1.7079,
"step": 588
},
{
"epoch": 0.2862349653748026,
"grad_norm": 0.6907417178153992,
"learning_rate": 8e-05,
"loss": 1.7015,
"step": 589
},
{
"epoch": 0.28672093305795165,
"grad_norm": 0.7418529391288757,
"learning_rate": 8e-05,
"loss": 1.7228,
"step": 590
},
{
"epoch": 0.2872069007411007,
"grad_norm": 0.7167986035346985,
"learning_rate": 8e-05,
"loss": 1.7876,
"step": 591
},
{
"epoch": 0.2876928684242498,
"grad_norm": 0.6988394856452942,
"learning_rate": 8e-05,
"loss": 1.7615,
"step": 592
},
{
"epoch": 0.28817883610739886,
"grad_norm": 0.6974465847015381,
"learning_rate": 8e-05,
"loss": 1.7155,
"step": 593
},
{
"epoch": 0.28866480379054793,
"grad_norm": 0.7521048784255981,
"learning_rate": 8e-05,
"loss": 1.7372,
"step": 594
},
{
"epoch": 0.289150771473697,
"grad_norm": 0.6788330674171448,
"learning_rate": 8e-05,
"loss": 1.6089,
"step": 595
},
{
"epoch": 0.28963673915684607,
"grad_norm": 0.6883137822151184,
"learning_rate": 8e-05,
"loss": 1.6851,
"step": 596
},
{
"epoch": 0.29012270683999514,
"grad_norm": 0.6942862272262573,
"learning_rate": 8e-05,
"loss": 1.7191,
"step": 597
},
{
"epoch": 0.2906086745231442,
"grad_norm": 0.69673752784729,
"learning_rate": 8e-05,
"loss": 1.6598,
"step": 598
},
{
"epoch": 0.2910946422062933,
"grad_norm": 0.7147909998893738,
"learning_rate": 8e-05,
"loss": 1.775,
"step": 599
},
{
"epoch": 0.29158060988944234,
"grad_norm": 0.7194314002990723,
"learning_rate": 8e-05,
"loss": 1.6175,
"step": 600
},
{
"epoch": 0.2920665775725914,
"grad_norm": 0.7144424319267273,
"learning_rate": 8e-05,
"loss": 1.6754,
"step": 601
},
{
"epoch": 0.2925525452557405,
"grad_norm": 0.7796189785003662,
"learning_rate": 8e-05,
"loss": 1.7452,
"step": 602
},
{
"epoch": 0.29303851293888955,
"grad_norm": 0.7541716694831848,
"learning_rate": 8e-05,
"loss": 1.7163,
"step": 603
},
{
"epoch": 0.2935244806220386,
"grad_norm": 0.743827760219574,
"learning_rate": 8e-05,
"loss": 1.8049,
"step": 604
},
{
"epoch": 0.2940104483051877,
"grad_norm": 0.7244659066200256,
"learning_rate": 8e-05,
"loss": 1.7454,
"step": 605
},
{
"epoch": 0.29449641598833676,
"grad_norm": 0.7259283661842346,
"learning_rate": 8e-05,
"loss": 1.7226,
"step": 606
},
{
"epoch": 0.2949823836714858,
"grad_norm": 0.6881210207939148,
"learning_rate": 8e-05,
"loss": 1.7789,
"step": 607
},
{
"epoch": 0.2954683513546349,
"grad_norm": 0.6795855164527893,
"learning_rate": 8e-05,
"loss": 1.6374,
"step": 608
},
{
"epoch": 0.29595431903778396,
"grad_norm": 0.6765013933181763,
"learning_rate": 8e-05,
"loss": 1.6363,
"step": 609
},
{
"epoch": 0.29644028672093303,
"grad_norm": 0.7044883370399475,
"learning_rate": 8e-05,
"loss": 1.6962,
"step": 610
},
{
"epoch": 0.2969262544040821,
"grad_norm": 0.7081432342529297,
"learning_rate": 8e-05,
"loss": 1.737,
"step": 611
},
{
"epoch": 0.2974122220872312,
"grad_norm": 0.7889771461486816,
"learning_rate": 8e-05,
"loss": 1.6903,
"step": 612
},
{
"epoch": 0.2978981897703803,
"grad_norm": 0.7401373386383057,
"learning_rate": 8e-05,
"loss": 1.7204,
"step": 613
},
{
"epoch": 0.29838415745352936,
"grad_norm": 0.7494681477546692,
"learning_rate": 8e-05,
"loss": 1.7386,
"step": 614
},
{
"epoch": 0.29887012513667843,
"grad_norm": 0.672346293926239,
"learning_rate": 8e-05,
"loss": 1.7277,
"step": 615
},
{
"epoch": 0.2993560928198275,
"grad_norm": 0.7091094851493835,
"learning_rate": 8e-05,
"loss": 1.763,
"step": 616
},
{
"epoch": 0.29984206050297657,
"grad_norm": 0.6953763961791992,
"learning_rate": 8e-05,
"loss": 1.819,
"step": 617
},
{
"epoch": 0.30032802818612564,
"grad_norm": 0.7064865231513977,
"learning_rate": 8e-05,
"loss": 1.75,
"step": 618
},
{
"epoch": 0.3008139958692747,
"grad_norm": 0.6994685530662537,
"learning_rate": 8e-05,
"loss": 1.6968,
"step": 619
},
{
"epoch": 0.3012999635524238,
"grad_norm": 0.6812862753868103,
"learning_rate": 8e-05,
"loss": 1.6968,
"step": 620
},
{
"epoch": 0.30178593123557285,
"grad_norm": 0.6928630471229553,
"learning_rate": 8e-05,
"loss": 1.6216,
"step": 621
},
{
"epoch": 0.3022718989187219,
"grad_norm": 0.7348055243492126,
"learning_rate": 8e-05,
"loss": 1.7554,
"step": 622
},
{
"epoch": 0.302757866601871,
"grad_norm": 0.6720457673072815,
"learning_rate": 8e-05,
"loss": 1.6727,
"step": 623
},
{
"epoch": 0.30324383428502005,
"grad_norm": 0.7116773724555969,
"learning_rate": 8e-05,
"loss": 1.6855,
"step": 624
},
{
"epoch": 0.3037298019681691,
"grad_norm": 0.6709682941436768,
"learning_rate": 8e-05,
"loss": 1.7274,
"step": 625
},
{
"epoch": 0.3042157696513182,
"grad_norm": 0.717004120349884,
"learning_rate": 8e-05,
"loss": 1.7087,
"step": 626
},
{
"epoch": 0.30470173733446726,
"grad_norm": 0.686567485332489,
"learning_rate": 8e-05,
"loss": 1.7382,
"step": 627
},
{
"epoch": 0.30518770501761633,
"grad_norm": 0.6693478226661682,
"learning_rate": 8e-05,
"loss": 1.7077,
"step": 628
},
{
"epoch": 0.3056736727007654,
"grad_norm": 0.6920218467712402,
"learning_rate": 8e-05,
"loss": 1.6745,
"step": 629
},
{
"epoch": 0.30615964038391447,
"grad_norm": 0.715502917766571,
"learning_rate": 8e-05,
"loss": 1.7656,
"step": 630
},
{
"epoch": 0.30664560806706354,
"grad_norm": 0.6711974143981934,
"learning_rate": 8e-05,
"loss": 1.6409,
"step": 631
},
{
"epoch": 0.3071315757502126,
"grad_norm": 0.6944555044174194,
"learning_rate": 8e-05,
"loss": 1.7202,
"step": 632
},
{
"epoch": 0.3076175434333617,
"grad_norm": 0.6978852152824402,
"learning_rate": 8e-05,
"loss": 1.7713,
"step": 633
},
{
"epoch": 0.30810351111651074,
"grad_norm": 0.7136162519454956,
"learning_rate": 8e-05,
"loss": 1.7804,
"step": 634
},
{
"epoch": 0.3085894787996598,
"grad_norm": 0.6399965286254883,
"learning_rate": 8e-05,
"loss": 1.5789,
"step": 635
},
{
"epoch": 0.3090754464828089,
"grad_norm": 0.675133466720581,
"learning_rate": 8e-05,
"loss": 1.6818,
"step": 636
},
{
"epoch": 0.30956141416595795,
"grad_norm": 0.6725639700889587,
"learning_rate": 8e-05,
"loss": 1.7162,
"step": 637
},
{
"epoch": 0.310047381849107,
"grad_norm": 0.6805446743965149,
"learning_rate": 8e-05,
"loss": 1.6275,
"step": 638
},
{
"epoch": 0.3105333495322561,
"grad_norm": 0.7230474948883057,
"learning_rate": 8e-05,
"loss": 1.7247,
"step": 639
},
{
"epoch": 0.31101931721540516,
"grad_norm": 0.6834046244621277,
"learning_rate": 8e-05,
"loss": 1.7722,
"step": 640
},
{
"epoch": 0.3115052848985542,
"grad_norm": 0.683344841003418,
"learning_rate": 8e-05,
"loss": 1.6948,
"step": 641
},
{
"epoch": 0.3119912525817033,
"grad_norm": 0.6865242719650269,
"learning_rate": 8e-05,
"loss": 1.7313,
"step": 642
},
{
"epoch": 0.31247722026485236,
"grad_norm": 0.7007044553756714,
"learning_rate": 8e-05,
"loss": 1.6907,
"step": 643
},
{
"epoch": 0.31296318794800143,
"grad_norm": 0.7030975222587585,
"learning_rate": 8e-05,
"loss": 1.781,
"step": 644
},
{
"epoch": 0.31344915563115056,
"grad_norm": 0.6999849081039429,
"learning_rate": 8e-05,
"loss": 1.7243,
"step": 645
},
{
"epoch": 0.3139351233142996,
"grad_norm": 0.6664942502975464,
"learning_rate": 8e-05,
"loss": 1.665,
"step": 646
},
{
"epoch": 0.3144210909974487,
"grad_norm": 0.6898604035377502,
"learning_rate": 8e-05,
"loss": 1.755,
"step": 647
},
{
"epoch": 0.31490705868059776,
"grad_norm": 0.6785181164741516,
"learning_rate": 8e-05,
"loss": 1.7018,
"step": 648
},
{
"epoch": 0.31539302636374683,
"grad_norm": 0.6965758204460144,
"learning_rate": 8e-05,
"loss": 1.7484,
"step": 649
},
{
"epoch": 0.3158789940468959,
"grad_norm": 0.6679044365882874,
"learning_rate": 8e-05,
"loss": 1.5853,
"step": 650
},
{
"epoch": 0.31636496173004497,
"grad_norm": 0.6945375204086304,
"learning_rate": 8e-05,
"loss": 1.6642,
"step": 651
},
{
"epoch": 0.31685092941319404,
"grad_norm": 0.667552649974823,
"learning_rate": 8e-05,
"loss": 1.5783,
"step": 652
},
{
"epoch": 0.3173368970963431,
"grad_norm": 0.6989448070526123,
"learning_rate": 8e-05,
"loss": 1.7434,
"step": 653
},
{
"epoch": 0.3178228647794922,
"grad_norm": 0.6956431865692139,
"learning_rate": 8e-05,
"loss": 1.7055,
"step": 654
},
{
"epoch": 0.31830883246264124,
"grad_norm": 0.6771115064620972,
"learning_rate": 8e-05,
"loss": 1.723,
"step": 655
},
{
"epoch": 0.3187948001457903,
"grad_norm": 0.6688347458839417,
"learning_rate": 8e-05,
"loss": 1.7305,
"step": 656
},
{
"epoch": 0.3192807678289394,
"grad_norm": 0.7163428068161011,
"learning_rate": 8e-05,
"loss": 1.7346,
"step": 657
},
{
"epoch": 0.31976673551208845,
"grad_norm": 0.7120910882949829,
"learning_rate": 8e-05,
"loss": 1.7408,
"step": 658
},
{
"epoch": 0.3202527031952375,
"grad_norm": 0.7058713436126709,
"learning_rate": 8e-05,
"loss": 1.8368,
"step": 659
},
{
"epoch": 0.3207386708783866,
"grad_norm": 0.7976484894752502,
"learning_rate": 8e-05,
"loss": 1.6987,
"step": 660
},
{
"epoch": 0.32122463856153566,
"grad_norm": 0.7006098031997681,
"learning_rate": 8e-05,
"loss": 1.7049,
"step": 661
},
{
"epoch": 0.3217106062446847,
"grad_norm": 0.7080637812614441,
"learning_rate": 8e-05,
"loss": 1.672,
"step": 662
},
{
"epoch": 0.3221965739278338,
"grad_norm": 0.6846632957458496,
"learning_rate": 8e-05,
"loss": 1.7495,
"step": 663
},
{
"epoch": 0.32268254161098286,
"grad_norm": 0.7317066192626953,
"learning_rate": 8e-05,
"loss": 1.793,
"step": 664
},
{
"epoch": 0.32316850929413193,
"grad_norm": 0.6794739961624146,
"learning_rate": 8e-05,
"loss": 1.6725,
"step": 665
},
{
"epoch": 0.323654476977281,
"grad_norm": 0.6898600459098816,
"learning_rate": 8e-05,
"loss": 1.6968,
"step": 666
},
{
"epoch": 0.32414044466043007,
"grad_norm": 0.664738655090332,
"learning_rate": 8e-05,
"loss": 1.7035,
"step": 667
},
{
"epoch": 0.32462641234357914,
"grad_norm": 0.6971855163574219,
"learning_rate": 8e-05,
"loss": 1.7321,
"step": 668
},
{
"epoch": 0.3251123800267282,
"grad_norm": 0.6689901351928711,
"learning_rate": 8e-05,
"loss": 1.7187,
"step": 669
},
{
"epoch": 0.3255983477098773,
"grad_norm": 0.7430669069290161,
"learning_rate": 8e-05,
"loss": 1.7643,
"step": 670
},
{
"epoch": 0.32608431539302635,
"grad_norm": 0.6867819428443909,
"learning_rate": 8e-05,
"loss": 1.7678,
"step": 671
},
{
"epoch": 0.3265702830761754,
"grad_norm": 0.7471033334732056,
"learning_rate": 8e-05,
"loss": 1.8406,
"step": 672
},
{
"epoch": 0.3270562507593245,
"grad_norm": 0.6916874051094055,
"learning_rate": 8e-05,
"loss": 1.6642,
"step": 673
},
{
"epoch": 0.32754221844247355,
"grad_norm": 0.7641606330871582,
"learning_rate": 8e-05,
"loss": 1.7398,
"step": 674
},
{
"epoch": 0.3280281861256226,
"grad_norm": 0.7503807544708252,
"learning_rate": 8e-05,
"loss": 1.7585,
"step": 675
},
{
"epoch": 0.3285141538087717,
"grad_norm": 0.698069155216217,
"learning_rate": 8e-05,
"loss": 1.7155,
"step": 676
},
{
"epoch": 0.32900012149192076,
"grad_norm": 0.7112565040588379,
"learning_rate": 8e-05,
"loss": 1.764,
"step": 677
},
{
"epoch": 0.3294860891750699,
"grad_norm": 0.6986814737319946,
"learning_rate": 8e-05,
"loss": 1.7118,
"step": 678
},
{
"epoch": 0.32997205685821895,
"grad_norm": 0.7235680818557739,
"learning_rate": 8e-05,
"loss": 1.5749,
"step": 679
},
{
"epoch": 0.330458024541368,
"grad_norm": 0.7491697669029236,
"learning_rate": 8e-05,
"loss": 1.7661,
"step": 680
},
{
"epoch": 0.3309439922245171,
"grad_norm": 0.6982185244560242,
"learning_rate": 8e-05,
"loss": 1.7177,
"step": 681
},
{
"epoch": 0.33142995990766616,
"grad_norm": 0.7124080061912537,
"learning_rate": 8e-05,
"loss": 1.7063,
"step": 682
},
{
"epoch": 0.33191592759081523,
"grad_norm": 0.6832630634307861,
"learning_rate": 8e-05,
"loss": 1.588,
"step": 683
},
{
"epoch": 0.3324018952739643,
"grad_norm": 0.7236844301223755,
"learning_rate": 8e-05,
"loss": 1.7338,
"step": 684
},
{
"epoch": 0.33288786295711337,
"grad_norm": 0.7127472758293152,
"learning_rate": 8e-05,
"loss": 1.7555,
"step": 685
},
{
"epoch": 0.33337383064026244,
"grad_norm": 0.6831504702568054,
"learning_rate": 8e-05,
"loss": 1.6426,
"step": 686
},
{
"epoch": 0.3338597983234115,
"grad_norm": 0.6751428246498108,
"learning_rate": 8e-05,
"loss": 1.646,
"step": 687
},
{
"epoch": 0.3343457660065606,
"grad_norm": 0.6844292283058167,
"learning_rate": 8e-05,
"loss": 1.7087,
"step": 688
},
{
"epoch": 0.33483173368970964,
"grad_norm": 0.718913733959198,
"learning_rate": 8e-05,
"loss": 1.8296,
"step": 689
},
{
"epoch": 0.3353177013728587,
"grad_norm": 0.7067818641662598,
"learning_rate": 8e-05,
"loss": 1.6965,
"step": 690
},
{
"epoch": 0.3358036690560078,
"grad_norm": 0.6959990859031677,
"learning_rate": 8e-05,
"loss": 1.7785,
"step": 691
},
{
"epoch": 0.33628963673915685,
"grad_norm": 0.7144571542739868,
"learning_rate": 8e-05,
"loss": 1.6908,
"step": 692
},
{
"epoch": 0.3367756044223059,
"grad_norm": 0.7456130981445312,
"learning_rate": 8e-05,
"loss": 1.7632,
"step": 693
},
{
"epoch": 0.337261572105455,
"grad_norm": 0.6853093504905701,
"learning_rate": 8e-05,
"loss": 1.7011,
"step": 694
},
{
"epoch": 0.33774753978860406,
"grad_norm": 0.7311987280845642,
"learning_rate": 8e-05,
"loss": 1.757,
"step": 695
},
{
"epoch": 0.3382335074717531,
"grad_norm": 0.692941427230835,
"learning_rate": 8e-05,
"loss": 1.7853,
"step": 696
},
{
"epoch": 0.3387194751549022,
"grad_norm": 0.7096104025840759,
"learning_rate": 8e-05,
"loss": 1.6507,
"step": 697
},
{
"epoch": 0.33920544283805126,
"grad_norm": 0.7057453989982605,
"learning_rate": 8e-05,
"loss": 1.7361,
"step": 698
},
{
"epoch": 0.33969141052120033,
"grad_norm": 0.6995853185653687,
"learning_rate": 8e-05,
"loss": 1.6988,
"step": 699
},
{
"epoch": 0.3401773782043494,
"grad_norm": 0.6842801570892334,
"learning_rate": 8e-05,
"loss": 1.7112,
"step": 700
},
{
"epoch": 0.34066334588749847,
"grad_norm": 0.6936244368553162,
"learning_rate": 8e-05,
"loss": 1.7249,
"step": 701
},
{
"epoch": 0.34114931357064754,
"grad_norm": 0.6642776131629944,
"learning_rate": 8e-05,
"loss": 1.6834,
"step": 702
},
{
"epoch": 0.3416352812537966,
"grad_norm": 0.6814457178115845,
"learning_rate": 8e-05,
"loss": 1.705,
"step": 703
},
{
"epoch": 0.3421212489369457,
"grad_norm": 0.6857737898826599,
"learning_rate": 8e-05,
"loss": 1.7087,
"step": 704
},
{
"epoch": 0.34260721662009475,
"grad_norm": 0.681952714920044,
"learning_rate": 8e-05,
"loss": 1.6908,
"step": 705
},
{
"epoch": 0.3430931843032438,
"grad_norm": 0.6904730200767517,
"learning_rate": 8e-05,
"loss": 1.7594,
"step": 706
},
{
"epoch": 0.3435791519863929,
"grad_norm": 0.6764344573020935,
"learning_rate": 8e-05,
"loss": 1.6886,
"step": 707
},
{
"epoch": 0.34406511966954195,
"grad_norm": 0.6928572654724121,
"learning_rate": 8e-05,
"loss": 1.5948,
"step": 708
},
{
"epoch": 0.344551087352691,
"grad_norm": 0.7080250382423401,
"learning_rate": 8e-05,
"loss": 1.8411,
"step": 709
},
{
"epoch": 0.3450370550358401,
"grad_norm": 0.701583743095398,
"learning_rate": 8e-05,
"loss": 1.8013,
"step": 710
},
{
"epoch": 0.3455230227189892,
"grad_norm": 0.6787291169166565,
"learning_rate": 8e-05,
"loss": 1.662,
"step": 711
},
{
"epoch": 0.3460089904021383,
"grad_norm": 0.6819856762886047,
"learning_rate": 8e-05,
"loss": 1.707,
"step": 712
},
{
"epoch": 0.34649495808528735,
"grad_norm": 0.6898195743560791,
"learning_rate": 8e-05,
"loss": 1.6747,
"step": 713
},
{
"epoch": 0.3469809257684364,
"grad_norm": 0.6973528265953064,
"learning_rate": 8e-05,
"loss": 1.7357,
"step": 714
},
{
"epoch": 0.3474668934515855,
"grad_norm": 0.6738211512565613,
"learning_rate": 8e-05,
"loss": 1.6637,
"step": 715
},
{
"epoch": 0.34795286113473456,
"grad_norm": 0.7323836088180542,
"learning_rate": 8e-05,
"loss": 1.788,
"step": 716
},
{
"epoch": 0.34843882881788363,
"grad_norm": 0.7128671407699585,
"learning_rate": 8e-05,
"loss": 1.7472,
"step": 717
},
{
"epoch": 0.3489247965010327,
"grad_norm": 0.6978126764297485,
"learning_rate": 8e-05,
"loss": 1.7542,
"step": 718
},
{
"epoch": 0.34941076418418177,
"grad_norm": 0.7203449010848999,
"learning_rate": 8e-05,
"loss": 1.8486,
"step": 719
},
{
"epoch": 0.34989673186733083,
"grad_norm": 0.7253541350364685,
"learning_rate": 8e-05,
"loss": 1.7574,
"step": 720
},
{
"epoch": 0.3503826995504799,
"grad_norm": 0.7175763249397278,
"learning_rate": 8e-05,
"loss": 1.7841,
"step": 721
},
{
"epoch": 0.350868667233629,
"grad_norm": 0.692802369594574,
"learning_rate": 8e-05,
"loss": 1.7118,
"step": 722
},
{
"epoch": 0.35135463491677804,
"grad_norm": 0.6721988320350647,
"learning_rate": 8e-05,
"loss": 1.6411,
"step": 723
},
{
"epoch": 0.3518406025999271,
"grad_norm": 0.6904393434524536,
"learning_rate": 8e-05,
"loss": 1.6707,
"step": 724
},
{
"epoch": 0.3523265702830762,
"grad_norm": 0.6631871461868286,
"learning_rate": 8e-05,
"loss": 1.694,
"step": 725
},
{
"epoch": 0.35281253796622525,
"grad_norm": 0.6686208844184875,
"learning_rate": 8e-05,
"loss": 1.6628,
"step": 726
},
{
"epoch": 0.3532985056493743,
"grad_norm": 0.7212857604026794,
"learning_rate": 8e-05,
"loss": 1.7381,
"step": 727
},
{
"epoch": 0.3537844733325234,
"grad_norm": 0.7178005576133728,
"learning_rate": 8e-05,
"loss": 1.6917,
"step": 728
},
{
"epoch": 0.35427044101567245,
"grad_norm": 0.6868698000907898,
"learning_rate": 8e-05,
"loss": 1.7654,
"step": 729
},
{
"epoch": 0.3547564086988215,
"grad_norm": 0.6992602944374084,
"learning_rate": 8e-05,
"loss": 1.7562,
"step": 730
},
{
"epoch": 0.3552423763819706,
"grad_norm": 0.7130458354949951,
"learning_rate": 8e-05,
"loss": 1.7142,
"step": 731
},
{
"epoch": 0.35572834406511966,
"grad_norm": 0.7360597252845764,
"learning_rate": 8e-05,
"loss": 1.8032,
"step": 732
},
{
"epoch": 0.35621431174826873,
"grad_norm": 0.7023678421974182,
"learning_rate": 8e-05,
"loss": 1.68,
"step": 733
},
{
"epoch": 0.3567002794314178,
"grad_norm": 0.6872215867042542,
"learning_rate": 8e-05,
"loss": 1.689,
"step": 734
},
{
"epoch": 0.35718624711456687,
"grad_norm": 0.6893778443336487,
"learning_rate": 8e-05,
"loss": 1.7228,
"step": 735
},
{
"epoch": 0.35767221479771594,
"grad_norm": 0.6837376356124878,
"learning_rate": 8e-05,
"loss": 1.6901,
"step": 736
},
{
"epoch": 0.358158182480865,
"grad_norm": 0.677789032459259,
"learning_rate": 8e-05,
"loss": 1.6059,
"step": 737
},
{
"epoch": 0.3586441501640141,
"grad_norm": 0.7021698355674744,
"learning_rate": 8e-05,
"loss": 1.6352,
"step": 738
},
{
"epoch": 0.35913011784716314,
"grad_norm": 0.7170615792274475,
"learning_rate": 8e-05,
"loss": 1.7215,
"step": 739
},
{
"epoch": 0.3596160855303122,
"grad_norm": 0.6669505834579468,
"learning_rate": 8e-05,
"loss": 1.6934,
"step": 740
},
{
"epoch": 0.3601020532134613,
"grad_norm": 0.7288329005241394,
"learning_rate": 8e-05,
"loss": 1.8032,
"step": 741
},
{
"epoch": 0.36058802089661035,
"grad_norm": 0.7091507911682129,
"learning_rate": 8e-05,
"loss": 1.751,
"step": 742
},
{
"epoch": 0.3610739885797594,
"grad_norm": 0.7438647747039795,
"learning_rate": 8e-05,
"loss": 1.7154,
"step": 743
},
{
"epoch": 0.36155995626290854,
"grad_norm": 0.6953144073486328,
"learning_rate": 8e-05,
"loss": 1.7764,
"step": 744
},
{
"epoch": 0.3620459239460576,
"grad_norm": 0.7226343750953674,
"learning_rate": 8e-05,
"loss": 1.699,
"step": 745
},
{
"epoch": 0.3625318916292067,
"grad_norm": 0.6900684833526611,
"learning_rate": 8e-05,
"loss": 1.7181,
"step": 746
},
{
"epoch": 0.36301785931235575,
"grad_norm": 0.7096751928329468,
"learning_rate": 8e-05,
"loss": 1.705,
"step": 747
},
{
"epoch": 0.3635038269955048,
"grad_norm": 0.6874499917030334,
"learning_rate": 8e-05,
"loss": 1.6872,
"step": 748
},
{
"epoch": 0.3639897946786539,
"grad_norm": 0.7049229741096497,
"learning_rate": 8e-05,
"loss": 1.681,
"step": 749
},
{
"epoch": 0.36447576236180296,
"grad_norm": 0.7370143532752991,
"learning_rate": 8e-05,
"loss": 1.7453,
"step": 750
},
{
"epoch": 0.364961730044952,
"grad_norm": 0.7138962149620056,
"learning_rate": 8e-05,
"loss": 1.7454,
"step": 751
},
{
"epoch": 0.3654476977281011,
"grad_norm": 0.6863689422607422,
"learning_rate": 8e-05,
"loss": 1.7144,
"step": 752
},
{
"epoch": 0.36593366541125016,
"grad_norm": 0.6769356727600098,
"learning_rate": 8e-05,
"loss": 1.6079,
"step": 753
},
{
"epoch": 0.36641963309439923,
"grad_norm": 0.738787055015564,
"learning_rate": 8e-05,
"loss": 1.7435,
"step": 754
},
{
"epoch": 0.3669056007775483,
"grad_norm": 0.6810450553894043,
"learning_rate": 8e-05,
"loss": 1.6657,
"step": 755
},
{
"epoch": 0.36739156846069737,
"grad_norm": 0.7094942331314087,
"learning_rate": 8e-05,
"loss": 1.801,
"step": 756
},
{
"epoch": 0.36787753614384644,
"grad_norm": 0.7048606872558594,
"learning_rate": 8e-05,
"loss": 1.6962,
"step": 757
},
{
"epoch": 0.3683635038269955,
"grad_norm": 0.697839617729187,
"learning_rate": 8e-05,
"loss": 1.698,
"step": 758
},
{
"epoch": 0.3688494715101446,
"grad_norm": 0.7033606767654419,
"learning_rate": 8e-05,
"loss": 1.7372,
"step": 759
},
{
"epoch": 0.36933543919329365,
"grad_norm": 0.6924566030502319,
"learning_rate": 8e-05,
"loss": 1.6313,
"step": 760
},
{
"epoch": 0.3698214068764427,
"grad_norm": 0.7041929364204407,
"learning_rate": 8e-05,
"loss": 1.7038,
"step": 761
},
{
"epoch": 0.3703073745595918,
"grad_norm": 0.709780216217041,
"learning_rate": 8e-05,
"loss": 1.7423,
"step": 762
},
{
"epoch": 0.37079334224274085,
"grad_norm": 0.7087435722351074,
"learning_rate": 8e-05,
"loss": 1.742,
"step": 763
},
{
"epoch": 0.3712793099258899,
"grad_norm": 0.7007718086242676,
"learning_rate": 8e-05,
"loss": 1.6585,
"step": 764
},
{
"epoch": 0.371765277609039,
"grad_norm": 0.7238125205039978,
"learning_rate": 8e-05,
"loss": 1.7995,
"step": 765
},
{
"epoch": 0.37225124529218806,
"grad_norm": 0.7054280638694763,
"learning_rate": 8e-05,
"loss": 1.658,
"step": 766
},
{
"epoch": 0.37273721297533713,
"grad_norm": 0.7188702821731567,
"learning_rate": 8e-05,
"loss": 1.7099,
"step": 767
},
{
"epoch": 0.3732231806584862,
"grad_norm": 0.7082090377807617,
"learning_rate": 8e-05,
"loss": 1.6814,
"step": 768
},
{
"epoch": 0.37370914834163527,
"grad_norm": 0.6647132635116577,
"learning_rate": 8e-05,
"loss": 1.679,
"step": 769
},
{
"epoch": 0.37419511602478434,
"grad_norm": 0.7240138053894043,
"learning_rate": 8e-05,
"loss": 1.6995,
"step": 770
},
{
"epoch": 0.3746810837079334,
"grad_norm": 0.6638062000274658,
"learning_rate": 8e-05,
"loss": 1.668,
"step": 771
},
{
"epoch": 0.3751670513910825,
"grad_norm": 0.7058272957801819,
"learning_rate": 8e-05,
"loss": 1.683,
"step": 772
},
{
"epoch": 0.37565301907423154,
"grad_norm": 0.7083149552345276,
"learning_rate": 8e-05,
"loss": 1.6905,
"step": 773
},
{
"epoch": 0.3761389867573806,
"grad_norm": 0.7284737229347229,
"learning_rate": 8e-05,
"loss": 1.696,
"step": 774
},
{
"epoch": 0.3766249544405297,
"grad_norm": 0.6703829169273376,
"learning_rate": 8e-05,
"loss": 1.7623,
"step": 775
},
{
"epoch": 0.37711092212367875,
"grad_norm": 0.7210977673530579,
"learning_rate": 8e-05,
"loss": 1.725,
"step": 776
},
{
"epoch": 0.3775968898068279,
"grad_norm": 0.6856040358543396,
"learning_rate": 8e-05,
"loss": 1.7112,
"step": 777
},
{
"epoch": 0.37808285748997694,
"grad_norm": 0.7452442646026611,
"learning_rate": 8e-05,
"loss": 1.7337,
"step": 778
},
{
"epoch": 0.378568825173126,
"grad_norm": 0.7243314385414124,
"learning_rate": 8e-05,
"loss": 1.662,
"step": 779
},
{
"epoch": 0.3790547928562751,
"grad_norm": 0.6789048314094543,
"learning_rate": 8e-05,
"loss": 1.7704,
"step": 780
},
{
"epoch": 0.37954076053942415,
"grad_norm": 0.6936148405075073,
"learning_rate": 8e-05,
"loss": 1.6351,
"step": 781
},
{
"epoch": 0.3800267282225732,
"grad_norm": 0.6592870354652405,
"learning_rate": 8e-05,
"loss": 1.6556,
"step": 782
},
{
"epoch": 0.3805126959057223,
"grad_norm": 0.6431177258491516,
"learning_rate": 8e-05,
"loss": 1.5877,
"step": 783
},
{
"epoch": 0.38099866358887136,
"grad_norm": 0.6808754205703735,
"learning_rate": 8e-05,
"loss": 1.73,
"step": 784
},
{
"epoch": 0.3814846312720204,
"grad_norm": 0.6854271292686462,
"learning_rate": 8e-05,
"loss": 1.7208,
"step": 785
},
{
"epoch": 0.3819705989551695,
"grad_norm": 0.6662227511405945,
"learning_rate": 8e-05,
"loss": 1.6591,
"step": 786
},
{
"epoch": 0.38245656663831856,
"grad_norm": 0.6742472648620605,
"learning_rate": 8e-05,
"loss": 1.7358,
"step": 787
},
{
"epoch": 0.38294253432146763,
"grad_norm": 0.6727938055992126,
"learning_rate": 8e-05,
"loss": 1.6611,
"step": 788
},
{
"epoch": 0.3834285020046167,
"grad_norm": 0.7182742357254028,
"learning_rate": 8e-05,
"loss": 1.7756,
"step": 789
},
{
"epoch": 0.38391446968776577,
"grad_norm": 0.6842840313911438,
"learning_rate": 8e-05,
"loss": 1.7347,
"step": 790
},
{
"epoch": 0.38440043737091484,
"grad_norm": 0.7511676549911499,
"learning_rate": 8e-05,
"loss": 1.8546,
"step": 791
},
{
"epoch": 0.3848864050540639,
"grad_norm": 0.6960460543632507,
"learning_rate": 8e-05,
"loss": 1.7523,
"step": 792
},
{
"epoch": 0.385372372737213,
"grad_norm": 0.6879598498344421,
"learning_rate": 8e-05,
"loss": 1.7206,
"step": 793
},
{
"epoch": 0.38585834042036204,
"grad_norm": 0.6782386302947998,
"learning_rate": 8e-05,
"loss": 1.7023,
"step": 794
},
{
"epoch": 0.3863443081035111,
"grad_norm": 0.725420355796814,
"learning_rate": 8e-05,
"loss": 1.7066,
"step": 795
},
{
"epoch": 0.3868302757866602,
"grad_norm": 0.6997841596603394,
"learning_rate": 8e-05,
"loss": 1.723,
"step": 796
},
{
"epoch": 0.38731624346980925,
"grad_norm": 0.6572220921516418,
"learning_rate": 8e-05,
"loss": 1.6855,
"step": 797
},
{
"epoch": 0.3878022111529583,
"grad_norm": 0.6855907440185547,
"learning_rate": 8e-05,
"loss": 1.7322,
"step": 798
},
{
"epoch": 0.3882881788361074,
"grad_norm": 0.6765984892845154,
"learning_rate": 8e-05,
"loss": 1.667,
"step": 799
},
{
"epoch": 0.38877414651925646,
"grad_norm": 0.6914188265800476,
"learning_rate": 8e-05,
"loss": 1.7051,
"step": 800
},
{
"epoch": 0.3892601142024055,
"grad_norm": 0.715277373790741,
"learning_rate": 8e-05,
"loss": 1.7199,
"step": 801
},
{
"epoch": 0.3897460818855546,
"grad_norm": 0.6714770197868347,
"learning_rate": 8e-05,
"loss": 1.6992,
"step": 802
},
{
"epoch": 0.39023204956870366,
"grad_norm": 0.741721510887146,
"learning_rate": 8e-05,
"loss": 1.7375,
"step": 803
},
{
"epoch": 0.39071801725185273,
"grad_norm": 0.7057347893714905,
"learning_rate": 8e-05,
"loss": 1.7769,
"step": 804
},
{
"epoch": 0.3912039849350018,
"grad_norm": 0.7016803026199341,
"learning_rate": 8e-05,
"loss": 1.7315,
"step": 805
},
{
"epoch": 0.39168995261815087,
"grad_norm": 0.6794413924217224,
"learning_rate": 8e-05,
"loss": 1.7699,
"step": 806
},
{
"epoch": 0.39217592030129994,
"grad_norm": 0.6544263362884521,
"learning_rate": 8e-05,
"loss": 1.6509,
"step": 807
},
{
"epoch": 0.392661887984449,
"grad_norm": 0.6961451172828674,
"learning_rate": 8e-05,
"loss": 1.6422,
"step": 808
},
{
"epoch": 0.3931478556675981,
"grad_norm": 0.664314866065979,
"learning_rate": 8e-05,
"loss": 1.794,
"step": 809
},
{
"epoch": 0.3936338233507472,
"grad_norm": 0.6872162222862244,
"learning_rate": 8e-05,
"loss": 1.6422,
"step": 810
},
{
"epoch": 0.39411979103389627,
"grad_norm": 0.6748554110527039,
"learning_rate": 8e-05,
"loss": 1.6915,
"step": 811
},
{
"epoch": 0.39460575871704534,
"grad_norm": 0.6837556958198547,
"learning_rate": 8e-05,
"loss": 1.6511,
"step": 812
},
{
"epoch": 0.3950917264001944,
"grad_norm": 0.6896275877952576,
"learning_rate": 8e-05,
"loss": 1.7798,
"step": 813
},
{
"epoch": 0.3955776940833435,
"grad_norm": 0.6529355645179749,
"learning_rate": 8e-05,
"loss": 1.6849,
"step": 814
},
{
"epoch": 0.39606366176649255,
"grad_norm": 0.6821863055229187,
"learning_rate": 8e-05,
"loss": 1.7039,
"step": 815
},
{
"epoch": 0.3965496294496416,
"grad_norm": 0.6722215414047241,
"learning_rate": 8e-05,
"loss": 1.7436,
"step": 816
},
{
"epoch": 0.3970355971327907,
"grad_norm": 0.6700097322463989,
"learning_rate": 8e-05,
"loss": 1.6214,
"step": 817
},
{
"epoch": 0.39752156481593975,
"grad_norm": 0.676224946975708,
"learning_rate": 8e-05,
"loss": 1.7256,
"step": 818
},
{
"epoch": 0.3980075324990888,
"grad_norm": 0.6975687742233276,
"learning_rate": 8e-05,
"loss": 1.8245,
"step": 819
},
{
"epoch": 0.3984935001822379,
"grad_norm": 0.703895628452301,
"learning_rate": 8e-05,
"loss": 1.7452,
"step": 820
},
{
"epoch": 0.39897946786538696,
"grad_norm": 0.6618953347206116,
"learning_rate": 8e-05,
"loss": 1.6509,
"step": 821
},
{
"epoch": 0.39946543554853603,
"grad_norm": 0.667535662651062,
"learning_rate": 8e-05,
"loss": 1.688,
"step": 822
},
{
"epoch": 0.3999514032316851,
"grad_norm": 0.6862965226173401,
"learning_rate": 8e-05,
"loss": 1.7155,
"step": 823
},
{
"epoch": 0.40043737091483417,
"grad_norm": 0.6981528997421265,
"learning_rate": 8e-05,
"loss": 1.6535,
"step": 824
},
{
"epoch": 0.40092333859798324,
"grad_norm": 0.6970820426940918,
"learning_rate": 8e-05,
"loss": 1.8253,
"step": 825
},
{
"epoch": 0.4014093062811323,
"grad_norm": 0.7245957851409912,
"learning_rate": 8e-05,
"loss": 1.8077,
"step": 826
},
{
"epoch": 0.4018952739642814,
"grad_norm": 0.6667302846908569,
"learning_rate": 8e-05,
"loss": 1.677,
"step": 827
},
{
"epoch": 0.40238124164743044,
"grad_norm": 0.6981498003005981,
"learning_rate": 8e-05,
"loss": 1.7418,
"step": 828
},
{
"epoch": 0.4028672093305795,
"grad_norm": 0.6691059470176697,
"learning_rate": 8e-05,
"loss": 1.6942,
"step": 829
},
{
"epoch": 0.4033531770137286,
"grad_norm": 0.6742236018180847,
"learning_rate": 8e-05,
"loss": 1.7596,
"step": 830
},
{
"epoch": 0.40383914469687765,
"grad_norm": 0.6770570278167725,
"learning_rate": 8e-05,
"loss": 1.6545,
"step": 831
},
{
"epoch": 0.4043251123800267,
"grad_norm": 0.673988401889801,
"learning_rate": 8e-05,
"loss": 1.79,
"step": 832
},
{
"epoch": 0.4048110800631758,
"grad_norm": 0.6888890862464905,
"learning_rate": 8e-05,
"loss": 1.6964,
"step": 833
},
{
"epoch": 0.40529704774632486,
"grad_norm": 0.7041418552398682,
"learning_rate": 8e-05,
"loss": 1.6975,
"step": 834
},
{
"epoch": 0.4057830154294739,
"grad_norm": 0.6616572737693787,
"learning_rate": 8e-05,
"loss": 1.609,
"step": 835
},
{
"epoch": 0.406268983112623,
"grad_norm": 0.6954545974731445,
"learning_rate": 8e-05,
"loss": 1.6616,
"step": 836
},
{
"epoch": 0.40675495079577206,
"grad_norm": 0.6895012259483337,
"learning_rate": 8e-05,
"loss": 1.7397,
"step": 837
},
{
"epoch": 0.40724091847892113,
"grad_norm": 0.6795293688774109,
"learning_rate": 8e-05,
"loss": 1.6625,
"step": 838
},
{
"epoch": 0.4077268861620702,
"grad_norm": 0.6722828149795532,
"learning_rate": 8e-05,
"loss": 1.6734,
"step": 839
},
{
"epoch": 0.40821285384521927,
"grad_norm": 0.703922688961029,
"learning_rate": 8e-05,
"loss": 1.7597,
"step": 840
},
{
"epoch": 0.40869882152836834,
"grad_norm": 0.6770139336585999,
"learning_rate": 8e-05,
"loss": 1.6764,
"step": 841
},
{
"epoch": 0.4091847892115174,
"grad_norm": 0.6721189618110657,
"learning_rate": 8e-05,
"loss": 1.6321,
"step": 842
},
{
"epoch": 0.40967075689466653,
"grad_norm": 0.7049797773361206,
"learning_rate": 8e-05,
"loss": 1.7613,
"step": 843
},
{
"epoch": 0.4101567245778156,
"grad_norm": 0.6847895383834839,
"learning_rate": 8e-05,
"loss": 1.687,
"step": 844
},
{
"epoch": 0.41064269226096467,
"grad_norm": 0.7181977033615112,
"learning_rate": 8e-05,
"loss": 1.6613,
"step": 845
},
{
"epoch": 0.41112865994411374,
"grad_norm": 0.7018725275993347,
"learning_rate": 8e-05,
"loss": 1.8038,
"step": 846
},
{
"epoch": 0.4116146276272628,
"grad_norm": 0.6717066168785095,
"learning_rate": 8e-05,
"loss": 1.636,
"step": 847
},
{
"epoch": 0.4121005953104119,
"grad_norm": 0.6622872352600098,
"learning_rate": 8e-05,
"loss": 1.6516,
"step": 848
},
{
"epoch": 0.41258656299356095,
"grad_norm": 0.6866238117218018,
"learning_rate": 8e-05,
"loss": 1.7175,
"step": 849
},
{
"epoch": 0.41307253067671,
"grad_norm": 0.6799110174179077,
"learning_rate": 8e-05,
"loss": 1.7228,
"step": 850
},
{
"epoch": 0.4135584983598591,
"grad_norm": 0.6873098015785217,
"learning_rate": 8e-05,
"loss": 1.7308,
"step": 851
},
{
"epoch": 0.41404446604300815,
"grad_norm": 0.6333160996437073,
"learning_rate": 8e-05,
"loss": 1.648,
"step": 852
},
{
"epoch": 0.4145304337261572,
"grad_norm": 0.7071367502212524,
"learning_rate": 8e-05,
"loss": 1.7583,
"step": 853
},
{
"epoch": 0.4150164014093063,
"grad_norm": 0.6877809762954712,
"learning_rate": 8e-05,
"loss": 1.7178,
"step": 854
},
{
"epoch": 0.41550236909245536,
"grad_norm": 0.7020477652549744,
"learning_rate": 8e-05,
"loss": 1.7284,
"step": 855
},
{
"epoch": 0.41598833677560443,
"grad_norm": 0.6756927371025085,
"learning_rate": 8e-05,
"loss": 1.6667,
"step": 856
},
{
"epoch": 0.4164743044587535,
"grad_norm": 0.7071303725242615,
"learning_rate": 8e-05,
"loss": 1.7081,
"step": 857
},
{
"epoch": 0.41696027214190257,
"grad_norm": 0.6677414178848267,
"learning_rate": 8e-05,
"loss": 1.6691,
"step": 858
},
{
"epoch": 0.41744623982505163,
"grad_norm": 0.6795874834060669,
"learning_rate": 8e-05,
"loss": 1.6806,
"step": 859
},
{
"epoch": 0.4179322075082007,
"grad_norm": 0.7159068584442139,
"learning_rate": 8e-05,
"loss": 1.9206,
"step": 860
},
{
"epoch": 0.4184181751913498,
"grad_norm": 0.6572151780128479,
"learning_rate": 8e-05,
"loss": 1.648,
"step": 861
},
{
"epoch": 0.41890414287449884,
"grad_norm": 0.7271780967712402,
"learning_rate": 8e-05,
"loss": 1.623,
"step": 862
},
{
"epoch": 0.4193901105576479,
"grad_norm": 0.6506872773170471,
"learning_rate": 8e-05,
"loss": 1.5811,
"step": 863
},
{
"epoch": 0.419876078240797,
"grad_norm": 0.6598126888275146,
"learning_rate": 8e-05,
"loss": 1.6703,
"step": 864
},
{
"epoch": 0.42036204592394605,
"grad_norm": 0.6662724614143372,
"learning_rate": 8e-05,
"loss": 1.6486,
"step": 865
},
{
"epoch": 0.4208480136070951,
"grad_norm": 0.7072134017944336,
"learning_rate": 8e-05,
"loss": 1.7073,
"step": 866
},
{
"epoch": 0.4213339812902442,
"grad_norm": 0.7048529982566833,
"learning_rate": 8e-05,
"loss": 1.7314,
"step": 867
},
{
"epoch": 0.42181994897339326,
"grad_norm": 0.6804163455963135,
"learning_rate": 8e-05,
"loss": 1.572,
"step": 868
},
{
"epoch": 0.4223059166565423,
"grad_norm": 0.6902045011520386,
"learning_rate": 8e-05,
"loss": 1.7352,
"step": 869
},
{
"epoch": 0.4227918843396914,
"grad_norm": 0.6777858138084412,
"learning_rate": 8e-05,
"loss": 1.7343,
"step": 870
},
{
"epoch": 0.42327785202284046,
"grad_norm": 0.7061010599136353,
"learning_rate": 8e-05,
"loss": 1.7791,
"step": 871
},
{
"epoch": 0.42376381970598953,
"grad_norm": 0.7418678998947144,
"learning_rate": 8e-05,
"loss": 1.776,
"step": 872
},
{
"epoch": 0.4242497873891386,
"grad_norm": 0.6834468841552734,
"learning_rate": 8e-05,
"loss": 1.6412,
"step": 873
},
{
"epoch": 0.42473575507228767,
"grad_norm": 0.7752047181129456,
"learning_rate": 8e-05,
"loss": 1.7917,
"step": 874
},
{
"epoch": 0.42522172275543674,
"grad_norm": 0.6734682321548462,
"learning_rate": 8e-05,
"loss": 1.5883,
"step": 875
},
{
"epoch": 0.42570769043858586,
"grad_norm": 0.6892010569572449,
"learning_rate": 8e-05,
"loss": 1.6137,
"step": 876
},
{
"epoch": 0.42619365812173493,
"grad_norm": 0.6889033317565918,
"learning_rate": 8e-05,
"loss": 1.7339,
"step": 877
},
{
"epoch": 0.426679625804884,
"grad_norm": 0.6788530945777893,
"learning_rate": 8e-05,
"loss": 1.7603,
"step": 878
},
{
"epoch": 0.42716559348803307,
"grad_norm": 0.6809303760528564,
"learning_rate": 8e-05,
"loss": 1.5856,
"step": 879
},
{
"epoch": 0.42765156117118214,
"grad_norm": 0.6807916760444641,
"learning_rate": 8e-05,
"loss": 1.672,
"step": 880
},
{
"epoch": 0.4281375288543312,
"grad_norm": 0.6880296468734741,
"learning_rate": 8e-05,
"loss": 1.6914,
"step": 881
},
{
"epoch": 0.4286234965374803,
"grad_norm": 0.6756458282470703,
"learning_rate": 8e-05,
"loss": 1.6854,
"step": 882
},
{
"epoch": 0.42910946422062934,
"grad_norm": 0.7178724408149719,
"learning_rate": 8e-05,
"loss": 1.6913,
"step": 883
},
{
"epoch": 0.4295954319037784,
"grad_norm": 0.6717207431793213,
"learning_rate": 8e-05,
"loss": 1.7001,
"step": 884
},
{
"epoch": 0.4300813995869275,
"grad_norm": 0.6702977418899536,
"learning_rate": 8e-05,
"loss": 1.7455,
"step": 885
},
{
"epoch": 0.43056736727007655,
"grad_norm": 0.6849383115768433,
"learning_rate": 8e-05,
"loss": 1.6358,
"step": 886
},
{
"epoch": 0.4310533349532256,
"grad_norm": 0.6844571828842163,
"learning_rate": 8e-05,
"loss": 1.7251,
"step": 887
},
{
"epoch": 0.4315393026363747,
"grad_norm": 0.6590885519981384,
"learning_rate": 8e-05,
"loss": 1.6084,
"step": 888
},
{
"epoch": 0.43202527031952376,
"grad_norm": 0.6888204216957092,
"learning_rate": 8e-05,
"loss": 1.6628,
"step": 889
},
{
"epoch": 0.4325112380026728,
"grad_norm": 0.6698614358901978,
"learning_rate": 8e-05,
"loss": 1.7264,
"step": 890
},
{
"epoch": 0.4329972056858219,
"grad_norm": 0.6939740180969238,
"learning_rate": 8e-05,
"loss": 1.731,
"step": 891
},
{
"epoch": 0.43348317336897096,
"grad_norm": 0.6651397347450256,
"learning_rate": 8e-05,
"loss": 1.667,
"step": 892
},
{
"epoch": 0.43396914105212003,
"grad_norm": 0.6693311333656311,
"learning_rate": 8e-05,
"loss": 1.6822,
"step": 893
},
{
"epoch": 0.4344551087352691,
"grad_norm": 0.6974858641624451,
"learning_rate": 8e-05,
"loss": 1.752,
"step": 894
},
{
"epoch": 0.43494107641841817,
"grad_norm": 0.6832178235054016,
"learning_rate": 8e-05,
"loss": 1.7949,
"step": 895
},
{
"epoch": 0.43542704410156724,
"grad_norm": 0.6783958673477173,
"learning_rate": 8e-05,
"loss": 1.704,
"step": 896
},
{
"epoch": 0.4359130117847163,
"grad_norm": 0.6979472637176514,
"learning_rate": 8e-05,
"loss": 1.7986,
"step": 897
},
{
"epoch": 0.4363989794678654,
"grad_norm": 0.667197048664093,
"learning_rate": 8e-05,
"loss": 1.6722,
"step": 898
},
{
"epoch": 0.43688494715101445,
"grad_norm": 0.6760208606719971,
"learning_rate": 8e-05,
"loss": 1.5674,
"step": 899
},
{
"epoch": 0.4373709148341635,
"grad_norm": 0.6722148060798645,
"learning_rate": 8e-05,
"loss": 1.6734,
"step": 900
},
{
"epoch": 0.4378568825173126,
"grad_norm": 0.7194588780403137,
"learning_rate": 8e-05,
"loss": 1.8366,
"step": 901
},
{
"epoch": 0.43834285020046165,
"grad_norm": 0.7332800626754761,
"learning_rate": 8e-05,
"loss": 1.7084,
"step": 902
},
{
"epoch": 0.4388288178836107,
"grad_norm": 0.6858316659927368,
"learning_rate": 8e-05,
"loss": 1.7635,
"step": 903
},
{
"epoch": 0.4393147855667598,
"grad_norm": 0.7341161966323853,
"learning_rate": 8e-05,
"loss": 1.7552,
"step": 904
},
{
"epoch": 0.43980075324990886,
"grad_norm": 0.6958224177360535,
"learning_rate": 8e-05,
"loss": 1.7027,
"step": 905
},
{
"epoch": 0.44028672093305793,
"grad_norm": 0.7173404693603516,
"learning_rate": 8e-05,
"loss": 1.7089,
"step": 906
},
{
"epoch": 0.440772688616207,
"grad_norm": 0.6778115630149841,
"learning_rate": 8e-05,
"loss": 1.6948,
"step": 907
},
{
"epoch": 0.44125865629935607,
"grad_norm": 0.7206325531005859,
"learning_rate": 8e-05,
"loss": 1.6411,
"step": 908
},
{
"epoch": 0.44174462398250514,
"grad_norm": 0.6802138686180115,
"learning_rate": 8e-05,
"loss": 1.6809,
"step": 909
},
{
"epoch": 0.44223059166565426,
"grad_norm": 0.711110532283783,
"learning_rate": 8e-05,
"loss": 1.7,
"step": 910
},
{
"epoch": 0.44271655934880333,
"grad_norm": 0.6667793393135071,
"learning_rate": 8e-05,
"loss": 1.6586,
"step": 911
},
{
"epoch": 0.4432025270319524,
"grad_norm": 0.7193270325660706,
"learning_rate": 8e-05,
"loss": 1.7688,
"step": 912
},
{
"epoch": 0.44368849471510147,
"grad_norm": 0.6943178176879883,
"learning_rate": 8e-05,
"loss": 1.7831,
"step": 913
},
{
"epoch": 0.44417446239825054,
"grad_norm": 0.6790280938148499,
"learning_rate": 8e-05,
"loss": 1.7174,
"step": 914
},
{
"epoch": 0.4446604300813996,
"grad_norm": 0.7052329182624817,
"learning_rate": 8e-05,
"loss": 1.7705,
"step": 915
},
{
"epoch": 0.4451463977645487,
"grad_norm": 0.6999489068984985,
"learning_rate": 8e-05,
"loss": 1.7962,
"step": 916
},
{
"epoch": 0.44563236544769774,
"grad_norm": 0.6908236145973206,
"learning_rate": 8e-05,
"loss": 1.8017,
"step": 917
},
{
"epoch": 0.4461183331308468,
"grad_norm": 0.7321158051490784,
"learning_rate": 8e-05,
"loss": 1.6769,
"step": 918
},
{
"epoch": 0.4466043008139959,
"grad_norm": 0.6623929738998413,
"learning_rate": 8e-05,
"loss": 1.6603,
"step": 919
},
{
"epoch": 0.44709026849714495,
"grad_norm": 0.6914306282997131,
"learning_rate": 8e-05,
"loss": 1.6928,
"step": 920
},
{
"epoch": 0.447576236180294,
"grad_norm": 0.6833542585372925,
"learning_rate": 8e-05,
"loss": 1.7031,
"step": 921
},
{
"epoch": 0.4480622038634431,
"grad_norm": 0.7149777412414551,
"learning_rate": 8e-05,
"loss": 1.693,
"step": 922
},
{
"epoch": 0.44854817154659216,
"grad_norm": 0.7228732109069824,
"learning_rate": 8e-05,
"loss": 1.699,
"step": 923
},
{
"epoch": 0.4490341392297412,
"grad_norm": 0.68680739402771,
"learning_rate": 8e-05,
"loss": 1.6486,
"step": 924
},
{
"epoch": 0.4495201069128903,
"grad_norm": 0.684031069278717,
"learning_rate": 8e-05,
"loss": 1.6912,
"step": 925
},
{
"epoch": 0.45000607459603936,
"grad_norm": 0.6929844617843628,
"learning_rate": 8e-05,
"loss": 1.601,
"step": 926
},
{
"epoch": 0.45049204227918843,
"grad_norm": 0.7184422016143799,
"learning_rate": 8e-05,
"loss": 1.7476,
"step": 927
},
{
"epoch": 0.4509780099623375,
"grad_norm": 0.704727828502655,
"learning_rate": 8e-05,
"loss": 1.5983,
"step": 928
},
{
"epoch": 0.45146397764548657,
"grad_norm": 0.7435169816017151,
"learning_rate": 8e-05,
"loss": 1.7228,
"step": 929
},
{
"epoch": 0.45194994532863564,
"grad_norm": 0.7119385600090027,
"learning_rate": 8e-05,
"loss": 1.6971,
"step": 930
},
{
"epoch": 0.4524359130117847,
"grad_norm": 0.7039663195610046,
"learning_rate": 8e-05,
"loss": 1.627,
"step": 931
},
{
"epoch": 0.4529218806949338,
"grad_norm": 0.6682771444320679,
"learning_rate": 8e-05,
"loss": 1.6716,
"step": 932
},
{
"epoch": 0.45340784837808285,
"grad_norm": 0.6845352053642273,
"learning_rate": 8e-05,
"loss": 1.7061,
"step": 933
},
{
"epoch": 0.4538938160612319,
"grad_norm": 0.6739903092384338,
"learning_rate": 8e-05,
"loss": 1.6599,
"step": 934
},
{
"epoch": 0.454379783744381,
"grad_norm": 0.673058271408081,
"learning_rate": 8e-05,
"loss": 1.7172,
"step": 935
},
{
"epoch": 0.45486575142753005,
"grad_norm": 0.7266900539398193,
"learning_rate": 8e-05,
"loss": 1.7724,
"step": 936
},
{
"epoch": 0.4553517191106791,
"grad_norm": 0.707149088382721,
"learning_rate": 8e-05,
"loss": 1.8268,
"step": 937
},
{
"epoch": 0.4558376867938282,
"grad_norm": 0.6685764789581299,
"learning_rate": 8e-05,
"loss": 1.7289,
"step": 938
},
{
"epoch": 0.45632365447697726,
"grad_norm": 0.7215582132339478,
"learning_rate": 8e-05,
"loss": 1.7686,
"step": 939
},
{
"epoch": 0.4568096221601263,
"grad_norm": 0.741390585899353,
"learning_rate": 8e-05,
"loss": 1.7516,
"step": 940
},
{
"epoch": 0.4572955898432754,
"grad_norm": 0.6973326802253723,
"learning_rate": 8e-05,
"loss": 1.6763,
"step": 941
},
{
"epoch": 0.45778155752642447,
"grad_norm": 0.7052226662635803,
"learning_rate": 8e-05,
"loss": 1.6543,
"step": 942
},
{
"epoch": 0.4582675252095736,
"grad_norm": 0.7032400965690613,
"learning_rate": 8e-05,
"loss": 1.6386,
"step": 943
},
{
"epoch": 0.45875349289272266,
"grad_norm": 0.6860877275466919,
"learning_rate": 8e-05,
"loss": 1.6796,
"step": 944
},
{
"epoch": 0.4592394605758717,
"grad_norm": 0.7431606650352478,
"learning_rate": 8e-05,
"loss": 1.741,
"step": 945
},
{
"epoch": 0.4597254282590208,
"grad_norm": 0.6740962862968445,
"learning_rate": 8e-05,
"loss": 1.7181,
"step": 946
},
{
"epoch": 0.46021139594216987,
"grad_norm": 0.707183837890625,
"learning_rate": 8e-05,
"loss": 1.7372,
"step": 947
},
{
"epoch": 0.46069736362531893,
"grad_norm": 0.7016905546188354,
"learning_rate": 8e-05,
"loss": 1.7363,
"step": 948
},
{
"epoch": 0.461183331308468,
"grad_norm": 0.6717005372047424,
"learning_rate": 8e-05,
"loss": 1.6656,
"step": 949
},
{
"epoch": 0.46166929899161707,
"grad_norm": 0.6505550742149353,
"learning_rate": 8e-05,
"loss": 1.5786,
"step": 950
},
{
"epoch": 0.46215526667476614,
"grad_norm": 0.6775349974632263,
"learning_rate": 8e-05,
"loss": 1.6237,
"step": 951
},
{
"epoch": 0.4626412343579152,
"grad_norm": 0.6711937189102173,
"learning_rate": 8e-05,
"loss": 1.7301,
"step": 952
},
{
"epoch": 0.4631272020410643,
"grad_norm": 0.6834034323692322,
"learning_rate": 8e-05,
"loss": 1.718,
"step": 953
},
{
"epoch": 0.46361316972421335,
"grad_norm": 0.6843579411506653,
"learning_rate": 8e-05,
"loss": 1.7937,
"step": 954
},
{
"epoch": 0.4640991374073624,
"grad_norm": 0.6686553359031677,
"learning_rate": 8e-05,
"loss": 1.7961,
"step": 955
},
{
"epoch": 0.4645851050905115,
"grad_norm": 0.6416002511978149,
"learning_rate": 8e-05,
"loss": 1.6599,
"step": 956
},
{
"epoch": 0.46507107277366055,
"grad_norm": 0.6630215048789978,
"learning_rate": 8e-05,
"loss": 1.7118,
"step": 957
},
{
"epoch": 0.4655570404568096,
"grad_norm": 0.6850245594978333,
"learning_rate": 8e-05,
"loss": 1.7957,
"step": 958
},
{
"epoch": 0.4660430081399587,
"grad_norm": 0.6981562376022339,
"learning_rate": 8e-05,
"loss": 1.7848,
"step": 959
},
{
"epoch": 0.46652897582310776,
"grad_norm": 0.6983283162117004,
"learning_rate": 8e-05,
"loss": 1.6546,
"step": 960
},
{
"epoch": 0.46701494350625683,
"grad_norm": 0.7038816213607788,
"learning_rate": 8e-05,
"loss": 1.8054,
"step": 961
},
{
"epoch": 0.4675009111894059,
"grad_norm": 0.6405352354049683,
"learning_rate": 8e-05,
"loss": 1.6331,
"step": 962
},
{
"epoch": 0.46798687887255497,
"grad_norm": 0.6842737197875977,
"learning_rate": 8e-05,
"loss": 1.686,
"step": 963
},
{
"epoch": 0.46847284655570404,
"grad_norm": 0.6727338433265686,
"learning_rate": 8e-05,
"loss": 1.5686,
"step": 964
},
{
"epoch": 0.4689588142388531,
"grad_norm": 0.6844631433486938,
"learning_rate": 8e-05,
"loss": 1.6779,
"step": 965
},
{
"epoch": 0.4694447819220022,
"grad_norm": 0.6630315780639648,
"learning_rate": 8e-05,
"loss": 1.6418,
"step": 966
},
{
"epoch": 0.46993074960515124,
"grad_norm": 0.6761659383773804,
"learning_rate": 8e-05,
"loss": 1.7039,
"step": 967
},
{
"epoch": 0.4704167172883003,
"grad_norm": 0.673235297203064,
"learning_rate": 8e-05,
"loss": 1.6502,
"step": 968
},
{
"epoch": 0.4709026849714494,
"grad_norm": 0.7071899771690369,
"learning_rate": 8e-05,
"loss": 1.75,
"step": 969
},
{
"epoch": 0.47138865265459845,
"grad_norm": 0.6871898770332336,
"learning_rate": 8e-05,
"loss": 1.6775,
"step": 970
},
{
"epoch": 0.4718746203377475,
"grad_norm": 0.6730138063430786,
"learning_rate": 8e-05,
"loss": 1.6927,
"step": 971
},
{
"epoch": 0.4723605880208966,
"grad_norm": 0.6996859312057495,
"learning_rate": 8e-05,
"loss": 1.6185,
"step": 972
},
{
"epoch": 0.47284655570404566,
"grad_norm": 0.7092289924621582,
"learning_rate": 8e-05,
"loss": 1.7848,
"step": 973
},
{
"epoch": 0.4733325233871947,
"grad_norm": 0.7551800608634949,
"learning_rate": 8e-05,
"loss": 1.7257,
"step": 974
},
{
"epoch": 0.4738184910703438,
"grad_norm": 0.6844103336334229,
"learning_rate": 8e-05,
"loss": 1.7648,
"step": 975
},
{
"epoch": 0.4743044587534929,
"grad_norm": 0.8119296431541443,
"learning_rate": 8e-05,
"loss": 1.637,
"step": 976
},
{
"epoch": 0.474790426436642,
"grad_norm": 0.7059913873672485,
"learning_rate": 8e-05,
"loss": 1.6914,
"step": 977
},
{
"epoch": 0.47527639411979106,
"grad_norm": 0.6748428344726562,
"learning_rate": 8e-05,
"loss": 1.6814,
"step": 978
},
{
"epoch": 0.4757623618029401,
"grad_norm": 0.6687542796134949,
"learning_rate": 8e-05,
"loss": 1.7119,
"step": 979
},
{
"epoch": 0.4762483294860892,
"grad_norm": 0.6950284838676453,
"learning_rate": 8e-05,
"loss": 1.7841,
"step": 980
},
{
"epoch": 0.47673429716923826,
"grad_norm": 0.6974992752075195,
"learning_rate": 8e-05,
"loss": 1.7525,
"step": 981
},
{
"epoch": 0.47722026485238733,
"grad_norm": 0.7307083010673523,
"learning_rate": 8e-05,
"loss": 1.7173,
"step": 982
},
{
"epoch": 0.4777062325355364,
"grad_norm": 0.6927682161331177,
"learning_rate": 8e-05,
"loss": 1.7385,
"step": 983
},
{
"epoch": 0.47819220021868547,
"grad_norm": 0.7314983606338501,
"learning_rate": 8e-05,
"loss": 1.7581,
"step": 984
},
{
"epoch": 0.47867816790183454,
"grad_norm": 0.7015753388404846,
"learning_rate": 8e-05,
"loss": 1.6908,
"step": 985
},
{
"epoch": 0.4791641355849836,
"grad_norm": 0.7249748706817627,
"learning_rate": 8e-05,
"loss": 1.71,
"step": 986
},
{
"epoch": 0.4796501032681327,
"grad_norm": 0.6497809290885925,
"learning_rate": 8e-05,
"loss": 1.6395,
"step": 987
},
{
"epoch": 0.48013607095128175,
"grad_norm": 0.7022762298583984,
"learning_rate": 8e-05,
"loss": 1.6537,
"step": 988
},
{
"epoch": 0.4806220386344308,
"grad_norm": 0.7135415077209473,
"learning_rate": 8e-05,
"loss": 1.704,
"step": 989
},
{
"epoch": 0.4811080063175799,
"grad_norm": 0.6976737976074219,
"learning_rate": 8e-05,
"loss": 1.7815,
"step": 990
},
{
"epoch": 0.48159397400072895,
"grad_norm": 0.683862566947937,
"learning_rate": 8e-05,
"loss": 1.6496,
"step": 991
},
{
"epoch": 0.482079941683878,
"grad_norm": 0.6709874272346497,
"learning_rate": 8e-05,
"loss": 1.6932,
"step": 992
},
{
"epoch": 0.4825659093670271,
"grad_norm": 0.7125788331031799,
"learning_rate": 8e-05,
"loss": 1.7091,
"step": 993
},
{
"epoch": 0.48305187705017616,
"grad_norm": 0.6912755966186523,
"learning_rate": 8e-05,
"loss": 1.7835,
"step": 994
},
{
"epoch": 0.48353784473332523,
"grad_norm": 0.7357064485549927,
"learning_rate": 8e-05,
"loss": 1.7512,
"step": 995
},
{
"epoch": 0.4840238124164743,
"grad_norm": 0.6826263666152954,
"learning_rate": 8e-05,
"loss": 1.6849,
"step": 996
},
{
"epoch": 0.48450978009962337,
"grad_norm": 0.7202624678611755,
"learning_rate": 8e-05,
"loss": 1.7009,
"step": 997
},
{
"epoch": 0.48499574778277244,
"grad_norm": 0.6897886991500854,
"learning_rate": 8e-05,
"loss": 1.702,
"step": 998
},
{
"epoch": 0.4854817154659215,
"grad_norm": 0.6974169611930847,
"learning_rate": 8e-05,
"loss": 1.7423,
"step": 999
},
{
"epoch": 0.4859676831490706,
"grad_norm": 0.6582604646682739,
"learning_rate": 8e-05,
"loss": 1.7295,
"step": 1000
},
{
"epoch": 0.48645365083221964,
"grad_norm": 0.6589087247848511,
"learning_rate": 8e-05,
"loss": 1.6061,
"step": 1001
},
{
"epoch": 0.4869396185153687,
"grad_norm": 0.6788417100906372,
"learning_rate": 8e-05,
"loss": 1.7109,
"step": 1002
},
{
"epoch": 0.4874255861985178,
"grad_norm": 0.6648368835449219,
"learning_rate": 8e-05,
"loss": 1.6986,
"step": 1003
},
{
"epoch": 0.48791155388166685,
"grad_norm": 0.7064778804779053,
"learning_rate": 8e-05,
"loss": 1.7699,
"step": 1004
},
{
"epoch": 0.4883975215648159,
"grad_norm": 0.6786182522773743,
"learning_rate": 8e-05,
"loss": 1.6681,
"step": 1005
},
{
"epoch": 0.488883489247965,
"grad_norm": 0.6443123817443848,
"learning_rate": 8e-05,
"loss": 1.5953,
"step": 1006
},
{
"epoch": 0.48936945693111406,
"grad_norm": 0.6474608182907104,
"learning_rate": 8e-05,
"loss": 1.6378,
"step": 1007
},
{
"epoch": 0.4898554246142631,
"grad_norm": 0.6983360648155212,
"learning_rate": 8e-05,
"loss": 1.7295,
"step": 1008
},
{
"epoch": 0.49034139229741225,
"grad_norm": 0.6883093118667603,
"learning_rate": 8e-05,
"loss": 1.7896,
"step": 1009
},
{
"epoch": 0.4908273599805613,
"grad_norm": 0.6982046365737915,
"learning_rate": 8e-05,
"loss": 1.7487,
"step": 1010
},
{
"epoch": 0.4913133276637104,
"grad_norm": 0.6386318802833557,
"learning_rate": 8e-05,
"loss": 1.5755,
"step": 1011
},
{
"epoch": 0.49179929534685946,
"grad_norm": 0.7076333165168762,
"learning_rate": 8e-05,
"loss": 1.785,
"step": 1012
},
{
"epoch": 0.4922852630300085,
"grad_norm": 0.6782214045524597,
"learning_rate": 8e-05,
"loss": 1.7385,
"step": 1013
},
{
"epoch": 0.4927712307131576,
"grad_norm": 0.6707011461257935,
"learning_rate": 8e-05,
"loss": 1.6847,
"step": 1014
},
{
"epoch": 0.49325719839630666,
"grad_norm": 0.672001838684082,
"learning_rate": 8e-05,
"loss": 1.6738,
"step": 1015
},
{
"epoch": 0.49374316607945573,
"grad_norm": 0.6605908870697021,
"learning_rate": 8e-05,
"loss": 1.7217,
"step": 1016
},
{
"epoch": 0.4942291337626048,
"grad_norm": 0.6649426817893982,
"learning_rate": 8e-05,
"loss": 1.6874,
"step": 1017
},
{
"epoch": 0.49471510144575387,
"grad_norm": 0.6722491383552551,
"learning_rate": 8e-05,
"loss": 1.6404,
"step": 1018
},
{
"epoch": 0.49520106912890294,
"grad_norm": 0.6372204422950745,
"learning_rate": 8e-05,
"loss": 1.6603,
"step": 1019
},
{
"epoch": 0.495687036812052,
"grad_norm": 0.6632013916969299,
"learning_rate": 8e-05,
"loss": 1.6326,
"step": 1020
},
{
"epoch": 0.4961730044952011,
"grad_norm": 0.6747928261756897,
"learning_rate": 8e-05,
"loss": 1.7608,
"step": 1021
},
{
"epoch": 0.49665897217835014,
"grad_norm": 0.6605545878410339,
"learning_rate": 8e-05,
"loss": 1.6918,
"step": 1022
},
{
"epoch": 0.4971449398614992,
"grad_norm": 0.6780149936676025,
"learning_rate": 8e-05,
"loss": 1.7652,
"step": 1023
},
{
"epoch": 0.4976309075446483,
"grad_norm": 0.6795799136161804,
"learning_rate": 8e-05,
"loss": 1.7847,
"step": 1024
},
{
"epoch": 0.49811687522779735,
"grad_norm": 0.6555472016334534,
"learning_rate": 8e-05,
"loss": 1.6776,
"step": 1025
},
{
"epoch": 0.4986028429109464,
"grad_norm": 0.6981608867645264,
"learning_rate": 8e-05,
"loss": 1.7474,
"step": 1026
},
{
"epoch": 0.4990888105940955,
"grad_norm": 0.7160595655441284,
"learning_rate": 8e-05,
"loss": 1.7485,
"step": 1027
},
{
"epoch": 0.49957477827724456,
"grad_norm": 0.701192319393158,
"learning_rate": 8e-05,
"loss": 1.6572,
"step": 1028
},
{
"epoch": 0.5000607459603936,
"grad_norm": 0.6892615556716919,
"learning_rate": 8e-05,
"loss": 1.787,
"step": 1029
},
{
"epoch": 0.5005467136435428,
"grad_norm": 0.696123480796814,
"learning_rate": 8e-05,
"loss": 1.6004,
"step": 1030
},
{
"epoch": 0.5010326813266918,
"grad_norm": 0.6587275266647339,
"learning_rate": 8e-05,
"loss": 1.662,
"step": 1031
},
{
"epoch": 0.5015186490098409,
"grad_norm": 0.6967602968215942,
"learning_rate": 8e-05,
"loss": 1.6713,
"step": 1032
},
{
"epoch": 0.5020046166929899,
"grad_norm": 0.6858366131782532,
"learning_rate": 8e-05,
"loss": 1.655,
"step": 1033
},
{
"epoch": 0.502490584376139,
"grad_norm": 0.6969988346099854,
"learning_rate": 8e-05,
"loss": 1.7654,
"step": 1034
},
{
"epoch": 0.502976552059288,
"grad_norm": 0.6985758543014526,
"learning_rate": 8e-05,
"loss": 1.7236,
"step": 1035
},
{
"epoch": 0.5034625197424372,
"grad_norm": 0.690239667892456,
"learning_rate": 8e-05,
"loss": 1.7359,
"step": 1036
},
{
"epoch": 0.5039484874255862,
"grad_norm": 0.6656016111373901,
"learning_rate": 8e-05,
"loss": 1.6691,
"step": 1037
},
{
"epoch": 0.5044344551087353,
"grad_norm": 0.6736864447593689,
"learning_rate": 8e-05,
"loss": 1.6993,
"step": 1038
},
{
"epoch": 0.5049204227918843,
"grad_norm": 0.6747360229492188,
"learning_rate": 8e-05,
"loss": 1.6164,
"step": 1039
},
{
"epoch": 0.5054063904750334,
"grad_norm": 0.6757073998451233,
"learning_rate": 8e-05,
"loss": 1.7767,
"step": 1040
},
{
"epoch": 0.5058923581581825,
"grad_norm": 0.7053402662277222,
"learning_rate": 8e-05,
"loss": 1.7843,
"step": 1041
},
{
"epoch": 0.5063783258413316,
"grad_norm": 0.6746927499771118,
"learning_rate": 8e-05,
"loss": 1.6911,
"step": 1042
},
{
"epoch": 0.5068642935244806,
"grad_norm": 0.7140036225318909,
"learning_rate": 8e-05,
"loss": 1.7887,
"step": 1043
},
{
"epoch": 0.5073502612076297,
"grad_norm": 0.6578049659729004,
"learning_rate": 8e-05,
"loss": 1.6552,
"step": 1044
},
{
"epoch": 0.5078362288907787,
"grad_norm": 0.7378446459770203,
"learning_rate": 8e-05,
"loss": 1.8959,
"step": 1045
},
{
"epoch": 0.5083221965739279,
"grad_norm": 0.669704794883728,
"learning_rate": 8e-05,
"loss": 1.6415,
"step": 1046
},
{
"epoch": 0.5088081642570769,
"grad_norm": 0.6754531264305115,
"learning_rate": 8e-05,
"loss": 1.6027,
"step": 1047
},
{
"epoch": 0.509294131940226,
"grad_norm": 0.693828821182251,
"learning_rate": 8e-05,
"loss": 1.683,
"step": 1048
},
{
"epoch": 0.509780099623375,
"grad_norm": 0.7376023530960083,
"learning_rate": 8e-05,
"loss": 1.7007,
"step": 1049
},
{
"epoch": 0.5102660673065241,
"grad_norm": 0.7014142870903015,
"learning_rate": 8e-05,
"loss": 1.7452,
"step": 1050
},
{
"epoch": 0.5107520349896731,
"grad_norm": 0.7107053399085999,
"learning_rate": 8e-05,
"loss": 1.7412,
"step": 1051
},
{
"epoch": 0.5112380026728223,
"grad_norm": 0.7034938335418701,
"learning_rate": 8e-05,
"loss": 1.7001,
"step": 1052
},
{
"epoch": 0.5117239703559713,
"grad_norm": 0.6613598465919495,
"learning_rate": 8e-05,
"loss": 1.7274,
"step": 1053
},
{
"epoch": 0.5122099380391204,
"grad_norm": 0.7322868704795837,
"learning_rate": 8e-05,
"loss": 1.73,
"step": 1054
},
{
"epoch": 0.5126959057222694,
"grad_norm": 0.6688373684883118,
"learning_rate": 8e-05,
"loss": 1.7351,
"step": 1055
},
{
"epoch": 0.5131818734054185,
"grad_norm": 0.7376517057418823,
"learning_rate": 8e-05,
"loss": 1.7381,
"step": 1056
},
{
"epoch": 0.5136678410885676,
"grad_norm": 0.6638660430908203,
"learning_rate": 8e-05,
"loss": 1.6387,
"step": 1057
},
{
"epoch": 0.5141538087717167,
"grad_norm": 0.7435979843139648,
"learning_rate": 8e-05,
"loss": 1.8159,
"step": 1058
},
{
"epoch": 0.5146397764548658,
"grad_norm": 0.6750348210334778,
"learning_rate": 8e-05,
"loss": 1.6696,
"step": 1059
},
{
"epoch": 0.5151257441380148,
"grad_norm": 0.688221275806427,
"learning_rate": 8e-05,
"loss": 1.722,
"step": 1060
},
{
"epoch": 0.5156117118211639,
"grad_norm": 0.667355477809906,
"learning_rate": 8e-05,
"loss": 1.6996,
"step": 1061
},
{
"epoch": 0.516097679504313,
"grad_norm": 0.7043483853340149,
"learning_rate": 8e-05,
"loss": 1.7123,
"step": 1062
},
{
"epoch": 0.5165836471874621,
"grad_norm": 0.6757128834724426,
"learning_rate": 8e-05,
"loss": 1.8093,
"step": 1063
},
{
"epoch": 0.5170696148706111,
"grad_norm": 0.6613709330558777,
"learning_rate": 8e-05,
"loss": 1.7407,
"step": 1064
},
{
"epoch": 0.5175555825537602,
"grad_norm": 0.6824665665626526,
"learning_rate": 8e-05,
"loss": 1.7187,
"step": 1065
},
{
"epoch": 0.5180415502369092,
"grad_norm": 0.6840562224388123,
"learning_rate": 8e-05,
"loss": 1.7,
"step": 1066
},
{
"epoch": 0.5185275179200584,
"grad_norm": 0.6628724932670593,
"learning_rate": 8e-05,
"loss": 1.5835,
"step": 1067
},
{
"epoch": 0.5190134856032074,
"grad_norm": 0.6936410665512085,
"learning_rate": 8e-05,
"loss": 1.7947,
"step": 1068
},
{
"epoch": 0.5194994532863565,
"grad_norm": 0.6807119250297546,
"learning_rate": 8e-05,
"loss": 1.7051,
"step": 1069
},
{
"epoch": 0.5199854209695055,
"grad_norm": 0.6959723234176636,
"learning_rate": 8e-05,
"loss": 1.6849,
"step": 1070
},
{
"epoch": 0.5204713886526546,
"grad_norm": 0.6840643882751465,
"learning_rate": 8e-05,
"loss": 1.76,
"step": 1071
},
{
"epoch": 0.5209573563358036,
"grad_norm": 0.6923168897628784,
"learning_rate": 8e-05,
"loss": 1.651,
"step": 1072
},
{
"epoch": 0.5214433240189528,
"grad_norm": 0.6913697123527527,
"learning_rate": 8e-05,
"loss": 1.6084,
"step": 1073
},
{
"epoch": 0.5219292917021018,
"grad_norm": 0.7328845262527466,
"learning_rate": 8e-05,
"loss": 1.6404,
"step": 1074
},
{
"epoch": 0.5224152593852509,
"grad_norm": 0.7062130570411682,
"learning_rate": 8e-05,
"loss": 1.7286,
"step": 1075
},
{
"epoch": 0.5229012270683999,
"grad_norm": 0.7092142105102539,
"learning_rate": 8e-05,
"loss": 1.7618,
"step": 1076
},
{
"epoch": 0.523387194751549,
"grad_norm": 0.6776517033576965,
"learning_rate": 8e-05,
"loss": 1.7858,
"step": 1077
},
{
"epoch": 0.5238731624346981,
"grad_norm": 0.7153838276863098,
"learning_rate": 8e-05,
"loss": 1.7419,
"step": 1078
},
{
"epoch": 0.5243591301178472,
"grad_norm": 0.6707956790924072,
"learning_rate": 8e-05,
"loss": 1.6106,
"step": 1079
},
{
"epoch": 0.5248450978009962,
"grad_norm": 0.6869599223136902,
"learning_rate": 8e-05,
"loss": 1.7611,
"step": 1080
},
{
"epoch": 0.5253310654841453,
"grad_norm": 0.6967989206314087,
"learning_rate": 8e-05,
"loss": 1.7347,
"step": 1081
},
{
"epoch": 0.5258170331672943,
"grad_norm": 0.6940819025039673,
"learning_rate": 8e-05,
"loss": 1.7525,
"step": 1082
},
{
"epoch": 0.5263030008504435,
"grad_norm": 0.6862159371376038,
"learning_rate": 8e-05,
"loss": 1.7608,
"step": 1083
},
{
"epoch": 0.5267889685335925,
"grad_norm": 0.792173445224762,
"learning_rate": 8e-05,
"loss": 1.6708,
"step": 1084
},
{
"epoch": 0.5272749362167416,
"grad_norm": 0.7116245627403259,
"learning_rate": 8e-05,
"loss": 1.8339,
"step": 1085
},
{
"epoch": 0.5277609038998906,
"grad_norm": 0.6545159816741943,
"learning_rate": 8e-05,
"loss": 1.6607,
"step": 1086
},
{
"epoch": 0.5282468715830397,
"grad_norm": 0.6704550981521606,
"learning_rate": 8e-05,
"loss": 1.6507,
"step": 1087
},
{
"epoch": 0.5287328392661887,
"grad_norm": 0.6805651187896729,
"learning_rate": 8e-05,
"loss": 1.6781,
"step": 1088
},
{
"epoch": 0.5292188069493379,
"grad_norm": 0.7035424709320068,
"learning_rate": 8e-05,
"loss": 1.7513,
"step": 1089
},
{
"epoch": 0.5297047746324869,
"grad_norm": 0.6777779459953308,
"learning_rate": 8e-05,
"loss": 1.6119,
"step": 1090
},
{
"epoch": 0.530190742315636,
"grad_norm": 0.6916229128837585,
"learning_rate": 8e-05,
"loss": 1.6481,
"step": 1091
},
{
"epoch": 0.5306767099987851,
"grad_norm": 0.6896541118621826,
"learning_rate": 8e-05,
"loss": 1.6588,
"step": 1092
},
{
"epoch": 0.5311626776819341,
"grad_norm": 0.6746338605880737,
"learning_rate": 8e-05,
"loss": 1.6696,
"step": 1093
},
{
"epoch": 0.5316486453650833,
"grad_norm": 0.7134196758270264,
"learning_rate": 8e-05,
"loss": 1.7404,
"step": 1094
},
{
"epoch": 0.5321346130482323,
"grad_norm": 0.7049342393875122,
"learning_rate": 8e-05,
"loss": 1.6827,
"step": 1095
},
{
"epoch": 0.5326205807313814,
"grad_norm": 0.7187740206718445,
"learning_rate": 8e-05,
"loss": 1.7568,
"step": 1096
},
{
"epoch": 0.5331065484145304,
"grad_norm": 0.6730419397354126,
"learning_rate": 8e-05,
"loss": 1.669,
"step": 1097
},
{
"epoch": 0.5335925160976795,
"grad_norm": 0.7545679807662964,
"learning_rate": 8e-05,
"loss": 1.7155,
"step": 1098
},
{
"epoch": 0.5340784837808286,
"grad_norm": 0.682472825050354,
"learning_rate": 8e-05,
"loss": 1.7001,
"step": 1099
},
{
"epoch": 0.5345644514639777,
"grad_norm": 0.6999737024307251,
"learning_rate": 8e-05,
"loss": 1.7233,
"step": 1100
},
{
"epoch": 0.5350504191471267,
"grad_norm": 0.6580104827880859,
"learning_rate": 8e-05,
"loss": 1.6988,
"step": 1101
},
{
"epoch": 0.5355363868302758,
"grad_norm": 0.7009209990501404,
"learning_rate": 8e-05,
"loss": 1.7606,
"step": 1102
},
{
"epoch": 0.5360223545134248,
"grad_norm": 0.6509638428688049,
"learning_rate": 8e-05,
"loss": 1.6495,
"step": 1103
},
{
"epoch": 0.536508322196574,
"grad_norm": 0.7591949105262756,
"learning_rate": 8e-05,
"loss": 1.7161,
"step": 1104
},
{
"epoch": 0.536994289879723,
"grad_norm": 0.7702423334121704,
"learning_rate": 8e-05,
"loss": 1.804,
"step": 1105
},
{
"epoch": 0.5374802575628721,
"grad_norm": 0.7488783001899719,
"learning_rate": 8e-05,
"loss": 1.7808,
"step": 1106
},
{
"epoch": 0.5379662252460211,
"grad_norm": 0.6847324967384338,
"learning_rate": 8e-05,
"loss": 1.7346,
"step": 1107
},
{
"epoch": 0.5384521929291702,
"grad_norm": 0.6858810782432556,
"learning_rate": 8e-05,
"loss": 1.7615,
"step": 1108
},
{
"epoch": 0.5389381606123193,
"grad_norm": 0.7295746803283691,
"learning_rate": 8e-05,
"loss": 1.6769,
"step": 1109
},
{
"epoch": 0.5394241282954684,
"grad_norm": 0.6610291004180908,
"learning_rate": 8e-05,
"loss": 1.6992,
"step": 1110
},
{
"epoch": 0.5399100959786174,
"grad_norm": 0.7642123103141785,
"learning_rate": 8e-05,
"loss": 1.692,
"step": 1111
},
{
"epoch": 0.5403960636617665,
"grad_norm": 0.7037755250930786,
"learning_rate": 8e-05,
"loss": 1.7074,
"step": 1112
},
{
"epoch": 0.5408820313449155,
"grad_norm": 0.7020889520645142,
"learning_rate": 8e-05,
"loss": 1.7325,
"step": 1113
},
{
"epoch": 0.5413679990280647,
"grad_norm": 0.6954485774040222,
"learning_rate": 8e-05,
"loss": 1.697,
"step": 1114
},
{
"epoch": 0.5418539667112137,
"grad_norm": 0.6591249704360962,
"learning_rate": 8e-05,
"loss": 1.6332,
"step": 1115
},
{
"epoch": 0.5423399343943628,
"grad_norm": 0.7541276812553406,
"learning_rate": 8e-05,
"loss": 1.803,
"step": 1116
},
{
"epoch": 0.5428259020775118,
"grad_norm": 0.6712557077407837,
"learning_rate": 8e-05,
"loss": 1.8162,
"step": 1117
},
{
"epoch": 0.5433118697606609,
"grad_norm": 0.7443874478340149,
"learning_rate": 8e-05,
"loss": 1.733,
"step": 1118
},
{
"epoch": 0.5437978374438099,
"grad_norm": 0.6772555112838745,
"learning_rate": 8e-05,
"loss": 1.609,
"step": 1119
},
{
"epoch": 0.5442838051269591,
"grad_norm": 0.697890043258667,
"learning_rate": 8e-05,
"loss": 1.6882,
"step": 1120
},
{
"epoch": 0.5447697728101081,
"grad_norm": 0.6418668627738953,
"learning_rate": 8e-05,
"loss": 1.6304,
"step": 1121
},
{
"epoch": 0.5452557404932572,
"grad_norm": 0.7052850127220154,
"learning_rate": 8e-05,
"loss": 1.7692,
"step": 1122
},
{
"epoch": 0.5457417081764062,
"grad_norm": 0.6747222542762756,
"learning_rate": 8e-05,
"loss": 1.6501,
"step": 1123
},
{
"epoch": 0.5462276758595553,
"grad_norm": 0.6862781643867493,
"learning_rate": 8e-05,
"loss": 1.6592,
"step": 1124
},
{
"epoch": 0.5467136435427045,
"grad_norm": 0.6705482602119446,
"learning_rate": 8e-05,
"loss": 1.7713,
"step": 1125
},
{
"epoch": 0.5471996112258535,
"grad_norm": 0.7051555514335632,
"learning_rate": 8e-05,
"loss": 1.6661,
"step": 1126
},
{
"epoch": 0.5476855789090026,
"grad_norm": 0.6723673939704895,
"learning_rate": 8e-05,
"loss": 1.5912,
"step": 1127
},
{
"epoch": 0.5481715465921516,
"grad_norm": 0.6593241691589355,
"learning_rate": 8e-05,
"loss": 1.6355,
"step": 1128
},
{
"epoch": 0.5486575142753007,
"grad_norm": 0.682691216468811,
"learning_rate": 8e-05,
"loss": 1.6931,
"step": 1129
},
{
"epoch": 0.5491434819584498,
"grad_norm": 0.7138491868972778,
"learning_rate": 8e-05,
"loss": 1.7887,
"step": 1130
},
{
"epoch": 0.5496294496415989,
"grad_norm": 0.6577277183532715,
"learning_rate": 8e-05,
"loss": 1.6587,
"step": 1131
},
{
"epoch": 0.5501154173247479,
"grad_norm": 0.673745334148407,
"learning_rate": 8e-05,
"loss": 1.6854,
"step": 1132
},
{
"epoch": 0.550601385007897,
"grad_norm": 0.6557290554046631,
"learning_rate": 8e-05,
"loss": 1.6194,
"step": 1133
},
{
"epoch": 0.551087352691046,
"grad_norm": 0.6759630441665649,
"learning_rate": 8e-05,
"loss": 1.6773,
"step": 1134
},
{
"epoch": 0.5515733203741952,
"grad_norm": 0.7059656381607056,
"learning_rate": 8e-05,
"loss": 1.6898,
"step": 1135
},
{
"epoch": 0.5520592880573442,
"grad_norm": 0.6711774468421936,
"learning_rate": 8e-05,
"loss": 1.5991,
"step": 1136
},
{
"epoch": 0.5525452557404933,
"grad_norm": 0.6710250377655029,
"learning_rate": 8e-05,
"loss": 1.7062,
"step": 1137
},
{
"epoch": 0.5530312234236423,
"grad_norm": 0.6695495843887329,
"learning_rate": 8e-05,
"loss": 1.7922,
"step": 1138
},
{
"epoch": 0.5535171911067914,
"grad_norm": 0.6507815718650818,
"learning_rate": 8e-05,
"loss": 1.6887,
"step": 1139
},
{
"epoch": 0.5540031587899404,
"grad_norm": 0.6614372134208679,
"learning_rate": 8e-05,
"loss": 1.7116,
"step": 1140
},
{
"epoch": 0.5544891264730896,
"grad_norm": 0.6573963761329651,
"learning_rate": 8e-05,
"loss": 1.7216,
"step": 1141
},
{
"epoch": 0.5549750941562386,
"grad_norm": 0.6942079067230225,
"learning_rate": 8e-05,
"loss": 1.6854,
"step": 1142
},
{
"epoch": 0.5554610618393877,
"grad_norm": 0.6856942176818848,
"learning_rate": 8e-05,
"loss": 1.789,
"step": 1143
},
{
"epoch": 0.5559470295225367,
"grad_norm": 0.6599071621894836,
"learning_rate": 8e-05,
"loss": 1.6855,
"step": 1144
},
{
"epoch": 0.5564329972056858,
"grad_norm": 0.6450647115707397,
"learning_rate": 8e-05,
"loss": 1.663,
"step": 1145
},
{
"epoch": 0.5569189648888349,
"grad_norm": 0.6622946858406067,
"learning_rate": 8e-05,
"loss": 1.7176,
"step": 1146
},
{
"epoch": 0.557404932571984,
"grad_norm": 0.680656909942627,
"learning_rate": 8e-05,
"loss": 1.6539,
"step": 1147
},
{
"epoch": 0.557890900255133,
"grad_norm": 0.6791431903839111,
"learning_rate": 8e-05,
"loss": 1.6389,
"step": 1148
},
{
"epoch": 0.5583768679382821,
"grad_norm": 0.6704488396644592,
"learning_rate": 8e-05,
"loss": 1.6644,
"step": 1149
},
{
"epoch": 0.5588628356214311,
"grad_norm": 0.6784191131591797,
"learning_rate": 8e-05,
"loss": 1.7046,
"step": 1150
},
{
"epoch": 0.5593488033045803,
"grad_norm": 0.6725232005119324,
"learning_rate": 8e-05,
"loss": 1.6476,
"step": 1151
},
{
"epoch": 0.5598347709877293,
"grad_norm": 0.6413189172744751,
"learning_rate": 8e-05,
"loss": 1.5653,
"step": 1152
},
{
"epoch": 0.5603207386708784,
"grad_norm": 0.6819736957550049,
"learning_rate": 8e-05,
"loss": 1.7429,
"step": 1153
},
{
"epoch": 0.5608067063540274,
"grad_norm": 0.6808190941810608,
"learning_rate": 8e-05,
"loss": 1.7261,
"step": 1154
},
{
"epoch": 0.5612926740371765,
"grad_norm": 0.659640908241272,
"learning_rate": 8e-05,
"loss": 1.7365,
"step": 1155
},
{
"epoch": 0.5617786417203255,
"grad_norm": 0.6655954122543335,
"learning_rate": 8e-05,
"loss": 1.667,
"step": 1156
},
{
"epoch": 0.5622646094034747,
"grad_norm": 0.6755356192588806,
"learning_rate": 8e-05,
"loss": 1.6884,
"step": 1157
},
{
"epoch": 0.5627505770866238,
"grad_norm": 0.6537230014801025,
"learning_rate": 8e-05,
"loss": 1.655,
"step": 1158
},
{
"epoch": 0.5632365447697728,
"grad_norm": 0.6520752906799316,
"learning_rate": 8e-05,
"loss": 1.6876,
"step": 1159
},
{
"epoch": 0.5637225124529219,
"grad_norm": 0.6777552962303162,
"learning_rate": 8e-05,
"loss": 1.7346,
"step": 1160
},
{
"epoch": 0.564208480136071,
"grad_norm": 0.6801326870918274,
"learning_rate": 8e-05,
"loss": 1.6431,
"step": 1161
},
{
"epoch": 0.5646944478192201,
"grad_norm": 0.6536253094673157,
"learning_rate": 8e-05,
"loss": 1.6213,
"step": 1162
},
{
"epoch": 0.5651804155023691,
"grad_norm": 0.6658689975738525,
"learning_rate": 8e-05,
"loss": 1.7144,
"step": 1163
},
{
"epoch": 0.5656663831855182,
"grad_norm": 0.6927465200424194,
"learning_rate": 8e-05,
"loss": 1.8414,
"step": 1164
},
{
"epoch": 0.5661523508686672,
"grad_norm": 0.6576777100563049,
"learning_rate": 8e-05,
"loss": 1.6705,
"step": 1165
},
{
"epoch": 0.5666383185518163,
"grad_norm": 0.6840883493423462,
"learning_rate": 8e-05,
"loss": 1.7135,
"step": 1166
},
{
"epoch": 0.5671242862349654,
"grad_norm": 0.7268074154853821,
"learning_rate": 8e-05,
"loss": 1.6994,
"step": 1167
},
{
"epoch": 0.5676102539181145,
"grad_norm": 0.6900855302810669,
"learning_rate": 8e-05,
"loss": 1.7193,
"step": 1168
},
{
"epoch": 0.5680962216012635,
"grad_norm": 0.6806018352508545,
"learning_rate": 8e-05,
"loss": 1.7718,
"step": 1169
},
{
"epoch": 0.5685821892844126,
"grad_norm": 0.683824896812439,
"learning_rate": 8e-05,
"loss": 1.6555,
"step": 1170
},
{
"epoch": 0.5690681569675616,
"grad_norm": 0.6862534284591675,
"learning_rate": 8e-05,
"loss": 1.7172,
"step": 1171
},
{
"epoch": 0.5695541246507108,
"grad_norm": 0.6748654246330261,
"learning_rate": 8e-05,
"loss": 1.7235,
"step": 1172
},
{
"epoch": 0.5700400923338598,
"grad_norm": 0.6655370593070984,
"learning_rate": 8e-05,
"loss": 1.6982,
"step": 1173
},
{
"epoch": 0.5705260600170089,
"grad_norm": 0.6745444536209106,
"learning_rate": 8e-05,
"loss": 1.6692,
"step": 1174
},
{
"epoch": 0.5710120277001579,
"grad_norm": 0.6913198828697205,
"learning_rate": 8e-05,
"loss": 1.6696,
"step": 1175
},
{
"epoch": 0.571497995383307,
"grad_norm": 0.6696571707725525,
"learning_rate": 8e-05,
"loss": 1.7032,
"step": 1176
},
{
"epoch": 0.571983963066456,
"grad_norm": 0.7033218741416931,
"learning_rate": 8e-05,
"loss": 1.7794,
"step": 1177
},
{
"epoch": 0.5724699307496052,
"grad_norm": 0.6802006363868713,
"learning_rate": 8e-05,
"loss": 1.7003,
"step": 1178
},
{
"epoch": 0.5729558984327542,
"grad_norm": 0.6938331723213196,
"learning_rate": 8e-05,
"loss": 1.7271,
"step": 1179
},
{
"epoch": 0.5734418661159033,
"grad_norm": 0.6567596793174744,
"learning_rate": 8e-05,
"loss": 1.6777,
"step": 1180
},
{
"epoch": 0.5739278337990523,
"grad_norm": 0.6705310344696045,
"learning_rate": 8e-05,
"loss": 1.6928,
"step": 1181
},
{
"epoch": 0.5744138014822014,
"grad_norm": 0.7192783951759338,
"learning_rate": 8e-05,
"loss": 1.7905,
"step": 1182
},
{
"epoch": 0.5748997691653505,
"grad_norm": 0.6790586709976196,
"learning_rate": 8e-05,
"loss": 1.6583,
"step": 1183
},
{
"epoch": 0.5753857368484996,
"grad_norm": 0.7103889584541321,
"learning_rate": 8e-05,
"loss": 1.699,
"step": 1184
},
{
"epoch": 0.5758717045316486,
"grad_norm": 0.678926944732666,
"learning_rate": 8e-05,
"loss": 1.7732,
"step": 1185
},
{
"epoch": 0.5763576722147977,
"grad_norm": 0.6926059722900391,
"learning_rate": 8e-05,
"loss": 1.5405,
"step": 1186
},
{
"epoch": 0.5768436398979467,
"grad_norm": 0.7004907131195068,
"learning_rate": 8e-05,
"loss": 1.7625,
"step": 1187
},
{
"epoch": 0.5773296075810959,
"grad_norm": 0.7104352116584778,
"learning_rate": 8e-05,
"loss": 1.639,
"step": 1188
},
{
"epoch": 0.5778155752642449,
"grad_norm": 0.7317578196525574,
"learning_rate": 8e-05,
"loss": 1.687,
"step": 1189
},
{
"epoch": 0.578301542947394,
"grad_norm": 0.6903920769691467,
"learning_rate": 8e-05,
"loss": 1.588,
"step": 1190
},
{
"epoch": 0.5787875106305431,
"grad_norm": 0.6683222651481628,
"learning_rate": 8e-05,
"loss": 1.72,
"step": 1191
},
{
"epoch": 0.5792734783136921,
"grad_norm": 0.6732016205787659,
"learning_rate": 8e-05,
"loss": 1.7349,
"step": 1192
},
{
"epoch": 0.5797594459968413,
"grad_norm": 0.6823511719703674,
"learning_rate": 8e-05,
"loss": 1.7643,
"step": 1193
},
{
"epoch": 0.5802454136799903,
"grad_norm": 0.6696779131889343,
"learning_rate": 8e-05,
"loss": 1.7342,
"step": 1194
},
{
"epoch": 0.5807313813631394,
"grad_norm": 0.716525673866272,
"learning_rate": 8e-05,
"loss": 1.6209,
"step": 1195
},
{
"epoch": 0.5812173490462884,
"grad_norm": 0.6862484812736511,
"learning_rate": 8e-05,
"loss": 1.6563,
"step": 1196
},
{
"epoch": 0.5817033167294375,
"grad_norm": 0.683681070804596,
"learning_rate": 8e-05,
"loss": 1.6454,
"step": 1197
},
{
"epoch": 0.5821892844125865,
"grad_norm": 0.6821367740631104,
"learning_rate": 8e-05,
"loss": 1.6325,
"step": 1198
},
{
"epoch": 0.5826752520957357,
"grad_norm": 0.6539075970649719,
"learning_rate": 8e-05,
"loss": 1.6822,
"step": 1199
},
{
"epoch": 0.5831612197788847,
"grad_norm": 0.737847626209259,
"learning_rate": 8e-05,
"loss": 1.7325,
"step": 1200
},
{
"epoch": 0.5836471874620338,
"grad_norm": 0.6822332739830017,
"learning_rate": 8e-05,
"loss": 1.7866,
"step": 1201
},
{
"epoch": 0.5841331551451828,
"grad_norm": 0.6578431725502014,
"learning_rate": 8e-05,
"loss": 1.6757,
"step": 1202
},
{
"epoch": 0.584619122828332,
"grad_norm": 0.6829719543457031,
"learning_rate": 8e-05,
"loss": 1.7026,
"step": 1203
},
{
"epoch": 0.585105090511481,
"grad_norm": 0.690361499786377,
"learning_rate": 8e-05,
"loss": 1.7872,
"step": 1204
},
{
"epoch": 0.5855910581946301,
"grad_norm": 0.6874240636825562,
"learning_rate": 8e-05,
"loss": 1.6867,
"step": 1205
},
{
"epoch": 0.5860770258777791,
"grad_norm": 0.6720805764198303,
"learning_rate": 8e-05,
"loss": 1.6675,
"step": 1206
},
{
"epoch": 0.5865629935609282,
"grad_norm": 0.7064617872238159,
"learning_rate": 8e-05,
"loss": 1.7227,
"step": 1207
},
{
"epoch": 0.5870489612440772,
"grad_norm": 0.6419195532798767,
"learning_rate": 8e-05,
"loss": 1.5611,
"step": 1208
},
{
"epoch": 0.5875349289272264,
"grad_norm": 0.6887343525886536,
"learning_rate": 8e-05,
"loss": 1.6504,
"step": 1209
},
{
"epoch": 0.5880208966103754,
"grad_norm": 0.6889417171478271,
"learning_rate": 8e-05,
"loss": 1.6762,
"step": 1210
},
{
"epoch": 0.5885068642935245,
"grad_norm": 0.6796707510948181,
"learning_rate": 8e-05,
"loss": 1.7662,
"step": 1211
},
{
"epoch": 0.5889928319766735,
"grad_norm": 0.6833084225654602,
"learning_rate": 8e-05,
"loss": 1.781,
"step": 1212
},
{
"epoch": 0.5894787996598226,
"grad_norm": 0.6800668239593506,
"learning_rate": 8e-05,
"loss": 1.6795,
"step": 1213
},
{
"epoch": 0.5899647673429717,
"grad_norm": 0.6984237432479858,
"learning_rate": 8e-05,
"loss": 1.7124,
"step": 1214
},
{
"epoch": 0.5904507350261208,
"grad_norm": 0.7134154438972473,
"learning_rate": 8e-05,
"loss": 1.7614,
"step": 1215
},
{
"epoch": 0.5909367027092698,
"grad_norm": 0.6832752823829651,
"learning_rate": 8e-05,
"loss": 1.6502,
"step": 1216
},
{
"epoch": 0.5914226703924189,
"grad_norm": 0.6450145244598389,
"learning_rate": 8e-05,
"loss": 1.6695,
"step": 1217
},
{
"epoch": 0.5919086380755679,
"grad_norm": 0.7088375091552734,
"learning_rate": 8e-05,
"loss": 1.713,
"step": 1218
},
{
"epoch": 0.592394605758717,
"grad_norm": 0.7023234963417053,
"learning_rate": 8e-05,
"loss": 1.7124,
"step": 1219
},
{
"epoch": 0.5928805734418661,
"grad_norm": 0.7200241684913635,
"learning_rate": 8e-05,
"loss": 1.7761,
"step": 1220
},
{
"epoch": 0.5933665411250152,
"grad_norm": 0.6853911876678467,
"learning_rate": 8e-05,
"loss": 1.7446,
"step": 1221
},
{
"epoch": 0.5938525088081642,
"grad_norm": 0.6965936422348022,
"learning_rate": 8e-05,
"loss": 1.6055,
"step": 1222
},
{
"epoch": 0.5943384764913133,
"grad_norm": 0.7172530889511108,
"learning_rate": 8e-05,
"loss": 1.7919,
"step": 1223
},
{
"epoch": 0.5948244441744625,
"grad_norm": 0.6731247901916504,
"learning_rate": 8e-05,
"loss": 1.6662,
"step": 1224
},
{
"epoch": 0.5953104118576115,
"grad_norm": 0.716677725315094,
"learning_rate": 8e-05,
"loss": 1.6755,
"step": 1225
},
{
"epoch": 0.5957963795407606,
"grad_norm": 0.6751598715782166,
"learning_rate": 8e-05,
"loss": 1.6918,
"step": 1226
},
{
"epoch": 0.5962823472239096,
"grad_norm": 0.7308856844902039,
"learning_rate": 8e-05,
"loss": 1.6726,
"step": 1227
},
{
"epoch": 0.5967683149070587,
"grad_norm": 0.668401837348938,
"learning_rate": 8e-05,
"loss": 1.7417,
"step": 1228
},
{
"epoch": 0.5972542825902077,
"grad_norm": 0.6913663148880005,
"learning_rate": 8e-05,
"loss": 1.7568,
"step": 1229
},
{
"epoch": 0.5977402502733569,
"grad_norm": 0.6790227890014648,
"learning_rate": 8e-05,
"loss": 1.774,
"step": 1230
},
{
"epoch": 0.5982262179565059,
"grad_norm": 0.7138398885726929,
"learning_rate": 8e-05,
"loss": 1.7636,
"step": 1231
},
{
"epoch": 0.598712185639655,
"grad_norm": 0.6717175841331482,
"learning_rate": 8e-05,
"loss": 1.6984,
"step": 1232
},
{
"epoch": 0.599198153322804,
"grad_norm": 0.6992210745811462,
"learning_rate": 8e-05,
"loss": 1.6166,
"step": 1233
},
{
"epoch": 0.5996841210059531,
"grad_norm": 0.7094160914421082,
"learning_rate": 8e-05,
"loss": 1.7681,
"step": 1234
},
{
"epoch": 0.6001700886891022,
"grad_norm": 0.6791561245918274,
"learning_rate": 8e-05,
"loss": 1.7148,
"step": 1235
},
{
"epoch": 0.6006560563722513,
"grad_norm": 0.7138521671295166,
"learning_rate": 8e-05,
"loss": 1.7625,
"step": 1236
},
{
"epoch": 0.6011420240554003,
"grad_norm": 0.7009010314941406,
"learning_rate": 8e-05,
"loss": 1.7067,
"step": 1237
},
{
"epoch": 0.6016279917385494,
"grad_norm": 0.7364915609359741,
"learning_rate": 8e-05,
"loss": 1.6623,
"step": 1238
},
{
"epoch": 0.6021139594216984,
"grad_norm": 0.6793099641799927,
"learning_rate": 8e-05,
"loss": 1.658,
"step": 1239
},
{
"epoch": 0.6025999271048476,
"grad_norm": 0.7189677953720093,
"learning_rate": 8e-05,
"loss": 1.7025,
"step": 1240
},
{
"epoch": 0.6030858947879966,
"grad_norm": 0.7181973457336426,
"learning_rate": 8e-05,
"loss": 1.8068,
"step": 1241
},
{
"epoch": 0.6035718624711457,
"grad_norm": 0.687425434589386,
"learning_rate": 8e-05,
"loss": 1.6792,
"step": 1242
},
{
"epoch": 0.6040578301542947,
"grad_norm": 0.6552849411964417,
"learning_rate": 8e-05,
"loss": 1.672,
"step": 1243
},
{
"epoch": 0.6045437978374438,
"grad_norm": 0.7122268676757812,
"learning_rate": 8e-05,
"loss": 1.7756,
"step": 1244
},
{
"epoch": 0.6050297655205928,
"grad_norm": 0.6844674944877625,
"learning_rate": 8e-05,
"loss": 1.7895,
"step": 1245
},
{
"epoch": 0.605515733203742,
"grad_norm": 0.6658867597579956,
"learning_rate": 8e-05,
"loss": 1.6759,
"step": 1246
},
{
"epoch": 0.606001700886891,
"grad_norm": 0.6862877607345581,
"learning_rate": 8e-05,
"loss": 1.7381,
"step": 1247
},
{
"epoch": 0.6064876685700401,
"grad_norm": 0.6513017416000366,
"learning_rate": 8e-05,
"loss": 1.726,
"step": 1248
},
{
"epoch": 0.6069736362531891,
"grad_norm": 0.6754278540611267,
"learning_rate": 8e-05,
"loss": 1.6301,
"step": 1249
},
{
"epoch": 0.6074596039363382,
"grad_norm": 0.6985076665878296,
"learning_rate": 8e-05,
"loss": 1.7157,
"step": 1250
},
{
"epoch": 0.6079455716194873,
"grad_norm": 0.6825240254402161,
"learning_rate": 8e-05,
"loss": 1.6926,
"step": 1251
},
{
"epoch": 0.6084315393026364,
"grad_norm": 0.6575943231582642,
"learning_rate": 8e-05,
"loss": 1.63,
"step": 1252
},
{
"epoch": 0.6089175069857854,
"grad_norm": 0.6731870770454407,
"learning_rate": 8e-05,
"loss": 1.6489,
"step": 1253
},
{
"epoch": 0.6094034746689345,
"grad_norm": 0.6386263966560364,
"learning_rate": 8e-05,
"loss": 1.5998,
"step": 1254
},
{
"epoch": 0.6098894423520835,
"grad_norm": 0.6733653545379639,
"learning_rate": 8e-05,
"loss": 1.7495,
"step": 1255
},
{
"epoch": 0.6103754100352327,
"grad_norm": 0.681827962398529,
"learning_rate": 8e-05,
"loss": 1.7408,
"step": 1256
},
{
"epoch": 0.6108613777183818,
"grad_norm": 0.6763688325881958,
"learning_rate": 8e-05,
"loss": 1.7478,
"step": 1257
},
{
"epoch": 0.6113473454015308,
"grad_norm": 0.6924687623977661,
"learning_rate": 8e-05,
"loss": 1.7819,
"step": 1258
},
{
"epoch": 0.6118333130846799,
"grad_norm": 0.6565300226211548,
"learning_rate": 8e-05,
"loss": 1.6319,
"step": 1259
},
{
"epoch": 0.6123192807678289,
"grad_norm": 0.7047408819198608,
"learning_rate": 8e-05,
"loss": 1.6709,
"step": 1260
},
{
"epoch": 0.6128052484509781,
"grad_norm": 0.6591905951499939,
"learning_rate": 8e-05,
"loss": 1.7454,
"step": 1261
},
{
"epoch": 0.6132912161341271,
"grad_norm": 0.6813497543334961,
"learning_rate": 8e-05,
"loss": 1.6712,
"step": 1262
},
{
"epoch": 0.6137771838172762,
"grad_norm": 0.691750705242157,
"learning_rate": 8e-05,
"loss": 1.6833,
"step": 1263
},
{
"epoch": 0.6142631515004252,
"grad_norm": 0.6817581057548523,
"learning_rate": 8e-05,
"loss": 1.7717,
"step": 1264
},
{
"epoch": 0.6147491191835743,
"grad_norm": 0.6964521408081055,
"learning_rate": 8e-05,
"loss": 1.7466,
"step": 1265
},
{
"epoch": 0.6152350868667233,
"grad_norm": 0.6699418425559998,
"learning_rate": 8e-05,
"loss": 1.7408,
"step": 1266
},
{
"epoch": 0.6157210545498725,
"grad_norm": 0.687675416469574,
"learning_rate": 8e-05,
"loss": 1.6859,
"step": 1267
},
{
"epoch": 0.6162070222330215,
"grad_norm": 0.6683962345123291,
"learning_rate": 8e-05,
"loss": 1.6489,
"step": 1268
},
{
"epoch": 0.6166929899161706,
"grad_norm": 0.6966105699539185,
"learning_rate": 8e-05,
"loss": 1.6827,
"step": 1269
},
{
"epoch": 0.6171789575993196,
"grad_norm": 0.7060234546661377,
"learning_rate": 8e-05,
"loss": 1.7079,
"step": 1270
},
{
"epoch": 0.6176649252824687,
"grad_norm": 0.7038748264312744,
"learning_rate": 8e-05,
"loss": 1.78,
"step": 1271
},
{
"epoch": 0.6181508929656178,
"grad_norm": 0.6980753540992737,
"learning_rate": 8e-05,
"loss": 1.8234,
"step": 1272
},
{
"epoch": 0.6186368606487669,
"grad_norm": 0.676101565361023,
"learning_rate": 8e-05,
"loss": 1.6625,
"step": 1273
},
{
"epoch": 0.6191228283319159,
"grad_norm": 0.6947356462478638,
"learning_rate": 8e-05,
"loss": 1.6889,
"step": 1274
},
{
"epoch": 0.619608796015065,
"grad_norm": 0.6201874017715454,
"learning_rate": 8e-05,
"loss": 1.5396,
"step": 1275
},
{
"epoch": 0.620094763698214,
"grad_norm": 0.7296407222747803,
"learning_rate": 8e-05,
"loss": 1.7167,
"step": 1276
},
{
"epoch": 0.6205807313813632,
"grad_norm": 0.6506848335266113,
"learning_rate": 8e-05,
"loss": 1.7229,
"step": 1277
},
{
"epoch": 0.6210666990645122,
"grad_norm": 0.704167902469635,
"learning_rate": 8e-05,
"loss": 1.7336,
"step": 1278
},
{
"epoch": 0.6215526667476613,
"grad_norm": 0.6549620628356934,
"learning_rate": 8e-05,
"loss": 1.6864,
"step": 1279
},
{
"epoch": 0.6220386344308103,
"grad_norm": 0.7394512891769409,
"learning_rate": 8e-05,
"loss": 1.8005,
"step": 1280
},
{
"epoch": 0.6225246021139594,
"grad_norm": 0.707434356212616,
"learning_rate": 8e-05,
"loss": 1.554,
"step": 1281
},
{
"epoch": 0.6230105697971084,
"grad_norm": 0.7132856845855713,
"learning_rate": 8e-05,
"loss": 1.6516,
"step": 1282
},
{
"epoch": 0.6234965374802576,
"grad_norm": 0.6508325338363647,
"learning_rate": 8e-05,
"loss": 1.5901,
"step": 1283
},
{
"epoch": 0.6239825051634066,
"grad_norm": 0.7017080187797546,
"learning_rate": 8e-05,
"loss": 1.7625,
"step": 1284
},
{
"epoch": 0.6244684728465557,
"grad_norm": 0.6544747352600098,
"learning_rate": 8e-05,
"loss": 1.7253,
"step": 1285
},
{
"epoch": 0.6249544405297047,
"grad_norm": 0.6635825037956238,
"learning_rate": 8e-05,
"loss": 1.7134,
"step": 1286
},
{
"epoch": 0.6254404082128538,
"grad_norm": 0.6872092485427856,
"learning_rate": 8e-05,
"loss": 1.7473,
"step": 1287
},
{
"epoch": 0.6259263758960029,
"grad_norm": 0.6724379658699036,
"learning_rate": 8e-05,
"loss": 1.6428,
"step": 1288
},
{
"epoch": 0.626412343579152,
"grad_norm": 0.6651939153671265,
"learning_rate": 8e-05,
"loss": 1.7405,
"step": 1289
},
{
"epoch": 0.6268983112623011,
"grad_norm": 0.6781435608863831,
"learning_rate": 8e-05,
"loss": 1.6416,
"step": 1290
},
{
"epoch": 0.6273842789454501,
"grad_norm": 0.699384868144989,
"learning_rate": 8e-05,
"loss": 1.6881,
"step": 1291
},
{
"epoch": 0.6278702466285992,
"grad_norm": 0.6593002676963806,
"learning_rate": 8e-05,
"loss": 1.7266,
"step": 1292
},
{
"epoch": 0.6283562143117483,
"grad_norm": 0.7015580534934998,
"learning_rate": 8e-05,
"loss": 1.6794,
"step": 1293
},
{
"epoch": 0.6288421819948974,
"grad_norm": 0.6750890612602234,
"learning_rate": 8e-05,
"loss": 1.7274,
"step": 1294
},
{
"epoch": 0.6293281496780464,
"grad_norm": 0.7169713973999023,
"learning_rate": 8e-05,
"loss": 1.7713,
"step": 1295
},
{
"epoch": 0.6298141173611955,
"grad_norm": 0.7050965428352356,
"learning_rate": 8e-05,
"loss": 1.7294,
"step": 1296
},
{
"epoch": 0.6303000850443445,
"grad_norm": 0.6698927879333496,
"learning_rate": 8e-05,
"loss": 1.6464,
"step": 1297
},
{
"epoch": 0.6307860527274937,
"grad_norm": 0.6851381659507751,
"learning_rate": 8e-05,
"loss": 1.7113,
"step": 1298
},
{
"epoch": 0.6312720204106427,
"grad_norm": 0.6928958892822266,
"learning_rate": 8e-05,
"loss": 1.7469,
"step": 1299
},
{
"epoch": 0.6317579880937918,
"grad_norm": 0.6474245190620422,
"learning_rate": 8e-05,
"loss": 1.61,
"step": 1300
},
{
"epoch": 0.6322439557769408,
"grad_norm": 0.6689447164535522,
"learning_rate": 8e-05,
"loss": 1.7407,
"step": 1301
},
{
"epoch": 0.6327299234600899,
"grad_norm": 0.7028213739395142,
"learning_rate": 8e-05,
"loss": 1.8221,
"step": 1302
},
{
"epoch": 0.633215891143239,
"grad_norm": 0.6999359726905823,
"learning_rate": 8e-05,
"loss": 1.7989,
"step": 1303
},
{
"epoch": 0.6337018588263881,
"grad_norm": 0.6687000393867493,
"learning_rate": 8e-05,
"loss": 1.7352,
"step": 1304
},
{
"epoch": 0.6341878265095371,
"grad_norm": 0.7043063044548035,
"learning_rate": 8e-05,
"loss": 1.7114,
"step": 1305
},
{
"epoch": 0.6346737941926862,
"grad_norm": 0.6646273732185364,
"learning_rate": 8e-05,
"loss": 1.6249,
"step": 1306
},
{
"epoch": 0.6351597618758352,
"grad_norm": 0.6701491475105286,
"learning_rate": 8e-05,
"loss": 1.6503,
"step": 1307
},
{
"epoch": 0.6356457295589844,
"grad_norm": 0.6761196851730347,
"learning_rate": 8e-05,
"loss": 1.6093,
"step": 1308
},
{
"epoch": 0.6361316972421334,
"grad_norm": 0.668424665927887,
"learning_rate": 8e-05,
"loss": 1.5847,
"step": 1309
},
{
"epoch": 0.6366176649252825,
"grad_norm": 0.6873188018798828,
"learning_rate": 8e-05,
"loss": 1.6984,
"step": 1310
},
{
"epoch": 0.6371036326084315,
"grad_norm": 0.6726406216621399,
"learning_rate": 8e-05,
"loss": 1.656,
"step": 1311
},
{
"epoch": 0.6375896002915806,
"grad_norm": 0.6651948094367981,
"learning_rate": 8e-05,
"loss": 1.698,
"step": 1312
},
{
"epoch": 0.6380755679747296,
"grad_norm": 0.67381751537323,
"learning_rate": 8e-05,
"loss": 1.7738,
"step": 1313
},
{
"epoch": 0.6385615356578788,
"grad_norm": 0.6783180832862854,
"learning_rate": 8e-05,
"loss": 1.6661,
"step": 1314
},
{
"epoch": 0.6390475033410278,
"grad_norm": 0.6703616976737976,
"learning_rate": 8e-05,
"loss": 1.6764,
"step": 1315
},
{
"epoch": 0.6395334710241769,
"grad_norm": 0.6743754744529724,
"learning_rate": 8e-05,
"loss": 1.6264,
"step": 1316
},
{
"epoch": 0.6400194387073259,
"grad_norm": 0.6861841678619385,
"learning_rate": 8e-05,
"loss": 1.6765,
"step": 1317
},
{
"epoch": 0.640505406390475,
"grad_norm": 0.6622785329818726,
"learning_rate": 8e-05,
"loss": 1.5932,
"step": 1318
},
{
"epoch": 0.640991374073624,
"grad_norm": 0.6901302337646484,
"learning_rate": 8e-05,
"loss": 1.729,
"step": 1319
},
{
"epoch": 0.6414773417567732,
"grad_norm": 0.6749575138092041,
"learning_rate": 8e-05,
"loss": 1.7342,
"step": 1320
},
{
"epoch": 0.6419633094399222,
"grad_norm": 0.6726229786872864,
"learning_rate": 8e-05,
"loss": 1.4928,
"step": 1321
},
{
"epoch": 0.6424492771230713,
"grad_norm": 0.7091121673583984,
"learning_rate": 8e-05,
"loss": 1.6146,
"step": 1322
},
{
"epoch": 0.6429352448062204,
"grad_norm": 0.6836438775062561,
"learning_rate": 8e-05,
"loss": 1.666,
"step": 1323
},
{
"epoch": 0.6434212124893695,
"grad_norm": 0.6668574810028076,
"learning_rate": 8e-05,
"loss": 1.7061,
"step": 1324
},
{
"epoch": 0.6439071801725186,
"grad_norm": 0.7155892252922058,
"learning_rate": 8e-05,
"loss": 1.7516,
"step": 1325
},
{
"epoch": 0.6443931478556676,
"grad_norm": 0.657418966293335,
"learning_rate": 8e-05,
"loss": 1.684,
"step": 1326
},
{
"epoch": 0.6448791155388167,
"grad_norm": 0.6631054282188416,
"learning_rate": 8e-05,
"loss": 1.69,
"step": 1327
},
{
"epoch": 0.6453650832219657,
"grad_norm": 0.6959031224250793,
"learning_rate": 8e-05,
"loss": 1.765,
"step": 1328
},
{
"epoch": 0.6458510509051149,
"grad_norm": 0.6901814937591553,
"learning_rate": 8e-05,
"loss": 1.8029,
"step": 1329
},
{
"epoch": 0.6463370185882639,
"grad_norm": 0.6989200115203857,
"learning_rate": 8e-05,
"loss": 1.7285,
"step": 1330
},
{
"epoch": 0.646822986271413,
"grad_norm": 0.6738196611404419,
"learning_rate": 8e-05,
"loss": 1.7413,
"step": 1331
},
{
"epoch": 0.647308953954562,
"grad_norm": 0.6639776229858398,
"learning_rate": 8e-05,
"loss": 1.7066,
"step": 1332
},
{
"epoch": 0.6477949216377111,
"grad_norm": 0.7205191850662231,
"learning_rate": 8e-05,
"loss": 1.708,
"step": 1333
},
{
"epoch": 0.6482808893208601,
"grad_norm": 0.6805105209350586,
"learning_rate": 8e-05,
"loss": 1.6536,
"step": 1334
},
{
"epoch": 0.6487668570040093,
"grad_norm": 0.6904272437095642,
"learning_rate": 8e-05,
"loss": 1.6673,
"step": 1335
},
{
"epoch": 0.6492528246871583,
"grad_norm": 0.6794489026069641,
"learning_rate": 8e-05,
"loss": 1.6791,
"step": 1336
},
{
"epoch": 0.6497387923703074,
"grad_norm": 0.694638192653656,
"learning_rate": 8e-05,
"loss": 1.7385,
"step": 1337
},
{
"epoch": 0.6502247600534564,
"grad_norm": 0.6804966926574707,
"learning_rate": 8e-05,
"loss": 1.8001,
"step": 1338
},
{
"epoch": 0.6507107277366055,
"grad_norm": 0.6919635534286499,
"learning_rate": 8e-05,
"loss": 1.6131,
"step": 1339
},
{
"epoch": 0.6511966954197546,
"grad_norm": 0.6869526505470276,
"learning_rate": 8e-05,
"loss": 1.5458,
"step": 1340
},
{
"epoch": 0.6516826631029037,
"grad_norm": 0.6857462525367737,
"learning_rate": 8e-05,
"loss": 1.712,
"step": 1341
},
{
"epoch": 0.6521686307860527,
"grad_norm": 0.6743590831756592,
"learning_rate": 8e-05,
"loss": 1.7009,
"step": 1342
},
{
"epoch": 0.6526545984692018,
"grad_norm": 0.684923529624939,
"learning_rate": 8e-05,
"loss": 1.6987,
"step": 1343
},
{
"epoch": 0.6531405661523508,
"grad_norm": 0.6665489673614502,
"learning_rate": 8e-05,
"loss": 1.7824,
"step": 1344
},
{
"epoch": 0.6536265338355,
"grad_norm": 0.7092958688735962,
"learning_rate": 8e-05,
"loss": 1.7133,
"step": 1345
},
{
"epoch": 0.654112501518649,
"grad_norm": 0.6865982413291931,
"learning_rate": 8e-05,
"loss": 1.5584,
"step": 1346
},
{
"epoch": 0.6545984692017981,
"grad_norm": 0.6830027103424072,
"learning_rate": 8e-05,
"loss": 1.7155,
"step": 1347
},
{
"epoch": 0.6550844368849471,
"grad_norm": 0.7074036002159119,
"learning_rate": 8e-05,
"loss": 1.6834,
"step": 1348
},
{
"epoch": 0.6555704045680962,
"grad_norm": 0.692531168460846,
"learning_rate": 8e-05,
"loss": 1.7312,
"step": 1349
},
{
"epoch": 0.6560563722512452,
"grad_norm": 0.7212434411048889,
"learning_rate": 8e-05,
"loss": 1.7338,
"step": 1350
},
{
"epoch": 0.6565423399343944,
"grad_norm": 0.6688287854194641,
"learning_rate": 8e-05,
"loss": 1.7299,
"step": 1351
},
{
"epoch": 0.6570283076175434,
"grad_norm": 0.6903539299964905,
"learning_rate": 8e-05,
"loss": 1.704,
"step": 1352
},
{
"epoch": 0.6575142753006925,
"grad_norm": 0.6791881322860718,
"learning_rate": 8e-05,
"loss": 1.7412,
"step": 1353
},
{
"epoch": 0.6580002429838415,
"grad_norm": 0.6559412479400635,
"learning_rate": 8e-05,
"loss": 1.6554,
"step": 1354
},
{
"epoch": 0.6584862106669906,
"grad_norm": 0.6751131415367126,
"learning_rate": 8e-05,
"loss": 1.658,
"step": 1355
},
{
"epoch": 0.6589721783501398,
"grad_norm": 0.6822508573532104,
"learning_rate": 8e-05,
"loss": 1.6652,
"step": 1356
},
{
"epoch": 0.6594581460332888,
"grad_norm": 0.6608313918113708,
"learning_rate": 8e-05,
"loss": 1.6322,
"step": 1357
},
{
"epoch": 0.6599441137164379,
"grad_norm": 0.6548648476600647,
"learning_rate": 8e-05,
"loss": 1.5695,
"step": 1358
},
{
"epoch": 0.6604300813995869,
"grad_norm": 0.7083562612533569,
"learning_rate": 8e-05,
"loss": 1.7514,
"step": 1359
},
{
"epoch": 0.660916049082736,
"grad_norm": 0.6535549163818359,
"learning_rate": 8e-05,
"loss": 1.6312,
"step": 1360
},
{
"epoch": 0.6614020167658851,
"grad_norm": 0.6861634254455566,
"learning_rate": 8e-05,
"loss": 1.7172,
"step": 1361
},
{
"epoch": 0.6618879844490342,
"grad_norm": 0.7570831179618835,
"learning_rate": 8e-05,
"loss": 1.5712,
"step": 1362
},
{
"epoch": 0.6623739521321832,
"grad_norm": 0.6796947121620178,
"learning_rate": 8e-05,
"loss": 1.6178,
"step": 1363
},
{
"epoch": 0.6628599198153323,
"grad_norm": 0.6609553098678589,
"learning_rate": 8e-05,
"loss": 1.6684,
"step": 1364
},
{
"epoch": 0.6633458874984813,
"grad_norm": 0.6480719447135925,
"learning_rate": 8e-05,
"loss": 1.6271,
"step": 1365
},
{
"epoch": 0.6638318551816305,
"grad_norm": 0.6644112467765808,
"learning_rate": 8e-05,
"loss": 1.7436,
"step": 1366
},
{
"epoch": 0.6643178228647795,
"grad_norm": 0.6798554062843323,
"learning_rate": 8e-05,
"loss": 1.7555,
"step": 1367
},
{
"epoch": 0.6648037905479286,
"grad_norm": 0.654283344745636,
"learning_rate": 8e-05,
"loss": 1.5828,
"step": 1368
},
{
"epoch": 0.6652897582310776,
"grad_norm": 0.6328772306442261,
"learning_rate": 8e-05,
"loss": 1.64,
"step": 1369
},
{
"epoch": 0.6657757259142267,
"grad_norm": 0.6956726908683777,
"learning_rate": 8e-05,
"loss": 1.6959,
"step": 1370
},
{
"epoch": 0.6662616935973757,
"grad_norm": 0.6871621012687683,
"learning_rate": 8e-05,
"loss": 1.7371,
"step": 1371
},
{
"epoch": 0.6667476612805249,
"grad_norm": 0.6861317157745361,
"learning_rate": 8e-05,
"loss": 1.7315,
"step": 1372
},
{
"epoch": 0.6672336289636739,
"grad_norm": 0.6818125247955322,
"learning_rate": 8e-05,
"loss": 1.7122,
"step": 1373
},
{
"epoch": 0.667719596646823,
"grad_norm": 0.6792329549789429,
"learning_rate": 8e-05,
"loss": 1.6096,
"step": 1374
},
{
"epoch": 0.668205564329972,
"grad_norm": 0.7161831259727478,
"learning_rate": 8e-05,
"loss": 1.7651,
"step": 1375
},
{
"epoch": 0.6686915320131211,
"grad_norm": 0.659145712852478,
"learning_rate": 8e-05,
"loss": 1.6682,
"step": 1376
},
{
"epoch": 0.6691774996962702,
"grad_norm": 0.6703169941902161,
"learning_rate": 8e-05,
"loss": 1.5936,
"step": 1377
},
{
"epoch": 0.6696634673794193,
"grad_norm": 0.6605799198150635,
"learning_rate": 8e-05,
"loss": 1.6434,
"step": 1378
},
{
"epoch": 0.6701494350625683,
"grad_norm": 0.6746866703033447,
"learning_rate": 8e-05,
"loss": 1.6715,
"step": 1379
},
{
"epoch": 0.6706354027457174,
"grad_norm": 0.6647935509681702,
"learning_rate": 8e-05,
"loss": 1.7254,
"step": 1380
},
{
"epoch": 0.6711213704288664,
"grad_norm": 0.663534939289093,
"learning_rate": 8e-05,
"loss": 1.6254,
"step": 1381
},
{
"epoch": 0.6716073381120156,
"grad_norm": 0.6398818492889404,
"learning_rate": 8e-05,
"loss": 1.6332,
"step": 1382
},
{
"epoch": 0.6720933057951646,
"grad_norm": 0.655539333820343,
"learning_rate": 8e-05,
"loss": 1.7303,
"step": 1383
},
{
"epoch": 0.6725792734783137,
"grad_norm": 0.6810123920440674,
"learning_rate": 8e-05,
"loss": 1.6059,
"step": 1384
},
{
"epoch": 0.6730652411614627,
"grad_norm": 0.6818733215332031,
"learning_rate": 8e-05,
"loss": 1.6315,
"step": 1385
},
{
"epoch": 0.6735512088446118,
"grad_norm": 0.6757489442825317,
"learning_rate": 8e-05,
"loss": 1.6512,
"step": 1386
},
{
"epoch": 0.6740371765277609,
"grad_norm": 0.7078661918640137,
"learning_rate": 8e-05,
"loss": 1.6592,
"step": 1387
},
{
"epoch": 0.67452314421091,
"grad_norm": 0.6805749535560608,
"learning_rate": 8e-05,
"loss": 1.7233,
"step": 1388
},
{
"epoch": 0.6750091118940591,
"grad_norm": 0.6596097946166992,
"learning_rate": 8e-05,
"loss": 1.6902,
"step": 1389
},
{
"epoch": 0.6754950795772081,
"grad_norm": 0.7238724827766418,
"learning_rate": 8e-05,
"loss": 1.8736,
"step": 1390
},
{
"epoch": 0.6759810472603572,
"grad_norm": 0.669191837310791,
"learning_rate": 8e-05,
"loss": 1.6578,
"step": 1391
},
{
"epoch": 0.6764670149435063,
"grad_norm": 0.6836249828338623,
"learning_rate": 8e-05,
"loss": 1.7131,
"step": 1392
},
{
"epoch": 0.6769529826266554,
"grad_norm": 0.6608012318611145,
"learning_rate": 8e-05,
"loss": 1.6703,
"step": 1393
},
{
"epoch": 0.6774389503098044,
"grad_norm": 0.6750621795654297,
"learning_rate": 8e-05,
"loss": 1.698,
"step": 1394
},
{
"epoch": 0.6779249179929535,
"grad_norm": 0.6800269484519958,
"learning_rate": 8e-05,
"loss": 1.6467,
"step": 1395
},
{
"epoch": 0.6784108856761025,
"grad_norm": 0.7043876647949219,
"learning_rate": 8e-05,
"loss": 1.8003,
"step": 1396
},
{
"epoch": 0.6788968533592517,
"grad_norm": 0.6708895564079285,
"learning_rate": 8e-05,
"loss": 1.6914,
"step": 1397
},
{
"epoch": 0.6793828210424007,
"grad_norm": 0.6897345781326294,
"learning_rate": 8e-05,
"loss": 1.7599,
"step": 1398
},
{
"epoch": 0.6798687887255498,
"grad_norm": 0.6640748977661133,
"learning_rate": 8e-05,
"loss": 1.7228,
"step": 1399
},
{
"epoch": 0.6803547564086988,
"grad_norm": 0.6811213493347168,
"learning_rate": 8e-05,
"loss": 1.6546,
"step": 1400
},
{
"epoch": 0.6808407240918479,
"grad_norm": 0.6596722602844238,
"learning_rate": 8e-05,
"loss": 1.5222,
"step": 1401
},
{
"epoch": 0.6813266917749969,
"grad_norm": 0.7102764248847961,
"learning_rate": 8e-05,
"loss": 1.7356,
"step": 1402
},
{
"epoch": 0.6818126594581461,
"grad_norm": 0.7005658149719238,
"learning_rate": 8e-05,
"loss": 1.7963,
"step": 1403
},
{
"epoch": 0.6822986271412951,
"grad_norm": 0.677031934261322,
"learning_rate": 8e-05,
"loss": 1.6516,
"step": 1404
},
{
"epoch": 0.6827845948244442,
"grad_norm": 0.6946262717247009,
"learning_rate": 8e-05,
"loss": 1.6604,
"step": 1405
},
{
"epoch": 0.6832705625075932,
"grad_norm": 0.6754231452941895,
"learning_rate": 8e-05,
"loss": 1.6781,
"step": 1406
},
{
"epoch": 0.6837565301907423,
"grad_norm": 0.6453408002853394,
"learning_rate": 8e-05,
"loss": 1.6473,
"step": 1407
},
{
"epoch": 0.6842424978738914,
"grad_norm": 0.6620815992355347,
"learning_rate": 8e-05,
"loss": 1.5911,
"step": 1408
},
{
"epoch": 0.6847284655570405,
"grad_norm": 0.6818002462387085,
"learning_rate": 8e-05,
"loss": 1.6635,
"step": 1409
},
{
"epoch": 0.6852144332401895,
"grad_norm": 0.6845067739486694,
"learning_rate": 8e-05,
"loss": 1.6658,
"step": 1410
},
{
"epoch": 0.6857004009233386,
"grad_norm": 0.6666234135627747,
"learning_rate": 8e-05,
"loss": 1.7712,
"step": 1411
},
{
"epoch": 0.6861863686064876,
"grad_norm": 0.6718552708625793,
"learning_rate": 8e-05,
"loss": 1.716,
"step": 1412
},
{
"epoch": 0.6866723362896368,
"grad_norm": 0.6619511842727661,
"learning_rate": 8e-05,
"loss": 1.6542,
"step": 1413
},
{
"epoch": 0.6871583039727858,
"grad_norm": 0.6662033200263977,
"learning_rate": 8e-05,
"loss": 1.7428,
"step": 1414
},
{
"epoch": 0.6876442716559349,
"grad_norm": 0.6738389134407043,
"learning_rate": 8e-05,
"loss": 1.7516,
"step": 1415
},
{
"epoch": 0.6881302393390839,
"grad_norm": 0.6641265749931335,
"learning_rate": 8e-05,
"loss": 1.6309,
"step": 1416
},
{
"epoch": 0.688616207022233,
"grad_norm": 3.941762924194336,
"learning_rate": 8e-05,
"loss": 1.723,
"step": 1417
},
{
"epoch": 0.689102174705382,
"grad_norm": 0.7161770462989807,
"learning_rate": 8e-05,
"loss": 1.6885,
"step": 1418
},
{
"epoch": 0.6895881423885312,
"grad_norm": 0.6878145933151245,
"learning_rate": 8e-05,
"loss": 1.74,
"step": 1419
},
{
"epoch": 0.6900741100716802,
"grad_norm": 0.6390286684036255,
"learning_rate": 8e-05,
"loss": 1.58,
"step": 1420
},
{
"epoch": 0.6905600777548293,
"grad_norm": 0.6442224383354187,
"learning_rate": 8e-05,
"loss": 1.6435,
"step": 1421
},
{
"epoch": 0.6910460454379784,
"grad_norm": 0.7129732966423035,
"learning_rate": 8e-05,
"loss": 1.7327,
"step": 1422
},
{
"epoch": 0.6915320131211274,
"grad_norm": 0.6785884499549866,
"learning_rate": 8e-05,
"loss": 1.5916,
"step": 1423
},
{
"epoch": 0.6920179808042766,
"grad_norm": 0.7008589506149292,
"learning_rate": 8e-05,
"loss": 1.7584,
"step": 1424
},
{
"epoch": 0.6925039484874256,
"grad_norm": 0.705269992351532,
"learning_rate": 8e-05,
"loss": 1.7994,
"step": 1425
},
{
"epoch": 0.6929899161705747,
"grad_norm": 0.72601717710495,
"learning_rate": 8e-05,
"loss": 1.7607,
"step": 1426
},
{
"epoch": 0.6934758838537237,
"grad_norm": 0.643891453742981,
"learning_rate": 8e-05,
"loss": 1.724,
"step": 1427
},
{
"epoch": 0.6939618515368728,
"grad_norm": 0.7093295454978943,
"learning_rate": 8e-05,
"loss": 1.7755,
"step": 1428
},
{
"epoch": 0.6944478192200219,
"grad_norm": 0.6784855127334595,
"learning_rate": 8e-05,
"loss": 1.7262,
"step": 1429
},
{
"epoch": 0.694933786903171,
"grad_norm": 0.6652461886405945,
"learning_rate": 8e-05,
"loss": 1.7047,
"step": 1430
},
{
"epoch": 0.69541975458632,
"grad_norm": 0.652651846408844,
"learning_rate": 8e-05,
"loss": 1.6512,
"step": 1431
},
{
"epoch": 0.6959057222694691,
"grad_norm": 0.6891076564788818,
"learning_rate": 8e-05,
"loss": 1.7693,
"step": 1432
},
{
"epoch": 0.6963916899526181,
"grad_norm": 0.6644092798233032,
"learning_rate": 8e-05,
"loss": 1.6581,
"step": 1433
},
{
"epoch": 0.6968776576357673,
"grad_norm": 0.6464894413948059,
"learning_rate": 8e-05,
"loss": 1.6842,
"step": 1434
},
{
"epoch": 0.6973636253189163,
"grad_norm": 0.6486520767211914,
"learning_rate": 8e-05,
"loss": 1.6178,
"step": 1435
},
{
"epoch": 0.6978495930020654,
"grad_norm": 0.7069161534309387,
"learning_rate": 8e-05,
"loss": 1.7413,
"step": 1436
},
{
"epoch": 0.6983355606852144,
"grad_norm": 0.6747106313705444,
"learning_rate": 8e-05,
"loss": 1.7151,
"step": 1437
},
{
"epoch": 0.6988215283683635,
"grad_norm": 0.6933255195617676,
"learning_rate": 8e-05,
"loss": 1.6957,
"step": 1438
},
{
"epoch": 0.6993074960515125,
"grad_norm": 0.6827980875968933,
"learning_rate": 8e-05,
"loss": 1.7849,
"step": 1439
},
{
"epoch": 0.6997934637346617,
"grad_norm": 0.6822814345359802,
"learning_rate": 8e-05,
"loss": 1.6494,
"step": 1440
},
{
"epoch": 0.7002794314178107,
"grad_norm": 0.6557889580726624,
"learning_rate": 8e-05,
"loss": 1.735,
"step": 1441
},
{
"epoch": 0.7007653991009598,
"grad_norm": 0.667428731918335,
"learning_rate": 8e-05,
"loss": 1.6354,
"step": 1442
},
{
"epoch": 0.7012513667841088,
"grad_norm": 0.6678896546363831,
"learning_rate": 8e-05,
"loss": 1.6234,
"step": 1443
},
{
"epoch": 0.701737334467258,
"grad_norm": 0.7056771516799927,
"learning_rate": 8e-05,
"loss": 1.628,
"step": 1444
},
{
"epoch": 0.702223302150407,
"grad_norm": 0.736592710018158,
"learning_rate": 8e-05,
"loss": 1.8718,
"step": 1445
},
{
"epoch": 0.7027092698335561,
"grad_norm": 0.6782841086387634,
"learning_rate": 8e-05,
"loss": 1.7248,
"step": 1446
},
{
"epoch": 0.7031952375167051,
"grad_norm": 0.6662434935569763,
"learning_rate": 8e-05,
"loss": 1.64,
"step": 1447
},
{
"epoch": 0.7036812051998542,
"grad_norm": 0.6652860641479492,
"learning_rate": 8e-05,
"loss": 1.7572,
"step": 1448
},
{
"epoch": 0.7041671728830032,
"grad_norm": 0.6745628714561462,
"learning_rate": 8e-05,
"loss": 1.6307,
"step": 1449
},
{
"epoch": 0.7046531405661524,
"grad_norm": 0.6709152460098267,
"learning_rate": 8e-05,
"loss": 1.6657,
"step": 1450
},
{
"epoch": 0.7051391082493014,
"grad_norm": 0.6735470294952393,
"learning_rate": 8e-05,
"loss": 1.6553,
"step": 1451
},
{
"epoch": 0.7056250759324505,
"grad_norm": 0.6663253307342529,
"learning_rate": 8e-05,
"loss": 1.7405,
"step": 1452
},
{
"epoch": 0.7061110436155995,
"grad_norm": 0.6561141014099121,
"learning_rate": 8e-05,
"loss": 1.6806,
"step": 1453
},
{
"epoch": 0.7065970112987486,
"grad_norm": 0.6699032187461853,
"learning_rate": 8e-05,
"loss": 1.7242,
"step": 1454
},
{
"epoch": 0.7070829789818978,
"grad_norm": 0.691829264163971,
"learning_rate": 8e-05,
"loss": 1.8473,
"step": 1455
},
{
"epoch": 0.7075689466650468,
"grad_norm": 0.6805023550987244,
"learning_rate": 8e-05,
"loss": 1.7381,
"step": 1456
},
{
"epoch": 0.7080549143481959,
"grad_norm": 0.6992806792259216,
"learning_rate": 8e-05,
"loss": 1.6336,
"step": 1457
},
{
"epoch": 0.7085408820313449,
"grad_norm": 0.7170582413673401,
"learning_rate": 8e-05,
"loss": 1.7041,
"step": 1458
},
{
"epoch": 0.709026849714494,
"grad_norm": 0.6936716437339783,
"learning_rate": 8e-05,
"loss": 1.7362,
"step": 1459
},
{
"epoch": 0.709512817397643,
"grad_norm": 0.7304790616035461,
"learning_rate": 8e-05,
"loss": 1.723,
"step": 1460
},
{
"epoch": 0.7099987850807922,
"grad_norm": 0.6815493106842041,
"learning_rate": 8e-05,
"loss": 1.6387,
"step": 1461
},
{
"epoch": 0.7104847527639412,
"grad_norm": 0.7028688788414001,
"learning_rate": 8e-05,
"loss": 1.7329,
"step": 1462
},
{
"epoch": 0.7109707204470903,
"grad_norm": 0.6570793986320496,
"learning_rate": 8e-05,
"loss": 1.6309,
"step": 1463
},
{
"epoch": 0.7114566881302393,
"grad_norm": 0.6735349893569946,
"learning_rate": 8e-05,
"loss": 1.7854,
"step": 1464
},
{
"epoch": 0.7119426558133884,
"grad_norm": 0.6720329523086548,
"learning_rate": 8e-05,
"loss": 1.6121,
"step": 1465
},
{
"epoch": 0.7124286234965375,
"grad_norm": 0.6715527176856995,
"learning_rate": 8e-05,
"loss": 1.6782,
"step": 1466
},
{
"epoch": 0.7129145911796866,
"grad_norm": 0.686566948890686,
"learning_rate": 8e-05,
"loss": 1.6949,
"step": 1467
},
{
"epoch": 0.7134005588628356,
"grad_norm": 0.7121155858039856,
"learning_rate": 8e-05,
"loss": 1.6766,
"step": 1468
},
{
"epoch": 0.7138865265459847,
"grad_norm": 0.748831570148468,
"learning_rate": 8e-05,
"loss": 1.7596,
"step": 1469
},
{
"epoch": 0.7143724942291337,
"grad_norm": 0.6564344167709351,
"learning_rate": 8e-05,
"loss": 1.6862,
"step": 1470
},
{
"epoch": 0.7148584619122829,
"grad_norm": 0.6715706586837769,
"learning_rate": 8e-05,
"loss": 1.7565,
"step": 1471
},
{
"epoch": 0.7153444295954319,
"grad_norm": 0.642023503780365,
"learning_rate": 8e-05,
"loss": 1.6628,
"step": 1472
},
{
"epoch": 0.715830397278581,
"grad_norm": 0.6556823253631592,
"learning_rate": 8e-05,
"loss": 1.6583,
"step": 1473
},
{
"epoch": 0.71631636496173,
"grad_norm": 0.6798869371414185,
"learning_rate": 8e-05,
"loss": 1.6664,
"step": 1474
},
{
"epoch": 0.7168023326448791,
"grad_norm": 0.6593642830848694,
"learning_rate": 8e-05,
"loss": 1.6915,
"step": 1475
},
{
"epoch": 0.7172883003280281,
"grad_norm": 0.6747823357582092,
"learning_rate": 8e-05,
"loss": 1.6472,
"step": 1476
},
{
"epoch": 0.7177742680111773,
"grad_norm": 0.6821336150169373,
"learning_rate": 8e-05,
"loss": 1.7324,
"step": 1477
},
{
"epoch": 0.7182602356943263,
"grad_norm": 0.6804844737052917,
"learning_rate": 8e-05,
"loss": 1.7345,
"step": 1478
},
{
"epoch": 0.7187462033774754,
"grad_norm": 0.6529686450958252,
"learning_rate": 8e-05,
"loss": 1.6022,
"step": 1479
},
{
"epoch": 0.7192321710606244,
"grad_norm": 0.6944935321807861,
"learning_rate": 8e-05,
"loss": 1.7574,
"step": 1480
},
{
"epoch": 0.7197181387437736,
"grad_norm": 0.6925895810127258,
"learning_rate": 8e-05,
"loss": 1.6824,
"step": 1481
},
{
"epoch": 0.7202041064269226,
"grad_norm": 0.6988723874092102,
"learning_rate": 8e-05,
"loss": 1.8013,
"step": 1482
},
{
"epoch": 0.7206900741100717,
"grad_norm": 0.6638310551643372,
"learning_rate": 8e-05,
"loss": 1.7021,
"step": 1483
},
{
"epoch": 0.7211760417932207,
"grad_norm": 0.6527820229530334,
"learning_rate": 8e-05,
"loss": 1.7442,
"step": 1484
},
{
"epoch": 0.7216620094763698,
"grad_norm": 0.6710399389266968,
"learning_rate": 8e-05,
"loss": 1.8107,
"step": 1485
},
{
"epoch": 0.7221479771595188,
"grad_norm": 0.6887544989585876,
"learning_rate": 8e-05,
"loss": 1.6263,
"step": 1486
},
{
"epoch": 0.722633944842668,
"grad_norm": 0.649701714515686,
"learning_rate": 8e-05,
"loss": 1.646,
"step": 1487
},
{
"epoch": 0.7231199125258171,
"grad_norm": 0.7085797786712646,
"learning_rate": 8e-05,
"loss": 1.7884,
"step": 1488
},
{
"epoch": 0.7236058802089661,
"grad_norm": 0.6887712478637695,
"learning_rate": 8e-05,
"loss": 1.7511,
"step": 1489
},
{
"epoch": 0.7240918478921152,
"grad_norm": 0.6780201196670532,
"learning_rate": 8e-05,
"loss": 1.6513,
"step": 1490
},
{
"epoch": 0.7245778155752642,
"grad_norm": 0.6555797457695007,
"learning_rate": 8e-05,
"loss": 1.686,
"step": 1491
},
{
"epoch": 0.7250637832584134,
"grad_norm": 0.693983793258667,
"learning_rate": 8e-05,
"loss": 1.684,
"step": 1492
},
{
"epoch": 0.7255497509415624,
"grad_norm": 0.6978248357772827,
"learning_rate": 8e-05,
"loss": 1.7647,
"step": 1493
},
{
"epoch": 0.7260357186247115,
"grad_norm": 0.6634182929992676,
"learning_rate": 8e-05,
"loss": 1.6457,
"step": 1494
},
{
"epoch": 0.7265216863078605,
"grad_norm": 0.687709629535675,
"learning_rate": 8e-05,
"loss": 1.7756,
"step": 1495
},
{
"epoch": 0.7270076539910096,
"grad_norm": 0.706290066242218,
"learning_rate": 8e-05,
"loss": 1.7554,
"step": 1496
},
{
"epoch": 0.7274936216741587,
"grad_norm": 0.6736918687820435,
"learning_rate": 8e-05,
"loss": 1.7314,
"step": 1497
},
{
"epoch": 0.7279795893573078,
"grad_norm": 0.6696215271949768,
"learning_rate": 8e-05,
"loss": 1.6678,
"step": 1498
},
{
"epoch": 0.7284655570404568,
"grad_norm": 0.6545590758323669,
"learning_rate": 8e-05,
"loss": 1.7346,
"step": 1499
},
{
"epoch": 0.7289515247236059,
"grad_norm": 0.6590006947517395,
"learning_rate": 8e-05,
"loss": 1.6795,
"step": 1500
},
{
"epoch": 0.7294374924067549,
"grad_norm": 0.7147099375724792,
"learning_rate": 8e-05,
"loss": 1.6581,
"step": 1501
},
{
"epoch": 0.729923460089904,
"grad_norm": 0.6545288562774658,
"learning_rate": 8e-05,
"loss": 1.6212,
"step": 1502
},
{
"epoch": 0.7304094277730531,
"grad_norm": 0.6668671369552612,
"learning_rate": 8e-05,
"loss": 1.6786,
"step": 1503
},
{
"epoch": 0.7308953954562022,
"grad_norm": 0.7035940885543823,
"learning_rate": 8e-05,
"loss": 1.7651,
"step": 1504
},
{
"epoch": 0.7313813631393512,
"grad_norm": 0.6678001880645752,
"learning_rate": 8e-05,
"loss": 1.6795,
"step": 1505
},
{
"epoch": 0.7318673308225003,
"grad_norm": 0.6693470478057861,
"learning_rate": 8e-05,
"loss": 1.6943,
"step": 1506
},
{
"epoch": 0.7323532985056493,
"grad_norm": 0.6441562175750732,
"learning_rate": 8e-05,
"loss": 1.6907,
"step": 1507
},
{
"epoch": 0.7328392661887985,
"grad_norm": 0.6622614860534668,
"learning_rate": 8e-05,
"loss": 1.6828,
"step": 1508
},
{
"epoch": 0.7333252338719475,
"grad_norm": 0.6824374198913574,
"learning_rate": 8e-05,
"loss": 1.7827,
"step": 1509
},
{
"epoch": 0.7338112015550966,
"grad_norm": 0.6756101250648499,
"learning_rate": 8e-05,
"loss": 1.6895,
"step": 1510
},
{
"epoch": 0.7342971692382456,
"grad_norm": 0.6672031879425049,
"learning_rate": 8e-05,
"loss": 1.6734,
"step": 1511
},
{
"epoch": 0.7347831369213947,
"grad_norm": 0.6641438603401184,
"learning_rate": 8e-05,
"loss": 1.5267,
"step": 1512
},
{
"epoch": 0.7352691046045438,
"grad_norm": 0.703173816204071,
"learning_rate": 8e-05,
"loss": 1.5887,
"step": 1513
},
{
"epoch": 0.7357550722876929,
"grad_norm": 0.6660346984863281,
"learning_rate": 8e-05,
"loss": 1.6545,
"step": 1514
},
{
"epoch": 0.7362410399708419,
"grad_norm": 0.7232402563095093,
"learning_rate": 8e-05,
"loss": 1.6503,
"step": 1515
},
{
"epoch": 0.736727007653991,
"grad_norm": 0.6885964870452881,
"learning_rate": 8e-05,
"loss": 1.6871,
"step": 1516
},
{
"epoch": 0.73721297533714,
"grad_norm": 0.6883174180984497,
"learning_rate": 8e-05,
"loss": 1.5044,
"step": 1517
},
{
"epoch": 0.7376989430202892,
"grad_norm": 0.658319354057312,
"learning_rate": 8e-05,
"loss": 1.6292,
"step": 1518
},
{
"epoch": 0.7381849107034382,
"grad_norm": 0.7118297815322876,
"learning_rate": 8e-05,
"loss": 1.671,
"step": 1519
},
{
"epoch": 0.7386708783865873,
"grad_norm": 0.6947025060653687,
"learning_rate": 8e-05,
"loss": 1.74,
"step": 1520
},
{
"epoch": 0.7391568460697364,
"grad_norm": 0.7174086570739746,
"learning_rate": 8e-05,
"loss": 1.7048,
"step": 1521
},
{
"epoch": 0.7396428137528854,
"grad_norm": 0.6907267570495605,
"learning_rate": 8e-05,
"loss": 1.7461,
"step": 1522
},
{
"epoch": 0.7401287814360346,
"grad_norm": 0.6679226756095886,
"learning_rate": 8e-05,
"loss": 1.7199,
"step": 1523
},
{
"epoch": 0.7406147491191836,
"grad_norm": 0.6862102746963501,
"learning_rate": 8e-05,
"loss": 1.6956,
"step": 1524
},
{
"epoch": 0.7411007168023327,
"grad_norm": 0.6751832962036133,
"learning_rate": 8e-05,
"loss": 1.7725,
"step": 1525
},
{
"epoch": 0.7415866844854817,
"grad_norm": 0.6852265000343323,
"learning_rate": 8e-05,
"loss": 1.7096,
"step": 1526
},
{
"epoch": 0.7420726521686308,
"grad_norm": 0.6521369218826294,
"learning_rate": 8e-05,
"loss": 1.6175,
"step": 1527
},
{
"epoch": 0.7425586198517798,
"grad_norm": 0.7039942145347595,
"learning_rate": 8e-05,
"loss": 1.7506,
"step": 1528
},
{
"epoch": 0.743044587534929,
"grad_norm": 0.6812547445297241,
"learning_rate": 8e-05,
"loss": 1.7117,
"step": 1529
},
{
"epoch": 0.743530555218078,
"grad_norm": 0.6890732645988464,
"learning_rate": 8e-05,
"loss": 1.6695,
"step": 1530
},
{
"epoch": 0.7440165229012271,
"grad_norm": 0.7119623422622681,
"learning_rate": 8e-05,
"loss": 1.6338,
"step": 1531
},
{
"epoch": 0.7445024905843761,
"grad_norm": 0.6888270378112793,
"learning_rate": 8e-05,
"loss": 1.7151,
"step": 1532
},
{
"epoch": 0.7449884582675252,
"grad_norm": 0.7158077955245972,
"learning_rate": 8e-05,
"loss": 1.7661,
"step": 1533
},
{
"epoch": 0.7454744259506743,
"grad_norm": 0.6932821273803711,
"learning_rate": 8e-05,
"loss": 1.7125,
"step": 1534
},
{
"epoch": 0.7459603936338234,
"grad_norm": 0.7240128517150879,
"learning_rate": 8e-05,
"loss": 1.7822,
"step": 1535
},
{
"epoch": 0.7464463613169724,
"grad_norm": 0.6781498193740845,
"learning_rate": 8e-05,
"loss": 1.6673,
"step": 1536
},
{
"epoch": 0.7469323290001215,
"grad_norm": 0.6755689382553101,
"learning_rate": 8e-05,
"loss": 1.7673,
"step": 1537
},
{
"epoch": 0.7474182966832705,
"grad_norm": 0.6547968983650208,
"learning_rate": 8e-05,
"loss": 1.6665,
"step": 1538
},
{
"epoch": 0.7479042643664197,
"grad_norm": 0.7272111773490906,
"learning_rate": 8e-05,
"loss": 1.6823,
"step": 1539
},
{
"epoch": 0.7483902320495687,
"grad_norm": 0.663520872592926,
"learning_rate": 8e-05,
"loss": 1.6619,
"step": 1540
},
{
"epoch": 0.7488761997327178,
"grad_norm": 0.7074699997901917,
"learning_rate": 8e-05,
"loss": 1.6971,
"step": 1541
},
{
"epoch": 0.7493621674158668,
"grad_norm": 0.6534743905067444,
"learning_rate": 8e-05,
"loss": 1.697,
"step": 1542
},
{
"epoch": 0.7498481350990159,
"grad_norm": 0.6739033460617065,
"learning_rate": 8e-05,
"loss": 1.7456,
"step": 1543
},
{
"epoch": 0.750334102782165,
"grad_norm": 0.6841751337051392,
"learning_rate": 8e-05,
"loss": 1.7071,
"step": 1544
},
{
"epoch": 0.7508200704653141,
"grad_norm": 0.6846221089363098,
"learning_rate": 8e-05,
"loss": 1.6859,
"step": 1545
},
{
"epoch": 0.7513060381484631,
"grad_norm": 0.6791633367538452,
"learning_rate": 8e-05,
"loss": 1.701,
"step": 1546
},
{
"epoch": 0.7517920058316122,
"grad_norm": 0.6597045063972473,
"learning_rate": 8e-05,
"loss": 1.6801,
"step": 1547
},
{
"epoch": 0.7522779735147612,
"grad_norm": 0.665151059627533,
"learning_rate": 8e-05,
"loss": 1.6949,
"step": 1548
},
{
"epoch": 0.7527639411979103,
"grad_norm": 0.6514449119567871,
"learning_rate": 8e-05,
"loss": 1.7199,
"step": 1549
},
{
"epoch": 0.7532499088810594,
"grad_norm": 0.6700636148452759,
"learning_rate": 8e-05,
"loss": 1.6602,
"step": 1550
},
{
"epoch": 0.7537358765642085,
"grad_norm": 0.6723707318305969,
"learning_rate": 8e-05,
"loss": 1.7314,
"step": 1551
},
{
"epoch": 0.7542218442473575,
"grad_norm": 0.6736507415771484,
"learning_rate": 8e-05,
"loss": 1.6586,
"step": 1552
},
{
"epoch": 0.7547078119305066,
"grad_norm": 0.6550686955451965,
"learning_rate": 8e-05,
"loss": 1.5756,
"step": 1553
},
{
"epoch": 0.7551937796136557,
"grad_norm": 0.6830410361289978,
"learning_rate": 8e-05,
"loss": 1.7073,
"step": 1554
},
{
"epoch": 0.7556797472968048,
"grad_norm": 0.6556110382080078,
"learning_rate": 8e-05,
"loss": 1.6739,
"step": 1555
},
{
"epoch": 0.7561657149799539,
"grad_norm": 0.6706458330154419,
"learning_rate": 8e-05,
"loss": 1.7234,
"step": 1556
},
{
"epoch": 0.7566516826631029,
"grad_norm": 0.6623361706733704,
"learning_rate": 8e-05,
"loss": 1.6544,
"step": 1557
},
{
"epoch": 0.757137650346252,
"grad_norm": 0.6876336336135864,
"learning_rate": 8e-05,
"loss": 1.7394,
"step": 1558
},
{
"epoch": 0.757623618029401,
"grad_norm": 0.6777063012123108,
"learning_rate": 8e-05,
"loss": 1.699,
"step": 1559
},
{
"epoch": 0.7581095857125502,
"grad_norm": 0.6903174519538879,
"learning_rate": 8e-05,
"loss": 1.7063,
"step": 1560
},
{
"epoch": 0.7585955533956992,
"grad_norm": 0.6869831681251526,
"learning_rate": 8e-05,
"loss": 1.7737,
"step": 1561
},
{
"epoch": 0.7590815210788483,
"grad_norm": 0.6692809462547302,
"learning_rate": 8e-05,
"loss": 1.7101,
"step": 1562
},
{
"epoch": 0.7595674887619973,
"grad_norm": 0.677603542804718,
"learning_rate": 8e-05,
"loss": 1.7068,
"step": 1563
},
{
"epoch": 0.7600534564451464,
"grad_norm": 0.680304765701294,
"learning_rate": 8e-05,
"loss": 1.7007,
"step": 1564
},
{
"epoch": 0.7605394241282954,
"grad_norm": 0.6710352301597595,
"learning_rate": 8e-05,
"loss": 1.6817,
"step": 1565
},
{
"epoch": 0.7610253918114446,
"grad_norm": 0.6891443133354187,
"learning_rate": 8e-05,
"loss": 1.628,
"step": 1566
},
{
"epoch": 0.7615113594945936,
"grad_norm": 0.6972365379333496,
"learning_rate": 8e-05,
"loss": 1.6964,
"step": 1567
},
{
"epoch": 0.7619973271777427,
"grad_norm": 0.6648043394088745,
"learning_rate": 8e-05,
"loss": 1.6418,
"step": 1568
},
{
"epoch": 0.7624832948608917,
"grad_norm": 0.6827974915504456,
"learning_rate": 8e-05,
"loss": 1.7091,
"step": 1569
},
{
"epoch": 0.7629692625440408,
"grad_norm": 0.6765845417976379,
"learning_rate": 8e-05,
"loss": 1.7379,
"step": 1570
},
{
"epoch": 0.7634552302271899,
"grad_norm": 0.6806741952896118,
"learning_rate": 8e-05,
"loss": 1.7331,
"step": 1571
},
{
"epoch": 0.763941197910339,
"grad_norm": 0.6496466994285583,
"learning_rate": 8e-05,
"loss": 1.7129,
"step": 1572
},
{
"epoch": 0.764427165593488,
"grad_norm": 0.6557418704032898,
"learning_rate": 8e-05,
"loss": 1.5704,
"step": 1573
},
{
"epoch": 0.7649131332766371,
"grad_norm": 0.6779592633247375,
"learning_rate": 8e-05,
"loss": 1.6916,
"step": 1574
},
{
"epoch": 0.7653991009597861,
"grad_norm": 0.6686397194862366,
"learning_rate": 8e-05,
"loss": 1.6843,
"step": 1575
},
{
"epoch": 0.7658850686429353,
"grad_norm": 0.6979429125785828,
"learning_rate": 8e-05,
"loss": 1.6683,
"step": 1576
},
{
"epoch": 0.7663710363260843,
"grad_norm": 0.6567559242248535,
"learning_rate": 8e-05,
"loss": 1.6165,
"step": 1577
},
{
"epoch": 0.7668570040092334,
"grad_norm": 0.6684688925743103,
"learning_rate": 8e-05,
"loss": 1.7502,
"step": 1578
},
{
"epoch": 0.7673429716923824,
"grad_norm": 0.6478898525238037,
"learning_rate": 8e-05,
"loss": 1.6816,
"step": 1579
},
{
"epoch": 0.7678289393755315,
"grad_norm": 0.6591485142707825,
"learning_rate": 8e-05,
"loss": 1.7777,
"step": 1580
},
{
"epoch": 0.7683149070586806,
"grad_norm": 0.6320804357528687,
"learning_rate": 8e-05,
"loss": 1.6093,
"step": 1581
},
{
"epoch": 0.7688008747418297,
"grad_norm": 0.6655343770980835,
"learning_rate": 8e-05,
"loss": 1.7313,
"step": 1582
},
{
"epoch": 0.7692868424249787,
"grad_norm": 0.6436070799827576,
"learning_rate": 8e-05,
"loss": 1.6085,
"step": 1583
},
{
"epoch": 0.7697728101081278,
"grad_norm": 0.6613366603851318,
"learning_rate": 8e-05,
"loss": 1.6363,
"step": 1584
},
{
"epoch": 0.7702587777912768,
"grad_norm": 0.7339720726013184,
"learning_rate": 8e-05,
"loss": 1.8011,
"step": 1585
},
{
"epoch": 0.770744745474426,
"grad_norm": 0.692807137966156,
"learning_rate": 8e-05,
"loss": 1.7141,
"step": 1586
},
{
"epoch": 0.7712307131575751,
"grad_norm": 0.7067819237709045,
"learning_rate": 8e-05,
"loss": 1.7612,
"step": 1587
},
{
"epoch": 0.7717166808407241,
"grad_norm": 0.6481447219848633,
"learning_rate": 8e-05,
"loss": 1.5679,
"step": 1588
},
{
"epoch": 0.7722026485238732,
"grad_norm": 0.6787363886833191,
"learning_rate": 8e-05,
"loss": 1.733,
"step": 1589
},
{
"epoch": 0.7726886162070222,
"grad_norm": 0.6830894351005554,
"learning_rate": 8e-05,
"loss": 1.6168,
"step": 1590
},
{
"epoch": 0.7731745838901714,
"grad_norm": 0.669689416885376,
"learning_rate": 8e-05,
"loss": 1.5582,
"step": 1591
},
{
"epoch": 0.7736605515733204,
"grad_norm": 0.6559072136878967,
"learning_rate": 8e-05,
"loss": 1.6642,
"step": 1592
},
{
"epoch": 0.7741465192564695,
"grad_norm": 0.7028058171272278,
"learning_rate": 8e-05,
"loss": 1.689,
"step": 1593
},
{
"epoch": 0.7746324869396185,
"grad_norm": 0.699228823184967,
"learning_rate": 8e-05,
"loss": 1.7172,
"step": 1594
},
{
"epoch": 0.7751184546227676,
"grad_norm": 0.6780297756195068,
"learning_rate": 8e-05,
"loss": 1.6654,
"step": 1595
},
{
"epoch": 0.7756044223059166,
"grad_norm": 0.6976983547210693,
"learning_rate": 8e-05,
"loss": 1.8002,
"step": 1596
},
{
"epoch": 0.7760903899890658,
"grad_norm": 0.6806316375732422,
"learning_rate": 8e-05,
"loss": 1.6447,
"step": 1597
},
{
"epoch": 0.7765763576722148,
"grad_norm": 0.7629841566085815,
"learning_rate": 8e-05,
"loss": 1.7308,
"step": 1598
},
{
"epoch": 0.7770623253553639,
"grad_norm": 0.6737505793571472,
"learning_rate": 8e-05,
"loss": 1.6911,
"step": 1599
},
{
"epoch": 0.7775482930385129,
"grad_norm": 0.6930803656578064,
"learning_rate": 8e-05,
"loss": 1.7309,
"step": 1600
},
{
"epoch": 0.778034260721662,
"grad_norm": 0.6944562196731567,
"learning_rate": 8e-05,
"loss": 1.6177,
"step": 1601
},
{
"epoch": 0.778520228404811,
"grad_norm": 0.641501784324646,
"learning_rate": 8e-05,
"loss": 1.5951,
"step": 1602
},
{
"epoch": 0.7790061960879602,
"grad_norm": 0.6454229950904846,
"learning_rate": 8e-05,
"loss": 1.6288,
"step": 1603
},
{
"epoch": 0.7794921637711092,
"grad_norm": 0.6681306958198547,
"learning_rate": 8e-05,
"loss": 1.5893,
"step": 1604
},
{
"epoch": 0.7799781314542583,
"grad_norm": 0.679373562335968,
"learning_rate": 8e-05,
"loss": 1.782,
"step": 1605
},
{
"epoch": 0.7804640991374073,
"grad_norm": 0.6566761136054993,
"learning_rate": 8e-05,
"loss": 1.6877,
"step": 1606
},
{
"epoch": 0.7809500668205565,
"grad_norm": 0.6666831970214844,
"learning_rate": 8e-05,
"loss": 1.7413,
"step": 1607
},
{
"epoch": 0.7814360345037055,
"grad_norm": 0.6732777953147888,
"learning_rate": 8e-05,
"loss": 1.6067,
"step": 1608
},
{
"epoch": 0.7819220021868546,
"grad_norm": 0.6751415133476257,
"learning_rate": 8e-05,
"loss": 1.5491,
"step": 1609
},
{
"epoch": 0.7824079698700036,
"grad_norm": 0.6782683730125427,
"learning_rate": 8e-05,
"loss": 1.7391,
"step": 1610
},
{
"epoch": 0.7828939375531527,
"grad_norm": 0.6788360476493835,
"learning_rate": 8e-05,
"loss": 1.7203,
"step": 1611
},
{
"epoch": 0.7833799052363017,
"grad_norm": 0.7067826986312866,
"learning_rate": 8e-05,
"loss": 1.7645,
"step": 1612
},
{
"epoch": 0.7838658729194509,
"grad_norm": 0.6764886379241943,
"learning_rate": 8e-05,
"loss": 1.5745,
"step": 1613
},
{
"epoch": 0.7843518406025999,
"grad_norm": 0.6709634065628052,
"learning_rate": 8e-05,
"loss": 1.6611,
"step": 1614
},
{
"epoch": 0.784837808285749,
"grad_norm": 0.6712684035301208,
"learning_rate": 8e-05,
"loss": 1.6091,
"step": 1615
},
{
"epoch": 0.785323775968898,
"grad_norm": 0.6782966256141663,
"learning_rate": 8e-05,
"loss": 1.763,
"step": 1616
},
{
"epoch": 0.7858097436520471,
"grad_norm": 0.696746289730072,
"learning_rate": 8e-05,
"loss": 1.6555,
"step": 1617
},
{
"epoch": 0.7862957113351962,
"grad_norm": 0.6854478120803833,
"learning_rate": 8e-05,
"loss": 1.6408,
"step": 1618
},
{
"epoch": 0.7867816790183453,
"grad_norm": 0.6995331645011902,
"learning_rate": 8e-05,
"loss": 1.8078,
"step": 1619
},
{
"epoch": 0.7872676467014944,
"grad_norm": 0.6669647693634033,
"learning_rate": 8e-05,
"loss": 1.7104,
"step": 1620
},
{
"epoch": 0.7877536143846434,
"grad_norm": 0.6792809963226318,
"learning_rate": 8e-05,
"loss": 1.5991,
"step": 1621
},
{
"epoch": 0.7882395820677925,
"grad_norm": 0.6517792344093323,
"learning_rate": 8e-05,
"loss": 1.5157,
"step": 1622
},
{
"epoch": 0.7887255497509416,
"grad_norm": 0.6973136067390442,
"learning_rate": 8e-05,
"loss": 1.6118,
"step": 1623
},
{
"epoch": 0.7892115174340907,
"grad_norm": 0.6901780962944031,
"learning_rate": 8e-05,
"loss": 1.6117,
"step": 1624
},
{
"epoch": 0.7896974851172397,
"grad_norm": 0.6688944697380066,
"learning_rate": 8e-05,
"loss": 1.6668,
"step": 1625
},
{
"epoch": 0.7901834528003888,
"grad_norm": 0.6714779138565063,
"learning_rate": 8e-05,
"loss": 1.6393,
"step": 1626
},
{
"epoch": 0.7906694204835378,
"grad_norm": 0.6646644473075867,
"learning_rate": 8e-05,
"loss": 1.7112,
"step": 1627
},
{
"epoch": 0.791155388166687,
"grad_norm": 0.7120575308799744,
"learning_rate": 8e-05,
"loss": 1.7342,
"step": 1628
},
{
"epoch": 0.791641355849836,
"grad_norm": 0.6747168898582458,
"learning_rate": 8e-05,
"loss": 1.7522,
"step": 1629
},
{
"epoch": 0.7921273235329851,
"grad_norm": 0.6929645538330078,
"learning_rate": 8e-05,
"loss": 1.649,
"step": 1630
},
{
"epoch": 0.7926132912161341,
"grad_norm": 0.6746399998664856,
"learning_rate": 8e-05,
"loss": 1.7183,
"step": 1631
},
{
"epoch": 0.7930992588992832,
"grad_norm": 0.7156386971473694,
"learning_rate": 8e-05,
"loss": 1.7142,
"step": 1632
},
{
"epoch": 0.7935852265824322,
"grad_norm": 0.674637496471405,
"learning_rate": 8e-05,
"loss": 1.6821,
"step": 1633
},
{
"epoch": 0.7940711942655814,
"grad_norm": 0.6594585180282593,
"learning_rate": 8e-05,
"loss": 1.6845,
"step": 1634
},
{
"epoch": 0.7945571619487304,
"grad_norm": 0.7104175686836243,
"learning_rate": 8e-05,
"loss": 1.6733,
"step": 1635
},
{
"epoch": 0.7950431296318795,
"grad_norm": 0.6906667947769165,
"learning_rate": 8e-05,
"loss": 1.7845,
"step": 1636
},
{
"epoch": 0.7955290973150285,
"grad_norm": 0.6826514601707458,
"learning_rate": 8e-05,
"loss": 1.7153,
"step": 1637
},
{
"epoch": 0.7960150649981776,
"grad_norm": 0.6801000833511353,
"learning_rate": 8e-05,
"loss": 1.6609,
"step": 1638
},
{
"epoch": 0.7965010326813267,
"grad_norm": 0.6828523278236389,
"learning_rate": 8e-05,
"loss": 1.669,
"step": 1639
},
{
"epoch": 0.7969870003644758,
"grad_norm": 0.664400577545166,
"learning_rate": 8e-05,
"loss": 1.6679,
"step": 1640
},
{
"epoch": 0.7974729680476248,
"grad_norm": 0.6488589644432068,
"learning_rate": 8e-05,
"loss": 1.6639,
"step": 1641
},
{
"epoch": 0.7979589357307739,
"grad_norm": 0.6523492932319641,
"learning_rate": 8e-05,
"loss": 1.6958,
"step": 1642
},
{
"epoch": 0.7984449034139229,
"grad_norm": 0.6777427196502686,
"learning_rate": 8e-05,
"loss": 1.6381,
"step": 1643
},
{
"epoch": 0.7989308710970721,
"grad_norm": 0.6841310858726501,
"learning_rate": 8e-05,
"loss": 1.7176,
"step": 1644
},
{
"epoch": 0.7994168387802211,
"grad_norm": 0.7123305797576904,
"learning_rate": 8e-05,
"loss": 1.7731,
"step": 1645
},
{
"epoch": 0.7999028064633702,
"grad_norm": 0.6457357406616211,
"learning_rate": 8e-05,
"loss": 1.6669,
"step": 1646
},
{
"epoch": 0.8003887741465192,
"grad_norm": 0.6546925902366638,
"learning_rate": 8e-05,
"loss": 1.6406,
"step": 1647
},
{
"epoch": 0.8008747418296683,
"grad_norm": 0.6729357838630676,
"learning_rate": 8e-05,
"loss": 1.6421,
"step": 1648
},
{
"epoch": 0.8013607095128173,
"grad_norm": 0.6574150323867798,
"learning_rate": 8e-05,
"loss": 1.7145,
"step": 1649
},
{
"epoch": 0.8018466771959665,
"grad_norm": 0.6656662821769714,
"learning_rate": 8e-05,
"loss": 1.6956,
"step": 1650
},
{
"epoch": 0.8023326448791155,
"grad_norm": 0.6765982508659363,
"learning_rate": 8e-05,
"loss": 1.6938,
"step": 1651
},
{
"epoch": 0.8028186125622646,
"grad_norm": 0.6516674160957336,
"learning_rate": 8e-05,
"loss": 1.5509,
"step": 1652
},
{
"epoch": 0.8033045802454137,
"grad_norm": 0.6466244459152222,
"learning_rate": 8e-05,
"loss": 1.6498,
"step": 1653
},
{
"epoch": 0.8037905479285627,
"grad_norm": 0.6649731397628784,
"learning_rate": 8e-05,
"loss": 1.6592,
"step": 1654
},
{
"epoch": 0.8042765156117119,
"grad_norm": 0.6725707054138184,
"learning_rate": 8e-05,
"loss": 1.6602,
"step": 1655
},
{
"epoch": 0.8047624832948609,
"grad_norm": 0.652783215045929,
"learning_rate": 8e-05,
"loss": 1.6981,
"step": 1656
},
{
"epoch": 0.80524845097801,
"grad_norm": 0.6978988647460938,
"learning_rate": 8e-05,
"loss": 1.7201,
"step": 1657
},
{
"epoch": 0.805734418661159,
"grad_norm": 0.6671270728111267,
"learning_rate": 8e-05,
"loss": 1.6079,
"step": 1658
},
{
"epoch": 0.8062203863443081,
"grad_norm": 0.6724951267242432,
"learning_rate": 8e-05,
"loss": 1.7342,
"step": 1659
},
{
"epoch": 0.8067063540274572,
"grad_norm": 0.6563026905059814,
"learning_rate": 8e-05,
"loss": 1.6017,
"step": 1660
},
{
"epoch": 0.8071923217106063,
"grad_norm": 0.6624600291252136,
"learning_rate": 8e-05,
"loss": 1.7211,
"step": 1661
},
{
"epoch": 0.8076782893937553,
"grad_norm": 0.6483842730522156,
"learning_rate": 8e-05,
"loss": 1.6784,
"step": 1662
},
{
"epoch": 0.8081642570769044,
"grad_norm": 0.6401728987693787,
"learning_rate": 8e-05,
"loss": 1.7595,
"step": 1663
},
{
"epoch": 0.8086502247600534,
"grad_norm": 0.6619211435317993,
"learning_rate": 8e-05,
"loss": 1.6146,
"step": 1664
},
{
"epoch": 0.8091361924432026,
"grad_norm": 0.6928635835647583,
"learning_rate": 8e-05,
"loss": 1.7063,
"step": 1665
},
{
"epoch": 0.8096221601263516,
"grad_norm": 0.683233916759491,
"learning_rate": 8e-05,
"loss": 1.7526,
"step": 1666
},
{
"epoch": 0.8101081278095007,
"grad_norm": 0.679440438747406,
"learning_rate": 8e-05,
"loss": 1.633,
"step": 1667
},
{
"epoch": 0.8105940954926497,
"grad_norm": 0.6649330854415894,
"learning_rate": 8e-05,
"loss": 1.6395,
"step": 1668
},
{
"epoch": 0.8110800631757988,
"grad_norm": 0.6575503349304199,
"learning_rate": 8e-05,
"loss": 1.6441,
"step": 1669
},
{
"epoch": 0.8115660308589479,
"grad_norm": 0.6423405408859253,
"learning_rate": 8e-05,
"loss": 1.6277,
"step": 1670
},
{
"epoch": 0.812051998542097,
"grad_norm": 0.6560837030410767,
"learning_rate": 8e-05,
"loss": 1.645,
"step": 1671
},
{
"epoch": 0.812537966225246,
"grad_norm": 0.6727367639541626,
"learning_rate": 8e-05,
"loss": 1.7722,
"step": 1672
},
{
"epoch": 0.8130239339083951,
"grad_norm": 0.6503103971481323,
"learning_rate": 8e-05,
"loss": 1.7444,
"step": 1673
},
{
"epoch": 0.8135099015915441,
"grad_norm": 0.6658689975738525,
"learning_rate": 8e-05,
"loss": 1.6827,
"step": 1674
},
{
"epoch": 0.8139958692746933,
"grad_norm": 0.6826659440994263,
"learning_rate": 8e-05,
"loss": 1.7027,
"step": 1675
},
{
"epoch": 0.8144818369578423,
"grad_norm": 0.6767047643661499,
"learning_rate": 8e-05,
"loss": 1.6451,
"step": 1676
},
{
"epoch": 0.8149678046409914,
"grad_norm": 0.6819015145301819,
"learning_rate": 8e-05,
"loss": 1.6973,
"step": 1677
},
{
"epoch": 0.8154537723241404,
"grad_norm": 0.7023846507072449,
"learning_rate": 8e-05,
"loss": 1.6463,
"step": 1678
},
{
"epoch": 0.8159397400072895,
"grad_norm": 0.65855872631073,
"learning_rate": 8e-05,
"loss": 1.6196,
"step": 1679
},
{
"epoch": 0.8164257076904385,
"grad_norm": 0.6517550349235535,
"learning_rate": 8e-05,
"loss": 1.6227,
"step": 1680
},
{
"epoch": 0.8169116753735877,
"grad_norm": 0.6774021983146667,
"learning_rate": 8e-05,
"loss": 1.691,
"step": 1681
},
{
"epoch": 0.8173976430567367,
"grad_norm": 0.6862616539001465,
"learning_rate": 8e-05,
"loss": 1.6686,
"step": 1682
},
{
"epoch": 0.8178836107398858,
"grad_norm": 0.674959659576416,
"learning_rate": 8e-05,
"loss": 1.6293,
"step": 1683
},
{
"epoch": 0.8183695784230348,
"grad_norm": 0.6573413014411926,
"learning_rate": 8e-05,
"loss": 1.6629,
"step": 1684
},
{
"epoch": 0.8188555461061839,
"grad_norm": 0.6346238255500793,
"learning_rate": 8e-05,
"loss": 1.5621,
"step": 1685
},
{
"epoch": 0.8193415137893331,
"grad_norm": 0.71464604139328,
"learning_rate": 8e-05,
"loss": 1.7703,
"step": 1686
},
{
"epoch": 0.8198274814724821,
"grad_norm": 0.6695051193237305,
"learning_rate": 8e-05,
"loss": 1.7363,
"step": 1687
},
{
"epoch": 0.8203134491556312,
"grad_norm": 0.6487131714820862,
"learning_rate": 8e-05,
"loss": 1.5845,
"step": 1688
},
{
"epoch": 0.8207994168387802,
"grad_norm": 0.7074949741363525,
"learning_rate": 8e-05,
"loss": 1.6874,
"step": 1689
},
{
"epoch": 0.8212853845219293,
"grad_norm": 0.6579431295394897,
"learning_rate": 8e-05,
"loss": 1.654,
"step": 1690
},
{
"epoch": 0.8217713522050784,
"grad_norm": 0.6742346286773682,
"learning_rate": 8e-05,
"loss": 1.6576,
"step": 1691
},
{
"epoch": 0.8222573198882275,
"grad_norm": 0.6613596081733704,
"learning_rate": 8e-05,
"loss": 1.7023,
"step": 1692
},
{
"epoch": 0.8227432875713765,
"grad_norm": 0.7009580731391907,
"learning_rate": 8e-05,
"loss": 1.7069,
"step": 1693
},
{
"epoch": 0.8232292552545256,
"grad_norm": 0.6702130436897278,
"learning_rate": 8e-05,
"loss": 1.6375,
"step": 1694
},
{
"epoch": 0.8237152229376746,
"grad_norm": 0.6954506039619446,
"learning_rate": 8e-05,
"loss": 1.6986,
"step": 1695
},
{
"epoch": 0.8242011906208238,
"grad_norm": 0.6572551727294922,
"learning_rate": 8e-05,
"loss": 1.6645,
"step": 1696
},
{
"epoch": 0.8246871583039728,
"grad_norm": 0.705899178981781,
"learning_rate": 8e-05,
"loss": 1.7666,
"step": 1697
},
{
"epoch": 0.8251731259871219,
"grad_norm": 0.6812430024147034,
"learning_rate": 8e-05,
"loss": 1.6612,
"step": 1698
},
{
"epoch": 0.8256590936702709,
"grad_norm": 0.7121571898460388,
"learning_rate": 8e-05,
"loss": 1.6573,
"step": 1699
},
{
"epoch": 0.82614506135342,
"grad_norm": 0.678684413433075,
"learning_rate": 8e-05,
"loss": 1.6757,
"step": 1700
},
{
"epoch": 0.826631029036569,
"grad_norm": 0.6954318881034851,
"learning_rate": 8e-05,
"loss": 1.6626,
"step": 1701
},
{
"epoch": 0.8271169967197182,
"grad_norm": 0.6913411021232605,
"learning_rate": 8e-05,
"loss": 1.5989,
"step": 1702
},
{
"epoch": 0.8276029644028672,
"grad_norm": 0.7107827663421631,
"learning_rate": 8e-05,
"loss": 1.6652,
"step": 1703
},
{
"epoch": 0.8280889320860163,
"grad_norm": 0.6907193660736084,
"learning_rate": 8e-05,
"loss": 1.5557,
"step": 1704
},
{
"epoch": 0.8285748997691653,
"grad_norm": 0.6549775004386902,
"learning_rate": 8e-05,
"loss": 1.7031,
"step": 1705
},
{
"epoch": 0.8290608674523144,
"grad_norm": 0.7105585336685181,
"learning_rate": 8e-05,
"loss": 1.7689,
"step": 1706
},
{
"epoch": 0.8295468351354635,
"grad_norm": 0.6628891229629517,
"learning_rate": 8e-05,
"loss": 1.6635,
"step": 1707
},
{
"epoch": 0.8300328028186126,
"grad_norm": 0.6859626770019531,
"learning_rate": 8e-05,
"loss": 1.6045,
"step": 1708
},
{
"epoch": 0.8305187705017616,
"grad_norm": 0.6636143922805786,
"learning_rate": 8e-05,
"loss": 1.6099,
"step": 1709
},
{
"epoch": 0.8310047381849107,
"grad_norm": 0.6762420535087585,
"learning_rate": 8e-05,
"loss": 1.6716,
"step": 1710
},
{
"epoch": 0.8314907058680597,
"grad_norm": 0.6610211730003357,
"learning_rate": 8e-05,
"loss": 1.6556,
"step": 1711
},
{
"epoch": 0.8319766735512089,
"grad_norm": 0.6736442446708679,
"learning_rate": 8e-05,
"loss": 1.704,
"step": 1712
},
{
"epoch": 0.8324626412343579,
"grad_norm": 0.653008222579956,
"learning_rate": 8e-05,
"loss": 1.6496,
"step": 1713
},
{
"epoch": 0.832948608917507,
"grad_norm": 0.6692138910293579,
"learning_rate": 8e-05,
"loss": 1.6101,
"step": 1714
},
{
"epoch": 0.833434576600656,
"grad_norm": 0.6916235685348511,
"learning_rate": 8e-05,
"loss": 1.8379,
"step": 1715
},
{
"epoch": 0.8339205442838051,
"grad_norm": 0.7164344787597656,
"learning_rate": 8e-05,
"loss": 1.72,
"step": 1716
},
{
"epoch": 0.8344065119669541,
"grad_norm": 0.6661994457244873,
"learning_rate": 8e-05,
"loss": 1.6264,
"step": 1717
},
{
"epoch": 0.8348924796501033,
"grad_norm": 0.6573705673217773,
"learning_rate": 8e-05,
"loss": 1.5695,
"step": 1718
},
{
"epoch": 0.8353784473332524,
"grad_norm": 0.726266086101532,
"learning_rate": 8e-05,
"loss": 1.7715,
"step": 1719
},
{
"epoch": 0.8358644150164014,
"grad_norm": 0.6787089705467224,
"learning_rate": 8e-05,
"loss": 1.7132,
"step": 1720
},
{
"epoch": 0.8363503826995505,
"grad_norm": 0.6832931041717529,
"learning_rate": 8e-05,
"loss": 1.7205,
"step": 1721
},
{
"epoch": 0.8368363503826995,
"grad_norm": 0.694518506526947,
"learning_rate": 8e-05,
"loss": 1.7265,
"step": 1722
},
{
"epoch": 0.8373223180658487,
"grad_norm": 0.6822140216827393,
"learning_rate": 8e-05,
"loss": 1.5942,
"step": 1723
},
{
"epoch": 0.8378082857489977,
"grad_norm": 0.6725249886512756,
"learning_rate": 8e-05,
"loss": 1.6734,
"step": 1724
},
{
"epoch": 0.8382942534321468,
"grad_norm": 0.6663880348205566,
"learning_rate": 8e-05,
"loss": 1.6267,
"step": 1725
},
{
"epoch": 0.8387802211152958,
"grad_norm": 0.7269116640090942,
"learning_rate": 8e-05,
"loss": 1.7339,
"step": 1726
},
{
"epoch": 0.839266188798445,
"grad_norm": 0.6689306497573853,
"learning_rate": 8e-05,
"loss": 1.7258,
"step": 1727
},
{
"epoch": 0.839752156481594,
"grad_norm": 0.7352984547615051,
"learning_rate": 8e-05,
"loss": 1.7896,
"step": 1728
},
{
"epoch": 0.8402381241647431,
"grad_norm": 0.6966548562049866,
"learning_rate": 8e-05,
"loss": 1.7736,
"step": 1729
},
{
"epoch": 0.8407240918478921,
"grad_norm": 0.6895444393157959,
"learning_rate": 8e-05,
"loss": 1.6579,
"step": 1730
},
{
"epoch": 0.8412100595310412,
"grad_norm": 0.6928750872612,
"learning_rate": 8e-05,
"loss": 1.664,
"step": 1731
},
{
"epoch": 0.8416960272141902,
"grad_norm": 0.6821669936180115,
"learning_rate": 8e-05,
"loss": 1.7176,
"step": 1732
},
{
"epoch": 0.8421819948973394,
"grad_norm": 0.6883020997047424,
"learning_rate": 8e-05,
"loss": 1.7509,
"step": 1733
},
{
"epoch": 0.8426679625804884,
"grad_norm": 0.6700498461723328,
"learning_rate": 8e-05,
"loss": 1.7517,
"step": 1734
},
{
"epoch": 0.8431539302636375,
"grad_norm": 0.7519075274467468,
"learning_rate": 8e-05,
"loss": 1.737,
"step": 1735
},
{
"epoch": 0.8436398979467865,
"grad_norm": 0.6804175972938538,
"learning_rate": 8e-05,
"loss": 1.6785,
"step": 1736
},
{
"epoch": 0.8441258656299356,
"grad_norm": 0.7451002597808838,
"learning_rate": 8e-05,
"loss": 1.6459,
"step": 1737
},
{
"epoch": 0.8446118333130846,
"grad_norm": 0.7072980403900146,
"learning_rate": 8e-05,
"loss": 1.5769,
"step": 1738
},
{
"epoch": 0.8450978009962338,
"grad_norm": 0.7238991260528564,
"learning_rate": 8e-05,
"loss": 1.7186,
"step": 1739
},
{
"epoch": 0.8455837686793828,
"grad_norm": 0.6569130420684814,
"learning_rate": 8e-05,
"loss": 1.6084,
"step": 1740
},
{
"epoch": 0.8460697363625319,
"grad_norm": 0.6824528574943542,
"learning_rate": 8e-05,
"loss": 1.6799,
"step": 1741
},
{
"epoch": 0.8465557040456809,
"grad_norm": 0.6826590299606323,
"learning_rate": 8e-05,
"loss": 1.679,
"step": 1742
},
{
"epoch": 0.84704167172883,
"grad_norm": 0.6838353872299194,
"learning_rate": 8e-05,
"loss": 1.7255,
"step": 1743
},
{
"epoch": 0.8475276394119791,
"grad_norm": 0.713826060295105,
"learning_rate": 8e-05,
"loss": 1.7195,
"step": 1744
},
{
"epoch": 0.8480136070951282,
"grad_norm": 0.6586517095565796,
"learning_rate": 8e-05,
"loss": 1.6722,
"step": 1745
},
{
"epoch": 0.8484995747782772,
"grad_norm": 0.7085291743278503,
"learning_rate": 8e-05,
"loss": 1.6341,
"step": 1746
},
{
"epoch": 0.8489855424614263,
"grad_norm": 0.7082439064979553,
"learning_rate": 8e-05,
"loss": 1.785,
"step": 1747
},
{
"epoch": 0.8494715101445753,
"grad_norm": 0.7442111372947693,
"learning_rate": 8e-05,
"loss": 1.7758,
"step": 1748
},
{
"epoch": 0.8499574778277245,
"grad_norm": 0.691892147064209,
"learning_rate": 8e-05,
"loss": 1.7063,
"step": 1749
},
{
"epoch": 0.8504434455108735,
"grad_norm": 0.7040305137634277,
"learning_rate": 8e-05,
"loss": 1.6244,
"step": 1750
},
{
"epoch": 0.8509294131940226,
"grad_norm": 0.71602463722229,
"learning_rate": 8e-05,
"loss": 1.8298,
"step": 1751
},
{
"epoch": 0.8514153808771717,
"grad_norm": 0.670871913433075,
"learning_rate": 8e-05,
"loss": 1.6063,
"step": 1752
},
{
"epoch": 0.8519013485603207,
"grad_norm": 0.7218164205551147,
"learning_rate": 8e-05,
"loss": 1.8289,
"step": 1753
},
{
"epoch": 0.8523873162434699,
"grad_norm": 0.6693719029426575,
"learning_rate": 8e-05,
"loss": 1.7267,
"step": 1754
},
{
"epoch": 0.8528732839266189,
"grad_norm": 0.6950780153274536,
"learning_rate": 8e-05,
"loss": 1.7439,
"step": 1755
},
{
"epoch": 0.853359251609768,
"grad_norm": 0.6605235934257507,
"learning_rate": 8e-05,
"loss": 1.613,
"step": 1756
},
{
"epoch": 0.853845219292917,
"grad_norm": 0.7284364104270935,
"learning_rate": 8e-05,
"loss": 1.8269,
"step": 1757
},
{
"epoch": 0.8543311869760661,
"grad_norm": 0.7385477423667908,
"learning_rate": 8e-05,
"loss": 1.613,
"step": 1758
},
{
"epoch": 0.8548171546592152,
"grad_norm": 0.688685953617096,
"learning_rate": 8e-05,
"loss": 1.7648,
"step": 1759
},
{
"epoch": 0.8553031223423643,
"grad_norm": 0.669029176235199,
"learning_rate": 8e-05,
"loss": 1.584,
"step": 1760
},
{
"epoch": 0.8557890900255133,
"grad_norm": 0.6909924745559692,
"learning_rate": 8e-05,
"loss": 1.6938,
"step": 1761
},
{
"epoch": 0.8562750577086624,
"grad_norm": 0.6854159235954285,
"learning_rate": 8e-05,
"loss": 1.6391,
"step": 1762
},
{
"epoch": 0.8567610253918114,
"grad_norm": 0.6548773050308228,
"learning_rate": 8e-05,
"loss": 1.6091,
"step": 1763
},
{
"epoch": 0.8572469930749606,
"grad_norm": 0.6639196872711182,
"learning_rate": 8e-05,
"loss": 1.675,
"step": 1764
},
{
"epoch": 0.8577329607581096,
"grad_norm": 0.6739024519920349,
"learning_rate": 8e-05,
"loss": 1.7121,
"step": 1765
},
{
"epoch": 0.8582189284412587,
"grad_norm": 0.6737241744995117,
"learning_rate": 8e-05,
"loss": 1.6915,
"step": 1766
},
{
"epoch": 0.8587048961244077,
"grad_norm": 0.6587862372398376,
"learning_rate": 8e-05,
"loss": 1.7431,
"step": 1767
},
{
"epoch": 0.8591908638075568,
"grad_norm": 0.6910162568092346,
"learning_rate": 8e-05,
"loss": 1.7145,
"step": 1768
},
{
"epoch": 0.8596768314907058,
"grad_norm": 0.653241753578186,
"learning_rate": 8e-05,
"loss": 1.6481,
"step": 1769
},
{
"epoch": 0.860162799173855,
"grad_norm": 0.67633056640625,
"learning_rate": 8e-05,
"loss": 1.728,
"step": 1770
},
{
"epoch": 0.860648766857004,
"grad_norm": 0.6771542429924011,
"learning_rate": 8e-05,
"loss": 1.7096,
"step": 1771
},
{
"epoch": 0.8611347345401531,
"grad_norm": 0.6699610948562622,
"learning_rate": 8e-05,
"loss": 1.6338,
"step": 1772
},
{
"epoch": 0.8616207022233021,
"grad_norm": 0.6539821624755859,
"learning_rate": 8e-05,
"loss": 1.702,
"step": 1773
},
{
"epoch": 0.8621066699064512,
"grad_norm": 0.6515360474586487,
"learning_rate": 8e-05,
"loss": 1.6844,
"step": 1774
},
{
"epoch": 0.8625926375896003,
"grad_norm": 0.6994085907936096,
"learning_rate": 8e-05,
"loss": 1.7161,
"step": 1775
},
{
"epoch": 0.8630786052727494,
"grad_norm": 0.6588917970657349,
"learning_rate": 8e-05,
"loss": 1.7202,
"step": 1776
},
{
"epoch": 0.8635645729558984,
"grad_norm": 0.682744026184082,
"learning_rate": 8e-05,
"loss": 1.6886,
"step": 1777
},
{
"epoch": 0.8640505406390475,
"grad_norm": 0.6884380578994751,
"learning_rate": 8e-05,
"loss": 1.8086,
"step": 1778
},
{
"epoch": 0.8645365083221965,
"grad_norm": 0.6964172720909119,
"learning_rate": 8e-05,
"loss": 1.6364,
"step": 1779
},
{
"epoch": 0.8650224760053457,
"grad_norm": 0.6899030208587646,
"learning_rate": 8e-05,
"loss": 1.7668,
"step": 1780
},
{
"epoch": 0.8655084436884947,
"grad_norm": 0.6597136855125427,
"learning_rate": 8e-05,
"loss": 1.729,
"step": 1781
},
{
"epoch": 0.8659944113716438,
"grad_norm": 0.6974450945854187,
"learning_rate": 8e-05,
"loss": 1.8707,
"step": 1782
},
{
"epoch": 0.8664803790547928,
"grad_norm": 1.038147211074829,
"learning_rate": 8e-05,
"loss": 1.6862,
"step": 1783
},
{
"epoch": 0.8669663467379419,
"grad_norm": 0.6581987738609314,
"learning_rate": 8e-05,
"loss": 1.7526,
"step": 1784
},
{
"epoch": 0.8674523144210909,
"grad_norm": 0.6819844245910645,
"learning_rate": 8e-05,
"loss": 1.6975,
"step": 1785
},
{
"epoch": 0.8679382821042401,
"grad_norm": 0.6529338955879211,
"learning_rate": 8e-05,
"loss": 1.7288,
"step": 1786
},
{
"epoch": 0.8684242497873892,
"grad_norm": 0.6735305190086365,
"learning_rate": 8e-05,
"loss": 1.7271,
"step": 1787
},
{
"epoch": 0.8689102174705382,
"grad_norm": 0.6424468755722046,
"learning_rate": 8e-05,
"loss": 1.681,
"step": 1788
},
{
"epoch": 0.8693961851536873,
"grad_norm": 0.7199828624725342,
"learning_rate": 8e-05,
"loss": 1.6234,
"step": 1789
},
{
"epoch": 0.8698821528368363,
"grad_norm": 0.6636258363723755,
"learning_rate": 8e-05,
"loss": 1.7106,
"step": 1790
},
{
"epoch": 0.8703681205199855,
"grad_norm": 0.6772566437721252,
"learning_rate": 8e-05,
"loss": 1.7446,
"step": 1791
},
{
"epoch": 0.8708540882031345,
"grad_norm": 0.6450766324996948,
"learning_rate": 8e-05,
"loss": 1.6259,
"step": 1792
},
{
"epoch": 0.8713400558862836,
"grad_norm": 0.7758142352104187,
"learning_rate": 8e-05,
"loss": 1.7174,
"step": 1793
},
{
"epoch": 0.8718260235694326,
"grad_norm": 0.6626909375190735,
"learning_rate": 8e-05,
"loss": 1.6014,
"step": 1794
},
{
"epoch": 0.8723119912525817,
"grad_norm": 0.6452525854110718,
"learning_rate": 8e-05,
"loss": 1.6926,
"step": 1795
},
{
"epoch": 0.8727979589357308,
"grad_norm": 0.70649254322052,
"learning_rate": 8e-05,
"loss": 1.7034,
"step": 1796
},
{
"epoch": 0.8732839266188799,
"grad_norm": 0.6932821273803711,
"learning_rate": 8e-05,
"loss": 1.6843,
"step": 1797
},
{
"epoch": 0.8737698943020289,
"grad_norm": 0.65278160572052,
"learning_rate": 8e-05,
"loss": 1.6776,
"step": 1798
},
{
"epoch": 0.874255861985178,
"grad_norm": 0.6483907699584961,
"learning_rate": 8e-05,
"loss": 1.681,
"step": 1799
},
{
"epoch": 0.874741829668327,
"grad_norm": 0.7433518171310425,
"learning_rate": 8e-05,
"loss": 1.7305,
"step": 1800
},
{
"epoch": 0.8752277973514762,
"grad_norm": 0.6555415987968445,
"learning_rate": 8e-05,
"loss": 1.6179,
"step": 1801
},
{
"epoch": 0.8757137650346252,
"grad_norm": 0.6878352761268616,
"learning_rate": 8e-05,
"loss": 1.6793,
"step": 1802
},
{
"epoch": 0.8761997327177743,
"grad_norm": 0.6694982051849365,
"learning_rate": 8e-05,
"loss": 1.7174,
"step": 1803
},
{
"epoch": 0.8766857004009233,
"grad_norm": 0.6793036460876465,
"learning_rate": 8e-05,
"loss": 1.6446,
"step": 1804
},
{
"epoch": 0.8771716680840724,
"grad_norm": 0.636803925037384,
"learning_rate": 8e-05,
"loss": 1.606,
"step": 1805
},
{
"epoch": 0.8776576357672214,
"grad_norm": 0.6755457520484924,
"learning_rate": 8e-05,
"loss": 1.6639,
"step": 1806
},
{
"epoch": 0.8781436034503706,
"grad_norm": 0.6671308279037476,
"learning_rate": 8e-05,
"loss": 1.6524,
"step": 1807
},
{
"epoch": 0.8786295711335196,
"grad_norm": 0.6934971809387207,
"learning_rate": 8e-05,
"loss": 1.4992,
"step": 1808
},
{
"epoch": 0.8791155388166687,
"grad_norm": 0.6724815964698792,
"learning_rate": 8e-05,
"loss": 1.6327,
"step": 1809
},
{
"epoch": 0.8796015064998177,
"grad_norm": 0.6763851642608643,
"learning_rate": 8e-05,
"loss": 1.7384,
"step": 1810
},
{
"epoch": 0.8800874741829668,
"grad_norm": 0.6741973757743835,
"learning_rate": 8e-05,
"loss": 1.6962,
"step": 1811
},
{
"epoch": 0.8805734418661159,
"grad_norm": 0.665202796459198,
"learning_rate": 8e-05,
"loss": 1.6914,
"step": 1812
},
{
"epoch": 0.881059409549265,
"grad_norm": 0.6968621611595154,
"learning_rate": 8e-05,
"loss": 1.7722,
"step": 1813
},
{
"epoch": 0.881545377232414,
"grad_norm": 0.6793668866157532,
"learning_rate": 8e-05,
"loss": 1.7416,
"step": 1814
},
{
"epoch": 0.8820313449155631,
"grad_norm": 0.6444291472434998,
"learning_rate": 8e-05,
"loss": 1.6307,
"step": 1815
},
{
"epoch": 0.8825173125987121,
"grad_norm": 0.6695525646209717,
"learning_rate": 8e-05,
"loss": 1.7799,
"step": 1816
},
{
"epoch": 0.8830032802818613,
"grad_norm": 0.6512060165405273,
"learning_rate": 8e-05,
"loss": 1.7006,
"step": 1817
},
{
"epoch": 0.8834892479650103,
"grad_norm": 0.6590713858604431,
"learning_rate": 8e-05,
"loss": 1.6507,
"step": 1818
},
{
"epoch": 0.8839752156481594,
"grad_norm": 0.6567363739013672,
"learning_rate": 8e-05,
"loss": 1.7363,
"step": 1819
},
{
"epoch": 0.8844611833313085,
"grad_norm": 0.6752829551696777,
"learning_rate": 8e-05,
"loss": 1.6546,
"step": 1820
},
{
"epoch": 0.8849471510144575,
"grad_norm": 0.6696698665618896,
"learning_rate": 8e-05,
"loss": 1.6801,
"step": 1821
},
{
"epoch": 0.8854331186976067,
"grad_norm": 0.6774300932884216,
"learning_rate": 8e-05,
"loss": 1.7286,
"step": 1822
},
{
"epoch": 0.8859190863807557,
"grad_norm": 0.6522386074066162,
"learning_rate": 8e-05,
"loss": 1.5695,
"step": 1823
},
{
"epoch": 0.8864050540639048,
"grad_norm": 0.6667071580886841,
"learning_rate": 8e-05,
"loss": 1.6367,
"step": 1824
},
{
"epoch": 0.8868910217470538,
"grad_norm": 0.6633548736572266,
"learning_rate": 8e-05,
"loss": 1.7939,
"step": 1825
},
{
"epoch": 0.8873769894302029,
"grad_norm": 0.6646820306777954,
"learning_rate": 8e-05,
"loss": 1.6714,
"step": 1826
},
{
"epoch": 0.887862957113352,
"grad_norm": 0.6537835001945496,
"learning_rate": 8e-05,
"loss": 1.6993,
"step": 1827
},
{
"epoch": 0.8883489247965011,
"grad_norm": 0.676357090473175,
"learning_rate": 8e-05,
"loss": 1.7023,
"step": 1828
},
{
"epoch": 0.8888348924796501,
"grad_norm": 0.6533125042915344,
"learning_rate": 8e-05,
"loss": 1.681,
"step": 1829
},
{
"epoch": 0.8893208601627992,
"grad_norm": 0.6852530241012573,
"learning_rate": 8e-05,
"loss": 1.7522,
"step": 1830
},
{
"epoch": 0.8898068278459482,
"grad_norm": 0.7191981077194214,
"learning_rate": 8e-05,
"loss": 1.7699,
"step": 1831
},
{
"epoch": 0.8902927955290973,
"grad_norm": 0.6434154510498047,
"learning_rate": 8e-05,
"loss": 1.6594,
"step": 1832
},
{
"epoch": 0.8907787632122464,
"grad_norm": 0.6748992800712585,
"learning_rate": 8e-05,
"loss": 1.6795,
"step": 1833
},
{
"epoch": 0.8912647308953955,
"grad_norm": 0.6599833965301514,
"learning_rate": 8e-05,
"loss": 1.5998,
"step": 1834
},
{
"epoch": 0.8917506985785445,
"grad_norm": 0.699705183506012,
"learning_rate": 8e-05,
"loss": 1.5868,
"step": 1835
},
{
"epoch": 0.8922366662616936,
"grad_norm": 0.6916149854660034,
"learning_rate": 8e-05,
"loss": 1.7316,
"step": 1836
},
{
"epoch": 0.8927226339448426,
"grad_norm": 0.6622377038002014,
"learning_rate": 8e-05,
"loss": 1.6823,
"step": 1837
},
{
"epoch": 0.8932086016279918,
"grad_norm": 0.6920107007026672,
"learning_rate": 8e-05,
"loss": 1.6552,
"step": 1838
},
{
"epoch": 0.8936945693111408,
"grad_norm": 0.6712560653686523,
"learning_rate": 8e-05,
"loss": 1.75,
"step": 1839
},
{
"epoch": 0.8941805369942899,
"grad_norm": 0.6574907898902893,
"learning_rate": 8e-05,
"loss": 1.6023,
"step": 1840
},
{
"epoch": 0.8946665046774389,
"grad_norm": 0.6636672019958496,
"learning_rate": 8e-05,
"loss": 1.7248,
"step": 1841
},
{
"epoch": 0.895152472360588,
"grad_norm": 0.7205569744110107,
"learning_rate": 8e-05,
"loss": 1.7368,
"step": 1842
},
{
"epoch": 0.895638440043737,
"grad_norm": 0.6884496808052063,
"learning_rate": 8e-05,
"loss": 1.6167,
"step": 1843
},
{
"epoch": 0.8961244077268862,
"grad_norm": 0.6836435198783875,
"learning_rate": 8e-05,
"loss": 1.7493,
"step": 1844
},
{
"epoch": 0.8966103754100352,
"grad_norm": 0.685732364654541,
"learning_rate": 8e-05,
"loss": 1.6886,
"step": 1845
},
{
"epoch": 0.8970963430931843,
"grad_norm": 0.6702524423599243,
"learning_rate": 8e-05,
"loss": 1.7277,
"step": 1846
},
{
"epoch": 0.8975823107763333,
"grad_norm": 0.6741942167282104,
"learning_rate": 8e-05,
"loss": 1.6611,
"step": 1847
},
{
"epoch": 0.8980682784594824,
"grad_norm": 0.6792390942573547,
"learning_rate": 8e-05,
"loss": 1.6092,
"step": 1848
},
{
"epoch": 0.8985542461426315,
"grad_norm": 0.7039898037910461,
"learning_rate": 8e-05,
"loss": 1.7165,
"step": 1849
},
{
"epoch": 0.8990402138257806,
"grad_norm": 0.6669453978538513,
"learning_rate": 8e-05,
"loss": 1.6709,
"step": 1850
},
{
"epoch": 0.8995261815089296,
"grad_norm": 0.6652847528457642,
"learning_rate": 8e-05,
"loss": 1.659,
"step": 1851
},
{
"epoch": 0.9000121491920787,
"grad_norm": 0.6428386569023132,
"learning_rate": 8e-05,
"loss": 1.5768,
"step": 1852
},
{
"epoch": 0.9004981168752278,
"grad_norm": 0.6929381489753723,
"learning_rate": 8e-05,
"loss": 1.6371,
"step": 1853
},
{
"epoch": 0.9009840845583769,
"grad_norm": 0.6701197028160095,
"learning_rate": 8e-05,
"loss": 1.7295,
"step": 1854
},
{
"epoch": 0.901470052241526,
"grad_norm": 0.691456139087677,
"learning_rate": 8e-05,
"loss": 1.7385,
"step": 1855
},
{
"epoch": 0.901956019924675,
"grad_norm": 0.6817391514778137,
"learning_rate": 8e-05,
"loss": 1.7548,
"step": 1856
},
{
"epoch": 0.9024419876078241,
"grad_norm": 0.681011438369751,
"learning_rate": 8e-05,
"loss": 1.6919,
"step": 1857
},
{
"epoch": 0.9029279552909731,
"grad_norm": 0.6870824694633484,
"learning_rate": 8e-05,
"loss": 1.7447,
"step": 1858
},
{
"epoch": 0.9034139229741223,
"grad_norm": 0.6770219206809998,
"learning_rate": 8e-05,
"loss": 1.6821,
"step": 1859
},
{
"epoch": 0.9038998906572713,
"grad_norm": 0.6652222871780396,
"learning_rate": 8e-05,
"loss": 1.6341,
"step": 1860
},
{
"epoch": 0.9043858583404204,
"grad_norm": 0.6655253767967224,
"learning_rate": 8e-05,
"loss": 1.6195,
"step": 1861
},
{
"epoch": 0.9048718260235694,
"grad_norm": 0.6936423182487488,
"learning_rate": 8e-05,
"loss": 1.7935,
"step": 1862
},
{
"epoch": 0.9053577937067185,
"grad_norm": 0.6778640747070312,
"learning_rate": 8e-05,
"loss": 1.7443,
"step": 1863
},
{
"epoch": 0.9058437613898676,
"grad_norm": 0.6915225386619568,
"learning_rate": 8e-05,
"loss": 1.7808,
"step": 1864
},
{
"epoch": 0.9063297290730167,
"grad_norm": 0.7043119668960571,
"learning_rate": 8e-05,
"loss": 1.7587,
"step": 1865
},
{
"epoch": 0.9068156967561657,
"grad_norm": 0.6860661506652832,
"learning_rate": 8e-05,
"loss": 1.6495,
"step": 1866
},
{
"epoch": 0.9073016644393148,
"grad_norm": 0.6997127532958984,
"learning_rate": 8e-05,
"loss": 1.7134,
"step": 1867
},
{
"epoch": 0.9077876321224638,
"grad_norm": 0.7089009881019592,
"learning_rate": 8e-05,
"loss": 1.7566,
"step": 1868
},
{
"epoch": 0.908273599805613,
"grad_norm": 0.7275925278663635,
"learning_rate": 8e-05,
"loss": 1.7301,
"step": 1869
},
{
"epoch": 0.908759567488762,
"grad_norm": 0.6632089018821716,
"learning_rate": 8e-05,
"loss": 1.6667,
"step": 1870
},
{
"epoch": 0.9092455351719111,
"grad_norm": 0.7107716798782349,
"learning_rate": 8e-05,
"loss": 1.6661,
"step": 1871
},
{
"epoch": 0.9097315028550601,
"grad_norm": 0.6700002551078796,
"learning_rate": 8e-05,
"loss": 1.653,
"step": 1872
},
{
"epoch": 0.9102174705382092,
"grad_norm": 0.6595785617828369,
"learning_rate": 8e-05,
"loss": 1.6834,
"step": 1873
},
{
"epoch": 0.9107034382213582,
"grad_norm": 0.6953185200691223,
"learning_rate": 8e-05,
"loss": 1.7588,
"step": 1874
},
{
"epoch": 0.9111894059045074,
"grad_norm": 0.7098914384841919,
"learning_rate": 8e-05,
"loss": 1.7344,
"step": 1875
},
{
"epoch": 0.9116753735876564,
"grad_norm": 0.6820032000541687,
"learning_rate": 8e-05,
"loss": 1.7254,
"step": 1876
},
{
"epoch": 0.9121613412708055,
"grad_norm": 0.6732338666915894,
"learning_rate": 8e-05,
"loss": 1.752,
"step": 1877
},
{
"epoch": 0.9126473089539545,
"grad_norm": 0.6845636963844299,
"learning_rate": 8e-05,
"loss": 1.8333,
"step": 1878
},
{
"epoch": 0.9131332766371036,
"grad_norm": 0.6543975472450256,
"learning_rate": 8e-05,
"loss": 1.6452,
"step": 1879
},
{
"epoch": 0.9136192443202527,
"grad_norm": 0.7024291157722473,
"learning_rate": 8e-05,
"loss": 1.6514,
"step": 1880
},
{
"epoch": 0.9141052120034018,
"grad_norm": 0.6852805018424988,
"learning_rate": 8e-05,
"loss": 1.7149,
"step": 1881
},
{
"epoch": 0.9145911796865508,
"grad_norm": 0.6897751092910767,
"learning_rate": 8e-05,
"loss": 1.6209,
"step": 1882
},
{
"epoch": 0.9150771473696999,
"grad_norm": 0.6961401104927063,
"learning_rate": 8e-05,
"loss": 1.7422,
"step": 1883
},
{
"epoch": 0.9155631150528489,
"grad_norm": 0.7274015545845032,
"learning_rate": 8e-05,
"loss": 1.7239,
"step": 1884
},
{
"epoch": 0.916049082735998,
"grad_norm": 0.6880594491958618,
"learning_rate": 8e-05,
"loss": 1.722,
"step": 1885
},
{
"epoch": 0.9165350504191472,
"grad_norm": 0.6525025963783264,
"learning_rate": 8e-05,
"loss": 1.5735,
"step": 1886
},
{
"epoch": 0.9170210181022962,
"grad_norm": 0.704132080078125,
"learning_rate": 8e-05,
"loss": 1.6057,
"step": 1887
},
{
"epoch": 0.9175069857854453,
"grad_norm": 0.6710562109947205,
"learning_rate": 8e-05,
"loss": 1.7014,
"step": 1888
},
{
"epoch": 0.9179929534685943,
"grad_norm": 0.6694965362548828,
"learning_rate": 8e-05,
"loss": 1.6529,
"step": 1889
},
{
"epoch": 0.9184789211517435,
"grad_norm": 0.6747124791145325,
"learning_rate": 8e-05,
"loss": 1.7293,
"step": 1890
},
{
"epoch": 0.9189648888348925,
"grad_norm": 0.6903659105300903,
"learning_rate": 8e-05,
"loss": 1.7617,
"step": 1891
},
{
"epoch": 0.9194508565180416,
"grad_norm": 0.6632493138313293,
"learning_rate": 8e-05,
"loss": 1.621,
"step": 1892
},
{
"epoch": 0.9199368242011906,
"grad_norm": 0.6672336459159851,
"learning_rate": 8e-05,
"loss": 1.6254,
"step": 1893
},
{
"epoch": 0.9204227918843397,
"grad_norm": 0.668280839920044,
"learning_rate": 8e-05,
"loss": 1.6998,
"step": 1894
},
{
"epoch": 0.9209087595674887,
"grad_norm": 0.6913830637931824,
"learning_rate": 8e-05,
"loss": 1.6751,
"step": 1895
},
{
"epoch": 0.9213947272506379,
"grad_norm": 0.6648537516593933,
"learning_rate": 8e-05,
"loss": 1.6817,
"step": 1896
},
{
"epoch": 0.9218806949337869,
"grad_norm": 0.7031324505805969,
"learning_rate": 8e-05,
"loss": 1.8166,
"step": 1897
},
{
"epoch": 0.922366662616936,
"grad_norm": 0.6807796359062195,
"learning_rate": 8e-05,
"loss": 1.7041,
"step": 1898
},
{
"epoch": 0.922852630300085,
"grad_norm": 0.6511052250862122,
"learning_rate": 8e-05,
"loss": 1.694,
"step": 1899
},
{
"epoch": 0.9233385979832341,
"grad_norm": 0.6683751940727234,
"learning_rate": 8e-05,
"loss": 1.7229,
"step": 1900
},
{
"epoch": 0.9238245656663832,
"grad_norm": 0.6733470559120178,
"learning_rate": 8e-05,
"loss": 1.741,
"step": 1901
},
{
"epoch": 0.9243105333495323,
"grad_norm": 0.6818079948425293,
"learning_rate": 8e-05,
"loss": 1.7791,
"step": 1902
},
{
"epoch": 0.9247965010326813,
"grad_norm": 0.6888931393623352,
"learning_rate": 8e-05,
"loss": 1.7789,
"step": 1903
},
{
"epoch": 0.9252824687158304,
"grad_norm": 0.6756711006164551,
"learning_rate": 8e-05,
"loss": 1.6754,
"step": 1904
},
{
"epoch": 0.9257684363989794,
"grad_norm": 0.6731768846511841,
"learning_rate": 8e-05,
"loss": 1.7241,
"step": 1905
},
{
"epoch": 0.9262544040821286,
"grad_norm": 0.6658243536949158,
"learning_rate": 8e-05,
"loss": 1.7372,
"step": 1906
},
{
"epoch": 0.9267403717652776,
"grad_norm": 0.6529601812362671,
"learning_rate": 8e-05,
"loss": 1.6711,
"step": 1907
},
{
"epoch": 0.9272263394484267,
"grad_norm": 0.6861560344696045,
"learning_rate": 8e-05,
"loss": 1.669,
"step": 1908
},
{
"epoch": 0.9277123071315757,
"grad_norm": 0.6518715620040894,
"learning_rate": 8e-05,
"loss": 1.5744,
"step": 1909
},
{
"epoch": 0.9281982748147248,
"grad_norm": 0.6419810652732849,
"learning_rate": 8e-05,
"loss": 1.6217,
"step": 1910
},
{
"epoch": 0.9286842424978738,
"grad_norm": 0.6589051485061646,
"learning_rate": 8e-05,
"loss": 1.67,
"step": 1911
},
{
"epoch": 0.929170210181023,
"grad_norm": 0.8275018334388733,
"learning_rate": 8e-05,
"loss": 1.7264,
"step": 1912
},
{
"epoch": 0.929656177864172,
"grad_norm": 0.6712974309921265,
"learning_rate": 8e-05,
"loss": 1.6506,
"step": 1913
},
{
"epoch": 0.9301421455473211,
"grad_norm": 0.6550801396369934,
"learning_rate": 8e-05,
"loss": 1.6848,
"step": 1914
},
{
"epoch": 0.9306281132304701,
"grad_norm": 0.6738362908363342,
"learning_rate": 8e-05,
"loss": 1.6446,
"step": 1915
},
{
"epoch": 0.9311140809136192,
"grad_norm": 0.6732152104377747,
"learning_rate": 8e-05,
"loss": 1.6632,
"step": 1916
},
{
"epoch": 0.9316000485967683,
"grad_norm": 0.6406624913215637,
"learning_rate": 8e-05,
"loss": 1.5793,
"step": 1917
},
{
"epoch": 0.9320860162799174,
"grad_norm": 0.6931325197219849,
"learning_rate": 8e-05,
"loss": 1.7022,
"step": 1918
},
{
"epoch": 0.9325719839630665,
"grad_norm": 0.6737642288208008,
"learning_rate": 8e-05,
"loss": 1.6845,
"step": 1919
},
{
"epoch": 0.9330579516462155,
"grad_norm": 0.6730310320854187,
"learning_rate": 8e-05,
"loss": 1.7769,
"step": 1920
},
{
"epoch": 0.9335439193293646,
"grad_norm": 0.6703587174415588,
"learning_rate": 8e-05,
"loss": 1.6726,
"step": 1921
},
{
"epoch": 0.9340298870125137,
"grad_norm": 0.6555189490318298,
"learning_rate": 8e-05,
"loss": 1.5739,
"step": 1922
},
{
"epoch": 0.9345158546956628,
"grad_norm": 0.6777920126914978,
"learning_rate": 8e-05,
"loss": 1.713,
"step": 1923
},
{
"epoch": 0.9350018223788118,
"grad_norm": 0.6492234468460083,
"learning_rate": 8e-05,
"loss": 1.6029,
"step": 1924
},
{
"epoch": 0.9354877900619609,
"grad_norm": 0.7048563957214355,
"learning_rate": 8e-05,
"loss": 1.8503,
"step": 1925
},
{
"epoch": 0.9359737577451099,
"grad_norm": 81.88847351074219,
"learning_rate": 8e-05,
"loss": 2.8225,
"step": 1926
},
{
"epoch": 0.9364597254282591,
"grad_norm": 0.6970681548118591,
"learning_rate": 8e-05,
"loss": 1.6511,
"step": 1927
},
{
"epoch": 0.9369456931114081,
"grad_norm": 1.0791670083999634,
"learning_rate": 8e-05,
"loss": 1.6184,
"step": 1928
},
{
"epoch": 0.9374316607945572,
"grad_norm": 0.6755528450012207,
"learning_rate": 8e-05,
"loss": 1.7592,
"step": 1929
},
{
"epoch": 0.9379176284777062,
"grad_norm": 0.694639265537262,
"learning_rate": 8e-05,
"loss": 1.699,
"step": 1930
},
{
"epoch": 0.9384035961608553,
"grad_norm": 0.6953614354133606,
"learning_rate": 8e-05,
"loss": 1.7019,
"step": 1931
},
{
"epoch": 0.9388895638440043,
"grad_norm": 0.6991117000579834,
"learning_rate": 8e-05,
"loss": 1.8366,
"step": 1932
},
{
"epoch": 0.9393755315271535,
"grad_norm": 0.7080687880516052,
"learning_rate": 8e-05,
"loss": 1.7688,
"step": 1933
},
{
"epoch": 0.9398614992103025,
"grad_norm": 0.6987957954406738,
"learning_rate": 8e-05,
"loss": 1.8652,
"step": 1934
},
{
"epoch": 0.9403474668934516,
"grad_norm": 0.6736930012702942,
"learning_rate": 8e-05,
"loss": 1.6071,
"step": 1935
},
{
"epoch": 0.9408334345766006,
"grad_norm": 0.6800463199615479,
"learning_rate": 8e-05,
"loss": 1.6788,
"step": 1936
},
{
"epoch": 0.9413194022597497,
"grad_norm": 0.681482195854187,
"learning_rate": 8e-05,
"loss": 1.6952,
"step": 1937
},
{
"epoch": 0.9418053699428988,
"grad_norm": 0.6519253849983215,
"learning_rate": 8e-05,
"loss": 1.7476,
"step": 1938
},
{
"epoch": 0.9422913376260479,
"grad_norm": 0.6812343597412109,
"learning_rate": 8e-05,
"loss": 1.6996,
"step": 1939
},
{
"epoch": 0.9427773053091969,
"grad_norm": 0.6913323402404785,
"learning_rate": 8e-05,
"loss": 1.8085,
"step": 1940
},
{
"epoch": 0.943263272992346,
"grad_norm": 0.6806484460830688,
"learning_rate": 8e-05,
"loss": 1.6121,
"step": 1941
},
{
"epoch": 0.943749240675495,
"grad_norm": 0.6447448134422302,
"learning_rate": 8e-05,
"loss": 1.5757,
"step": 1942
},
{
"epoch": 0.9442352083586442,
"grad_norm": 0.7153680920600891,
"learning_rate": 8e-05,
"loss": 1.684,
"step": 1943
},
{
"epoch": 0.9447211760417932,
"grad_norm": 565.7562866210938,
"learning_rate": 8e-05,
"loss": 2.1396,
"step": 1944
},
{
"epoch": 0.9452071437249423,
"grad_norm": 0.8228261470794678,
"learning_rate": 8e-05,
"loss": 1.7833,
"step": 1945
},
{
"epoch": 0.9456931114080913,
"grad_norm": 0.6688120365142822,
"learning_rate": 8e-05,
"loss": 1.652,
"step": 1946
},
{
"epoch": 0.9461790790912404,
"grad_norm": 0.6694374084472656,
"learning_rate": 8e-05,
"loss": 1.6227,
"step": 1947
},
{
"epoch": 0.9466650467743895,
"grad_norm": 0.6642143130302429,
"learning_rate": 8e-05,
"loss": 1.6887,
"step": 1948
},
{
"epoch": 0.9471510144575386,
"grad_norm": 0.7073469161987305,
"learning_rate": 8e-05,
"loss": 1.6873,
"step": 1949
},
{
"epoch": 0.9476369821406876,
"grad_norm": 0.6868941783905029,
"learning_rate": 8e-05,
"loss": 1.6839,
"step": 1950
},
{
"epoch": 0.9481229498238367,
"grad_norm": 1.081831932067871,
"learning_rate": 8e-05,
"loss": 1.621,
"step": 1951
},
{
"epoch": 0.9486089175069858,
"grad_norm": 0.6740509867668152,
"learning_rate": 8e-05,
"loss": 1.6968,
"step": 1952
},
{
"epoch": 0.9490948851901349,
"grad_norm": 0.6448697447776794,
"learning_rate": 8e-05,
"loss": 1.6187,
"step": 1953
},
{
"epoch": 0.949580852873284,
"grad_norm": 0.7188392877578735,
"learning_rate": 8e-05,
"loss": 1.6475,
"step": 1954
},
{
"epoch": 0.950066820556433,
"grad_norm": 0.6668784618377686,
"learning_rate": 8e-05,
"loss": 1.7158,
"step": 1955
},
{
"epoch": 0.9505527882395821,
"grad_norm": 0.723740816116333,
"learning_rate": 8e-05,
"loss": 1.7552,
"step": 1956
},
{
"epoch": 0.9510387559227311,
"grad_norm": 0.7027231454849243,
"learning_rate": 8e-05,
"loss": 1.7371,
"step": 1957
},
{
"epoch": 0.9515247236058803,
"grad_norm": 0.6790149211883545,
"learning_rate": 8e-05,
"loss": 1.6464,
"step": 1958
},
{
"epoch": 0.9520106912890293,
"grad_norm": 0.6989905834197998,
"learning_rate": 8e-05,
"loss": 1.7027,
"step": 1959
},
{
"epoch": 0.9524966589721784,
"grad_norm": 0.6942729949951172,
"learning_rate": 8e-05,
"loss": 1.6516,
"step": 1960
},
{
"epoch": 0.9529826266553274,
"grad_norm": 0.6786219477653503,
"learning_rate": 8e-05,
"loss": 1.6126,
"step": 1961
},
{
"epoch": 0.9534685943384765,
"grad_norm": 0.6560273170471191,
"learning_rate": 8e-05,
"loss": 1.5823,
"step": 1962
},
{
"epoch": 0.9539545620216255,
"grad_norm": 1.008255124092102,
"learning_rate": 8e-05,
"loss": 1.654,
"step": 1963
},
{
"epoch": 0.9544405297047747,
"grad_norm": 0.6998074650764465,
"learning_rate": 8e-05,
"loss": 1.6973,
"step": 1964
},
{
"epoch": 0.9549264973879237,
"grad_norm": 0.6675659418106079,
"learning_rate": 8e-05,
"loss": 1.6714,
"step": 1965
},
{
"epoch": 0.9554124650710728,
"grad_norm": 0.6875411868095398,
"learning_rate": 8e-05,
"loss": 1.6627,
"step": 1966
},
{
"epoch": 0.9558984327542218,
"grad_norm": 0.6995173096656799,
"learning_rate": 8e-05,
"loss": 1.7797,
"step": 1967
},
{
"epoch": 0.9563844004373709,
"grad_norm": 0.6697837114334106,
"learning_rate": 8e-05,
"loss": 1.6375,
"step": 1968
},
{
"epoch": 0.95687036812052,
"grad_norm": 0.6798880696296692,
"learning_rate": 8e-05,
"loss": 1.5995,
"step": 1969
},
{
"epoch": 0.9573563358036691,
"grad_norm": 0.6646199822425842,
"learning_rate": 8e-05,
"loss": 1.6801,
"step": 1970
},
{
"epoch": 0.9578423034868181,
"grad_norm": 0.6851860284805298,
"learning_rate": 8e-05,
"loss": 1.7365,
"step": 1971
},
{
"epoch": 0.9583282711699672,
"grad_norm": 0.6854037642478943,
"learning_rate": 8e-05,
"loss": 1.6446,
"step": 1972
},
{
"epoch": 0.9588142388531162,
"grad_norm": 0.7328615188598633,
"learning_rate": 8e-05,
"loss": 1.7077,
"step": 1973
},
{
"epoch": 0.9593002065362654,
"grad_norm": 0.6889229416847229,
"learning_rate": 8e-05,
"loss": 1.8134,
"step": 1974
},
{
"epoch": 0.9597861742194144,
"grad_norm": 0.6959888339042664,
"learning_rate": 8e-05,
"loss": 1.731,
"step": 1975
},
{
"epoch": 0.9602721419025635,
"grad_norm": 0.6775850057601929,
"learning_rate": 8e-05,
"loss": 1.6377,
"step": 1976
},
{
"epoch": 0.9607581095857125,
"grad_norm": 0.6867715716362,
"learning_rate": 8e-05,
"loss": 1.6263,
"step": 1977
},
{
"epoch": 0.9612440772688616,
"grad_norm": 0.6628663539886475,
"learning_rate": 8e-05,
"loss": 1.679,
"step": 1978
},
{
"epoch": 0.9617300449520106,
"grad_norm": 0.7282610535621643,
"learning_rate": 8e-05,
"loss": 1.6856,
"step": 1979
},
{
"epoch": 0.9622160126351598,
"grad_norm": 0.6672433614730835,
"learning_rate": 8e-05,
"loss": 1.6807,
"step": 1980
},
{
"epoch": 0.9627019803183088,
"grad_norm": 0.687490701675415,
"learning_rate": 8e-05,
"loss": 1.7556,
"step": 1981
},
{
"epoch": 0.9631879480014579,
"grad_norm": 0.6436827182769775,
"learning_rate": 8e-05,
"loss": 1.6022,
"step": 1982
},
{
"epoch": 0.9636739156846069,
"grad_norm": 0.7169700264930725,
"learning_rate": 8e-05,
"loss": 1.6667,
"step": 1983
},
{
"epoch": 0.964159883367756,
"grad_norm": 0.6826598048210144,
"learning_rate": 8e-05,
"loss": 1.6769,
"step": 1984
},
{
"epoch": 0.9646458510509052,
"grad_norm": 0.6779286861419678,
"learning_rate": 8e-05,
"loss": 1.6673,
"step": 1985
},
{
"epoch": 0.9651318187340542,
"grad_norm": 0.7093191742897034,
"learning_rate": 8e-05,
"loss": 1.7826,
"step": 1986
},
{
"epoch": 0.9656177864172033,
"grad_norm": 0.7144518494606018,
"learning_rate": 8e-05,
"loss": 1.8025,
"step": 1987
},
{
"epoch": 0.9661037541003523,
"grad_norm": 0.6522985696792603,
"learning_rate": 8e-05,
"loss": 1.6384,
"step": 1988
},
{
"epoch": 0.9665897217835014,
"grad_norm": 1.0146836042404175,
"learning_rate": 8e-05,
"loss": 1.6507,
"step": 1989
},
{
"epoch": 0.9670756894666505,
"grad_norm": 0.9237478971481323,
"learning_rate": 8e-05,
"loss": 1.7586,
"step": 1990
},
{
"epoch": 0.9675616571497996,
"grad_norm": 0.7173764109611511,
"learning_rate": 8e-05,
"loss": 1.7062,
"step": 1991
},
{
"epoch": 0.9680476248329486,
"grad_norm": 0.9615936875343323,
"learning_rate": 8e-05,
"loss": 1.6762,
"step": 1992
},
{
"epoch": 0.9685335925160977,
"grad_norm": 0.8683964014053345,
"learning_rate": 8e-05,
"loss": 1.5157,
"step": 1993
},
{
"epoch": 0.9690195601992467,
"grad_norm": 2.1727890968322754,
"learning_rate": 8e-05,
"loss": 1.7305,
"step": 1994
},
{
"epoch": 0.9695055278823959,
"grad_norm": 1.005670189857483,
"learning_rate": 8e-05,
"loss": 1.646,
"step": 1995
},
{
"epoch": 0.9699914955655449,
"grad_norm": 1.1713099479675293,
"learning_rate": 8e-05,
"loss": 1.6862,
"step": 1996
},
{
"epoch": 0.970477463248694,
"grad_norm": 0.7767489552497864,
"learning_rate": 8e-05,
"loss": 1.6026,
"step": 1997
},
{
"epoch": 0.970963430931843,
"grad_norm": 2.5601918697357178,
"learning_rate": 8e-05,
"loss": 1.7123,
"step": 1998
},
{
"epoch": 0.9714493986149921,
"grad_norm": 0.8085450530052185,
"learning_rate": 8e-05,
"loss": 1.6387,
"step": 1999
},
{
"epoch": 0.9719353662981411,
"grad_norm": 0.7340285181999207,
"learning_rate": 8e-05,
"loss": 1.7505,
"step": 2000
}
],
"logging_steps": 1,
"max_steps": 4114,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.62503126581248e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}