{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9992810477946923,
"eval_steps": 500,
"global_step": 999,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001000281329123816,
"grad_norm": 0.9744666814804077,
"learning_rate": 4e-05,
"loss": 1.9866,
"step": 1
},
{
"epoch": 0.002000562658247632,
"grad_norm": 0.9364674687385559,
"learning_rate": 8e-05,
"loss": 1.9045,
"step": 2
},
{
"epoch": 0.003000843987371448,
"grad_norm": 0.9184039831161499,
"learning_rate": 0.00012,
"loss": 1.9518,
"step": 3
},
{
"epoch": 0.004001125316495264,
"grad_norm": 0.9024870991706848,
"learning_rate": 0.00016,
"loss": 1.8556,
"step": 4
},
{
"epoch": 0.005001406645619081,
"grad_norm": 0.952398955821991,
"learning_rate": 0.0002,
"loss": 1.7932,
"step": 5
},
{
"epoch": 0.006001687974742896,
"grad_norm": 0.8461670875549316,
"learning_rate": 0.0001999331550802139,
"loss": 1.4715,
"step": 6
},
{
"epoch": 0.007001969303866712,
"grad_norm": 1.0230799913406372,
"learning_rate": 0.00019986631016042782,
"loss": 1.2318,
"step": 7
},
{
"epoch": 0.008002250632990529,
"grad_norm": 0.8146515488624573,
"learning_rate": 0.00019979946524064174,
"loss": 1.0062,
"step": 8
},
{
"epoch": 0.009002531962114344,
"grad_norm": 0.8817946910858154,
"learning_rate": 0.00019973262032085563,
"loss": 0.8552,
"step": 9
},
{
"epoch": 0.010002813291238161,
"grad_norm": 0.7057645916938782,
"learning_rate": 0.00019966577540106952,
"loss": 0.7432,
"step": 10
},
{
"epoch": 0.011003094620361977,
"grad_norm": 0.7819643020629883,
"learning_rate": 0.00019959893048128344,
"loss": 0.6843,
"step": 11
},
{
"epoch": 0.012003375949485792,
"grad_norm": 0.9533947110176086,
"learning_rate": 0.00019953208556149733,
"loss": 0.697,
"step": 12
},
{
"epoch": 0.01300365727860961,
"grad_norm": 0.7520662546157837,
"learning_rate": 0.00019946524064171124,
"loss": 0.6386,
"step": 13
},
{
"epoch": 0.014003938607733425,
"grad_norm": 0.38872724771499634,
"learning_rate": 0.00019939839572192514,
"loss": 0.6368,
"step": 14
},
{
"epoch": 0.01500421993685724,
"grad_norm": 0.3418921232223511,
"learning_rate": 0.00019933155080213905,
"loss": 0.6726,
"step": 15
},
{
"epoch": 0.016004501265981057,
"grad_norm": 0.48597708344459534,
"learning_rate": 0.00019926470588235294,
"loss": 0.639,
"step": 16
},
{
"epoch": 0.017004782595104875,
"grad_norm": 0.23545189201831818,
"learning_rate": 0.00019919786096256686,
"loss": 0.6184,
"step": 17
},
{
"epoch": 0.01800506392422869,
"grad_norm": 0.21088548004627228,
"learning_rate": 0.00019913101604278075,
"loss": 0.6335,
"step": 18
},
{
"epoch": 0.019005345253352506,
"grad_norm": 0.216445192694664,
"learning_rate": 0.00019906417112299467,
"loss": 0.6259,
"step": 19
},
{
"epoch": 0.020005626582476323,
"grad_norm": 0.2561822235584259,
"learning_rate": 0.00019899732620320856,
"loss": 0.6099,
"step": 20
},
{
"epoch": 0.021005907911600136,
"grad_norm": 0.22108854353427887,
"learning_rate": 0.00019893048128342245,
"loss": 0.5994,
"step": 21
},
{
"epoch": 0.022006189240723954,
"grad_norm": 0.19824647903442383,
"learning_rate": 0.00019886363636363637,
"loss": 0.6077,
"step": 22
},
{
"epoch": 0.02300647056984777,
"grad_norm": 0.1788371354341507,
"learning_rate": 0.0001987967914438503,
"loss": 0.5628,
"step": 23
},
{
"epoch": 0.024006751898971584,
"grad_norm": 0.1805984526872635,
"learning_rate": 0.00019872994652406418,
"loss": 0.5898,
"step": 24
},
{
"epoch": 0.0250070332280954,
"grad_norm": 0.17772608995437622,
"learning_rate": 0.00019866310160427807,
"loss": 0.5735,
"step": 25
},
{
"epoch": 0.02600731455721922,
"grad_norm": 0.1585753858089447,
"learning_rate": 0.000198596256684492,
"loss": 0.5745,
"step": 26
},
{
"epoch": 0.027007595886343033,
"grad_norm": 0.16654984652996063,
"learning_rate": 0.0001985294117647059,
"loss": 0.5572,
"step": 27
},
{
"epoch": 0.02800787721546685,
"grad_norm": 0.1624041497707367,
"learning_rate": 0.0001984625668449198,
"loss": 0.5625,
"step": 28
},
{
"epoch": 0.029008158544590667,
"grad_norm": 0.17912402749061584,
"learning_rate": 0.0001983957219251337,
"loss": 0.5895,
"step": 29
},
{
"epoch": 0.03000843987371448,
"grad_norm": 0.16431595385074615,
"learning_rate": 0.0001983288770053476,
"loss": 0.5918,
"step": 30
},
{
"epoch": 0.031008721202838298,
"grad_norm": 0.1544601172208786,
"learning_rate": 0.00019826203208556152,
"loss": 0.5237,
"step": 31
},
{
"epoch": 0.032009002531962115,
"grad_norm": 0.17625634372234344,
"learning_rate": 0.00019819518716577541,
"loss": 0.5583,
"step": 32
},
{
"epoch": 0.03300928386108593,
"grad_norm": 0.15690909326076508,
"learning_rate": 0.0001981283422459893,
"loss": 0.5599,
"step": 33
},
{
"epoch": 0.03400956519020975,
"grad_norm": 0.15410259366035461,
"learning_rate": 0.00019806149732620322,
"loss": 0.5537,
"step": 34
},
{
"epoch": 0.03500984651933356,
"grad_norm": 0.1651620715856552,
"learning_rate": 0.0001979946524064171,
"loss": 0.542,
"step": 35
},
{
"epoch": 0.03601012784845738,
"grad_norm": 0.16008822619915009,
"learning_rate": 0.00019792780748663103,
"loss": 0.5579,
"step": 36
},
{
"epoch": 0.037010409177581194,
"grad_norm": 0.16451336443424225,
"learning_rate": 0.00019786096256684492,
"loss": 0.5494,
"step": 37
},
{
"epoch": 0.03801069050670501,
"grad_norm": 0.16137196123600006,
"learning_rate": 0.00019779411764705884,
"loss": 0.5308,
"step": 38
},
{
"epoch": 0.03901097183582883,
"grad_norm": 0.16389496624469757,
"learning_rate": 0.00019772727272727273,
"loss": 0.5785,
"step": 39
},
{
"epoch": 0.040011253164952645,
"grad_norm": 0.17476366460323334,
"learning_rate": 0.00019766042780748665,
"loss": 0.5455,
"step": 40
},
{
"epoch": 0.041011534494076456,
"grad_norm": 0.1790967434644699,
"learning_rate": 0.00019759358288770054,
"loss": 0.5334,
"step": 41
},
{
"epoch": 0.04201181582320027,
"grad_norm": 0.15343967080116272,
"learning_rate": 0.00019752673796791446,
"loss": 0.5078,
"step": 42
},
{
"epoch": 0.04301209715232409,
"grad_norm": 0.16167956590652466,
"learning_rate": 0.00019745989304812835,
"loss": 0.5199,
"step": 43
},
{
"epoch": 0.04401237848144791,
"grad_norm": 0.15342746675014496,
"learning_rate": 0.00019739304812834224,
"loss": 0.5214,
"step": 44
},
{
"epoch": 0.045012659810571724,
"grad_norm": 0.16108684241771698,
"learning_rate": 0.00019732620320855616,
"loss": 0.5073,
"step": 45
},
{
"epoch": 0.04601294113969554,
"grad_norm": 0.1659533828496933,
"learning_rate": 0.00019725935828877008,
"loss": 0.5285,
"step": 46
},
{
"epoch": 0.04701322246881936,
"grad_norm": 0.17120051383972168,
"learning_rate": 0.00019719251336898397,
"loss": 0.5175,
"step": 47
},
{
"epoch": 0.04801350379794317,
"grad_norm": 0.162173792719841,
"learning_rate": 0.00019712566844919786,
"loss": 0.5002,
"step": 48
},
{
"epoch": 0.049013785127066986,
"grad_norm": 0.17042332887649536,
"learning_rate": 0.00019705882352941177,
"loss": 0.5096,
"step": 49
},
{
"epoch": 0.0500140664561908,
"grad_norm": 0.16357912123203278,
"learning_rate": 0.0001969919786096257,
"loss": 0.5277,
"step": 50
},
{
"epoch": 0.05101434778531462,
"grad_norm": 0.18239375948905945,
"learning_rate": 0.00019692513368983958,
"loss": 0.5471,
"step": 51
},
{
"epoch": 0.05201462911443844,
"grad_norm": 0.16548025608062744,
"learning_rate": 0.00019685828877005347,
"loss": 0.5236,
"step": 52
},
{
"epoch": 0.053014910443562255,
"grad_norm": 0.16912941634655,
"learning_rate": 0.0001967914438502674,
"loss": 0.5052,
"step": 53
},
{
"epoch": 0.054015191772686065,
"grad_norm": 0.16741348803043365,
"learning_rate": 0.0001967245989304813,
"loss": 0.5248,
"step": 54
},
{
"epoch": 0.05501547310180988,
"grad_norm": 0.17378926277160645,
"learning_rate": 0.0001966577540106952,
"loss": 0.517,
"step": 55
},
{
"epoch": 0.0560157544309337,
"grad_norm": 0.17014221847057343,
"learning_rate": 0.0001965909090909091,
"loss": 0.5027,
"step": 56
},
{
"epoch": 0.05701603576005752,
"grad_norm": 0.17511023581027985,
"learning_rate": 0.000196524064171123,
"loss": 0.5288,
"step": 57
},
{
"epoch": 0.058016317089181334,
"grad_norm": 0.16974779963493347,
"learning_rate": 0.0001964572192513369,
"loss": 0.5045,
"step": 58
},
{
"epoch": 0.05901659841830515,
"grad_norm": 0.17105549573898315,
"learning_rate": 0.00019639037433155082,
"loss": 0.5224,
"step": 59
},
{
"epoch": 0.06001687974742896,
"grad_norm": 0.1767175942659378,
"learning_rate": 0.0001963235294117647,
"loss": 0.5308,
"step": 60
},
{
"epoch": 0.06101716107655278,
"grad_norm": 0.17822648584842682,
"learning_rate": 0.00019625668449197863,
"loss": 0.5035,
"step": 61
},
{
"epoch": 0.062017442405676595,
"grad_norm": 0.2005096822977066,
"learning_rate": 0.00019618983957219252,
"loss": 0.5319,
"step": 62
},
{
"epoch": 0.06301772373480041,
"grad_norm": 0.1792004257440567,
"learning_rate": 0.00019612299465240644,
"loss": 0.5202,
"step": 63
},
{
"epoch": 0.06401800506392423,
"grad_norm": 0.1759466826915741,
"learning_rate": 0.00019605614973262033,
"loss": 0.512,
"step": 64
},
{
"epoch": 0.06501828639304805,
"grad_norm": 0.18015748262405396,
"learning_rate": 0.00019598930481283424,
"loss": 0.5102,
"step": 65
},
{
"epoch": 0.06601856772217186,
"grad_norm": 0.16892513632774353,
"learning_rate": 0.00019592245989304814,
"loss": 0.5012,
"step": 66
},
{
"epoch": 0.06701884905129568,
"grad_norm": 0.20100218057632446,
"learning_rate": 0.00019585561497326203,
"loss": 0.5118,
"step": 67
},
{
"epoch": 0.0680191303804195,
"grad_norm": 0.19578860700130463,
"learning_rate": 0.00019578877005347594,
"loss": 0.5135,
"step": 68
},
{
"epoch": 0.06901941170954332,
"grad_norm": 0.17195583879947662,
"learning_rate": 0.00019572192513368986,
"loss": 0.5168,
"step": 69
},
{
"epoch": 0.07001969303866712,
"grad_norm": 0.17638365924358368,
"learning_rate": 0.00019565508021390375,
"loss": 0.4727,
"step": 70
},
{
"epoch": 0.07101997436779094,
"grad_norm": 0.18563459813594818,
"learning_rate": 0.00019558823529411764,
"loss": 0.5075,
"step": 71
},
{
"epoch": 0.07202025569691475,
"grad_norm": 0.18360653519630432,
"learning_rate": 0.00019552139037433156,
"loss": 0.491,
"step": 72
},
{
"epoch": 0.07302053702603857,
"grad_norm": 0.19104507565498352,
"learning_rate": 0.00019545454545454548,
"loss": 0.4864,
"step": 73
},
{
"epoch": 0.07402081835516239,
"grad_norm": 0.1746816188097,
"learning_rate": 0.00019538770053475937,
"loss": 0.5038,
"step": 74
},
{
"epoch": 0.0750210996842862,
"grad_norm": 0.17216187715530396,
"learning_rate": 0.00019532085561497326,
"loss": 0.4995,
"step": 75
},
{
"epoch": 0.07602138101341002,
"grad_norm": 0.19356052577495575,
"learning_rate": 0.00019525401069518718,
"loss": 0.4995,
"step": 76
},
{
"epoch": 0.07702166234253384,
"grad_norm": 0.17926344275474548,
"learning_rate": 0.00019518716577540107,
"loss": 0.4734,
"step": 77
},
{
"epoch": 0.07802194367165766,
"grad_norm": 0.18571226298809052,
"learning_rate": 0.000195120320855615,
"loss": 0.5089,
"step": 78
},
{
"epoch": 0.07902222500078147,
"grad_norm": 0.18840278685092926,
"learning_rate": 0.00019505347593582888,
"loss": 0.4905,
"step": 79
},
{
"epoch": 0.08002250632990529,
"grad_norm": 0.18464742600917816,
"learning_rate": 0.0001949866310160428,
"loss": 0.5073,
"step": 80
},
{
"epoch": 0.08102278765902911,
"grad_norm": 0.1761467307806015,
"learning_rate": 0.0001949197860962567,
"loss": 0.4837,
"step": 81
},
{
"epoch": 0.08202306898815291,
"grad_norm": 0.19362109899520874,
"learning_rate": 0.0001948529411764706,
"loss": 0.4807,
"step": 82
},
{
"epoch": 0.08302335031727673,
"grad_norm": 0.17530831694602966,
"learning_rate": 0.0001947860962566845,
"loss": 0.4904,
"step": 83
},
{
"epoch": 0.08402363164640055,
"grad_norm": 0.1803610920906067,
"learning_rate": 0.00019471925133689841,
"loss": 0.4909,
"step": 84
},
{
"epoch": 0.08502391297552436,
"grad_norm": 0.19175425171852112,
"learning_rate": 0.0001946524064171123,
"loss": 0.4839,
"step": 85
},
{
"epoch": 0.08602419430464818,
"grad_norm": 0.17780834436416626,
"learning_rate": 0.0001945855614973262,
"loss": 0.4627,
"step": 86
},
{
"epoch": 0.087024475633772,
"grad_norm": 0.18597464263439178,
"learning_rate": 0.0001945187165775401,
"loss": 0.4939,
"step": 87
},
{
"epoch": 0.08802475696289581,
"grad_norm": 0.1801149696111679,
"learning_rate": 0.00019445187165775403,
"loss": 0.4737,
"step": 88
},
{
"epoch": 0.08902503829201963,
"grad_norm": 0.18139155209064484,
"learning_rate": 0.00019438502673796792,
"loss": 0.4891,
"step": 89
},
{
"epoch": 0.09002531962114345,
"grad_norm": 0.1813085973262787,
"learning_rate": 0.0001943181818181818,
"loss": 0.4949,
"step": 90
},
{
"epoch": 0.09102560095026727,
"grad_norm": 0.1852802038192749,
"learning_rate": 0.00019425133689839573,
"loss": 0.4898,
"step": 91
},
{
"epoch": 0.09202588227939108,
"grad_norm": 0.19154123961925507,
"learning_rate": 0.00019418449197860965,
"loss": 0.4524,
"step": 92
},
{
"epoch": 0.0930261636085149,
"grad_norm": 0.1693875789642334,
"learning_rate": 0.00019411764705882354,
"loss": 0.4771,
"step": 93
},
{
"epoch": 0.09402644493763872,
"grad_norm": 0.1709897220134735,
"learning_rate": 0.00019405080213903743,
"loss": 0.5056,
"step": 94
},
{
"epoch": 0.09502672626676252,
"grad_norm": 0.19264625012874603,
"learning_rate": 0.00019398395721925135,
"loss": 0.5083,
"step": 95
},
{
"epoch": 0.09602700759588634,
"grad_norm": 0.18185105919837952,
"learning_rate": 0.00019391711229946527,
"loss": 0.4955,
"step": 96
},
{
"epoch": 0.09702728892501016,
"grad_norm": 0.18781577050685883,
"learning_rate": 0.00019385026737967916,
"loss": 0.5029,
"step": 97
},
{
"epoch": 0.09802757025413397,
"grad_norm": 0.18748541176319122,
"learning_rate": 0.00019378342245989305,
"loss": 0.5011,
"step": 98
},
{
"epoch": 0.09902785158325779,
"grad_norm": 0.17571064829826355,
"learning_rate": 0.00019371657754010697,
"loss": 0.4586,
"step": 99
},
{
"epoch": 0.1000281329123816,
"grad_norm": 0.1871766448020935,
"learning_rate": 0.00019364973262032086,
"loss": 0.4766,
"step": 100
},
{
"epoch": 0.10102841424150542,
"grad_norm": 0.1704852133989334,
"learning_rate": 0.00019358288770053477,
"loss": 0.4922,
"step": 101
},
{
"epoch": 0.10202869557062924,
"grad_norm": 0.18065835535526276,
"learning_rate": 0.00019351604278074867,
"loss": 0.4902,
"step": 102
},
{
"epoch": 0.10302897689975306,
"grad_norm": 0.18194100260734558,
"learning_rate": 0.00019344919786096258,
"loss": 0.4736,
"step": 103
},
{
"epoch": 0.10402925822887688,
"grad_norm": 0.1811995506286621,
"learning_rate": 0.00019338235294117647,
"loss": 0.486,
"step": 104
},
{
"epoch": 0.10502953955800069,
"grad_norm": 0.194939985871315,
"learning_rate": 0.0001933155080213904,
"loss": 0.4971,
"step": 105
},
{
"epoch": 0.10602982088712451,
"grad_norm": 0.18497943878173828,
"learning_rate": 0.00019324866310160428,
"loss": 0.4832,
"step": 106
},
{
"epoch": 0.10703010221624833,
"grad_norm": 0.1584584265947342,
"learning_rate": 0.0001931818181818182,
"loss": 0.4884,
"step": 107
},
{
"epoch": 0.10803038354537213,
"grad_norm": 0.1716640740633011,
"learning_rate": 0.0001931149732620321,
"loss": 0.5107,
"step": 108
},
{
"epoch": 0.10903066487449595,
"grad_norm": 0.17184816300868988,
"learning_rate": 0.00019304812834224598,
"loss": 0.4865,
"step": 109
},
{
"epoch": 0.11003094620361976,
"grad_norm": 0.1761111319065094,
"learning_rate": 0.0001929812834224599,
"loss": 0.4739,
"step": 110
},
{
"epoch": 0.11103122753274358,
"grad_norm": 0.17194178700447083,
"learning_rate": 0.00019291443850267382,
"loss": 0.4706,
"step": 111
},
{
"epoch": 0.1120315088618674,
"grad_norm": 0.18889540433883667,
"learning_rate": 0.0001928475935828877,
"loss": 0.4582,
"step": 112
},
{
"epoch": 0.11303179019099122,
"grad_norm": 0.20257653295993805,
"learning_rate": 0.0001927807486631016,
"loss": 0.4735,
"step": 113
},
{
"epoch": 0.11403207152011503,
"grad_norm": 0.17461931705474854,
"learning_rate": 0.00019271390374331552,
"loss": 0.4529,
"step": 114
},
{
"epoch": 0.11503235284923885,
"grad_norm": 0.1717446744441986,
"learning_rate": 0.00019264705882352944,
"loss": 0.4538,
"step": 115
},
{
"epoch": 0.11603263417836267,
"grad_norm": 0.1839551031589508,
"learning_rate": 0.00019258021390374333,
"loss": 0.4822,
"step": 116
},
{
"epoch": 0.11703291550748648,
"grad_norm": 0.19117318093776703,
"learning_rate": 0.00019251336898395722,
"loss": 0.4741,
"step": 117
},
{
"epoch": 0.1180331968366103,
"grad_norm": 0.17903394997119904,
"learning_rate": 0.00019244652406417114,
"loss": 0.4755,
"step": 118
},
{
"epoch": 0.11903347816573412,
"grad_norm": 0.18842382729053497,
"learning_rate": 0.00019237967914438503,
"loss": 0.4995,
"step": 119
},
{
"epoch": 0.12003375949485792,
"grad_norm": 0.18437859416007996,
"learning_rate": 0.00019231283422459894,
"loss": 0.5088,
"step": 120
},
{
"epoch": 0.12103404082398174,
"grad_norm": 0.17745694518089294,
"learning_rate": 0.00019224598930481283,
"loss": 0.4565,
"step": 121
},
{
"epoch": 0.12203432215310556,
"grad_norm": 0.17243270576000214,
"learning_rate": 0.00019217914438502675,
"loss": 0.4642,
"step": 122
},
{
"epoch": 0.12303460348222937,
"grad_norm": 0.2010149359703064,
"learning_rate": 0.00019211229946524064,
"loss": 0.4872,
"step": 123
},
{
"epoch": 0.12403488481135319,
"grad_norm": 0.19740119576454163,
"learning_rate": 0.00019204545454545456,
"loss": 0.5071,
"step": 124
},
{
"epoch": 0.12503516614047702,
"grad_norm": 0.17282332479953766,
"learning_rate": 0.00019197860962566845,
"loss": 0.4748,
"step": 125
},
{
"epoch": 0.12603544746960083,
"grad_norm": 0.2022191882133484,
"learning_rate": 0.00019191176470588237,
"loss": 0.4593,
"step": 126
},
{
"epoch": 0.12703572879872463,
"grad_norm": 0.20872025191783905,
"learning_rate": 0.00019184491978609626,
"loss": 0.4573,
"step": 127
},
{
"epoch": 0.12803601012784846,
"grad_norm": 0.17185282707214355,
"learning_rate": 0.00019177807486631015,
"loss": 0.4589,
"step": 128
},
{
"epoch": 0.12903629145697226,
"grad_norm": 0.20027658343315125,
"learning_rate": 0.00019171122994652407,
"loss": 0.4501,
"step": 129
},
{
"epoch": 0.1300365727860961,
"grad_norm": 0.20652532577514648,
"learning_rate": 0.000191644385026738,
"loss": 0.4961,
"step": 130
},
{
"epoch": 0.1310368541152199,
"grad_norm": 0.17151765525341034,
"learning_rate": 0.00019157754010695188,
"loss": 0.455,
"step": 131
},
{
"epoch": 0.13203713544434373,
"grad_norm": 0.1886252760887146,
"learning_rate": 0.00019151069518716577,
"loss": 0.486,
"step": 132
},
{
"epoch": 0.13303741677346753,
"grad_norm": 0.22897754609584808,
"learning_rate": 0.0001914438502673797,
"loss": 0.4953,
"step": 133
},
{
"epoch": 0.13403769810259136,
"grad_norm": 0.18379279971122742,
"learning_rate": 0.0001913770053475936,
"loss": 0.4931,
"step": 134
},
{
"epoch": 0.13503797943171517,
"grad_norm": 0.18525294959545135,
"learning_rate": 0.0001913101604278075,
"loss": 0.4543,
"step": 135
},
{
"epoch": 0.136038260760839,
"grad_norm": 0.17013776302337646,
"learning_rate": 0.0001912433155080214,
"loss": 0.4787,
"step": 136
},
{
"epoch": 0.1370385420899628,
"grad_norm": 0.1702297180891037,
"learning_rate": 0.0001911764705882353,
"loss": 0.4459,
"step": 137
},
{
"epoch": 0.13803882341908663,
"grad_norm": 0.17295871675014496,
"learning_rate": 0.00019110962566844922,
"loss": 0.4694,
"step": 138
},
{
"epoch": 0.13903910474821043,
"grad_norm": 0.16555902361869812,
"learning_rate": 0.0001910427807486631,
"loss": 0.4486,
"step": 139
},
{
"epoch": 0.14003938607733424,
"grad_norm": 0.1774980127811432,
"learning_rate": 0.000190975935828877,
"loss": 0.4728,
"step": 140
},
{
"epoch": 0.14103966740645807,
"grad_norm": 0.19104857742786407,
"learning_rate": 0.00019090909090909092,
"loss": 0.4583,
"step": 141
},
{
"epoch": 0.14203994873558187,
"grad_norm": 0.194419264793396,
"learning_rate": 0.0001908422459893048,
"loss": 0.4473,
"step": 142
},
{
"epoch": 0.1430402300647057,
"grad_norm": 0.18389761447906494,
"learning_rate": 0.00019077540106951873,
"loss": 0.5063,
"step": 143
},
{
"epoch": 0.1440405113938295,
"grad_norm": 0.1690511256456375,
"learning_rate": 0.00019070855614973262,
"loss": 0.4406,
"step": 144
},
{
"epoch": 0.14504079272295334,
"grad_norm": 0.18417517840862274,
"learning_rate": 0.00019064171122994654,
"loss": 0.5078,
"step": 145
},
{
"epoch": 0.14604107405207714,
"grad_norm": 0.1713017076253891,
"learning_rate": 0.00019057486631016043,
"loss": 0.493,
"step": 146
},
{
"epoch": 0.14704135538120097,
"grad_norm": 0.18398739397525787,
"learning_rate": 0.00019050802139037435,
"loss": 0.4594,
"step": 147
},
{
"epoch": 0.14804163671032478,
"grad_norm": 0.1759570688009262,
"learning_rate": 0.00019044117647058824,
"loss": 0.4714,
"step": 148
},
{
"epoch": 0.1490419180394486,
"grad_norm": 0.17486704885959625,
"learning_rate": 0.00019037433155080216,
"loss": 0.4855,
"step": 149
},
{
"epoch": 0.1500421993685724,
"grad_norm": 0.16296333074569702,
"learning_rate": 0.00019030748663101605,
"loss": 0.4766,
"step": 150
},
{
"epoch": 0.15104248069769624,
"grad_norm": 0.176128551363945,
"learning_rate": 0.00019024064171122994,
"loss": 0.4495,
"step": 151
},
{
"epoch": 0.15204276202682004,
"grad_norm": 0.191524475812912,
"learning_rate": 0.00019017379679144388,
"loss": 0.5134,
"step": 152
},
{
"epoch": 0.15304304335594385,
"grad_norm": 0.1700233370065689,
"learning_rate": 0.00019010695187165777,
"loss": 0.4567,
"step": 153
},
{
"epoch": 0.15404332468506768,
"grad_norm": 0.20025531947612762,
"learning_rate": 0.00019004010695187167,
"loss": 0.4878,
"step": 154
},
{
"epoch": 0.15504360601419148,
"grad_norm": 0.19142255187034607,
"learning_rate": 0.00018997326203208556,
"loss": 0.5049,
"step": 155
},
{
"epoch": 0.1560438873433153,
"grad_norm": 0.18157531321048737,
"learning_rate": 0.00018990641711229947,
"loss": 0.4678,
"step": 156
},
{
"epoch": 0.15704416867243912,
"grad_norm": 0.1831781417131424,
"learning_rate": 0.0001898395721925134,
"loss": 0.4737,
"step": 157
},
{
"epoch": 0.15804445000156295,
"grad_norm": 0.18358385562896729,
"learning_rate": 0.00018977272727272728,
"loss": 0.4813,
"step": 158
},
{
"epoch": 0.15904473133068675,
"grad_norm": 0.18451449275016785,
"learning_rate": 0.00018970588235294117,
"loss": 0.4726,
"step": 159
},
{
"epoch": 0.16004501265981058,
"grad_norm": 0.17466507852077484,
"learning_rate": 0.0001896390374331551,
"loss": 0.4616,
"step": 160
},
{
"epoch": 0.16104529398893438,
"grad_norm": 0.17851275205612183,
"learning_rate": 0.000189572192513369,
"loss": 0.4755,
"step": 161
},
{
"epoch": 0.16204557531805822,
"grad_norm": 0.17984986305236816,
"learning_rate": 0.0001895053475935829,
"loss": 0.4842,
"step": 162
},
{
"epoch": 0.16304585664718202,
"grad_norm": 0.1726839244365692,
"learning_rate": 0.0001894385026737968,
"loss": 0.4654,
"step": 163
},
{
"epoch": 0.16404613797630582,
"grad_norm": 0.1909494400024414,
"learning_rate": 0.0001893716577540107,
"loss": 0.466,
"step": 164
},
{
"epoch": 0.16504641930542965,
"grad_norm": 0.16181348264217377,
"learning_rate": 0.0001893048128342246,
"loss": 0.444,
"step": 165
},
{
"epoch": 0.16604670063455346,
"grad_norm": 0.17508991062641144,
"learning_rate": 0.00018923796791443852,
"loss": 0.451,
"step": 166
},
{
"epoch": 0.1670469819636773,
"grad_norm": 0.1983264833688736,
"learning_rate": 0.0001891711229946524,
"loss": 0.4763,
"step": 167
},
{
"epoch": 0.1680472632928011,
"grad_norm": 0.16887639462947845,
"learning_rate": 0.00018910427807486633,
"loss": 0.4481,
"step": 168
},
{
"epoch": 0.16904754462192492,
"grad_norm": 0.18914102017879486,
"learning_rate": 0.00018903743315508022,
"loss": 0.4888,
"step": 169
},
{
"epoch": 0.17004782595104873,
"grad_norm": 0.1777648627758026,
"learning_rate": 0.00018897058823529413,
"loss": 0.4541,
"step": 170
},
{
"epoch": 0.17104810728017256,
"grad_norm": 0.17672370374202728,
"learning_rate": 0.00018890374331550803,
"loss": 0.4518,
"step": 171
},
{
"epoch": 0.17204838860929636,
"grad_norm": 0.19094176590442657,
"learning_rate": 0.00018883689839572194,
"loss": 0.4672,
"step": 172
},
{
"epoch": 0.1730486699384202,
"grad_norm": 0.18507908284664154,
"learning_rate": 0.00018877005347593583,
"loss": 0.465,
"step": 173
},
{
"epoch": 0.174048951267544,
"grad_norm": 0.17785289883613586,
"learning_rate": 0.00018870320855614973,
"loss": 0.4934,
"step": 174
},
{
"epoch": 0.17504923259666783,
"grad_norm": 0.1623869389295578,
"learning_rate": 0.00018863636363636364,
"loss": 0.4614,
"step": 175
},
{
"epoch": 0.17604951392579163,
"grad_norm": 0.1781870424747467,
"learning_rate": 0.00018856951871657756,
"loss": 0.4428,
"step": 176
},
{
"epoch": 0.17704979525491543,
"grad_norm": 0.1758892834186554,
"learning_rate": 0.00018850267379679145,
"loss": 0.459,
"step": 177
},
{
"epoch": 0.17805007658403926,
"grad_norm": 0.16301238536834717,
"learning_rate": 0.00018843582887700534,
"loss": 0.4655,
"step": 178
},
{
"epoch": 0.17905035791316307,
"grad_norm": 0.1834089159965515,
"learning_rate": 0.00018836898395721926,
"loss": 0.4371,
"step": 179
},
{
"epoch": 0.1800506392422869,
"grad_norm": 0.19042764604091644,
"learning_rate": 0.00018830213903743318,
"loss": 0.443,
"step": 180
},
{
"epoch": 0.1810509205714107,
"grad_norm": 0.18761961162090302,
"learning_rate": 0.00018823529411764707,
"loss": 0.4698,
"step": 181
},
{
"epoch": 0.18205120190053453,
"grad_norm": 0.18886564671993256,
"learning_rate": 0.00018816844919786096,
"loss": 0.4751,
"step": 182
},
{
"epoch": 0.18305148322965833,
"grad_norm": 0.17123280465602875,
"learning_rate": 0.00018810160427807488,
"loss": 0.4504,
"step": 183
},
{
"epoch": 0.18405176455878217,
"grad_norm": 0.19330959022045135,
"learning_rate": 0.00018803475935828877,
"loss": 0.4919,
"step": 184
},
{
"epoch": 0.18505204588790597,
"grad_norm": 0.18290665745735168,
"learning_rate": 0.0001879679144385027,
"loss": 0.4474,
"step": 185
},
{
"epoch": 0.1860523272170298,
"grad_norm": 0.18846595287322998,
"learning_rate": 0.00018790106951871658,
"loss": 0.4569,
"step": 186
},
{
"epoch": 0.1870526085461536,
"grad_norm": 0.1864161491394043,
"learning_rate": 0.0001878342245989305,
"loss": 0.4618,
"step": 187
},
{
"epoch": 0.18805288987527743,
"grad_norm": 0.1784975528717041,
"learning_rate": 0.00018776737967914439,
"loss": 0.4541,
"step": 188
},
{
"epoch": 0.18905317120440124,
"grad_norm": 0.18015554547309875,
"learning_rate": 0.0001877005347593583,
"loss": 0.4566,
"step": 189
},
{
"epoch": 0.19005345253352504,
"grad_norm": 0.17766129970550537,
"learning_rate": 0.0001876336898395722,
"loss": 0.4825,
"step": 190
},
{
"epoch": 0.19105373386264887,
"grad_norm": 0.173505499958992,
"learning_rate": 0.0001875668449197861,
"loss": 0.4657,
"step": 191
},
{
"epoch": 0.19205401519177268,
"grad_norm": 0.17310304939746857,
"learning_rate": 0.0001875,
"loss": 0.4356,
"step": 192
},
{
"epoch": 0.1930542965208965,
"grad_norm": 0.19145220518112183,
"learning_rate": 0.0001874331550802139,
"loss": 0.4787,
"step": 193
},
{
"epoch": 0.1940545778500203,
"grad_norm": 0.17684215307235718,
"learning_rate": 0.00018736631016042784,
"loss": 0.4627,
"step": 194
},
{
"epoch": 0.19505485917914414,
"grad_norm": 0.18631704151630402,
"learning_rate": 0.00018729946524064173,
"loss": 0.447,
"step": 195
},
{
"epoch": 0.19605514050826794,
"grad_norm": 0.19067594408988953,
"learning_rate": 0.00018723262032085562,
"loss": 0.4646,
"step": 196
},
{
"epoch": 0.19705542183739178,
"grad_norm": 0.17477108538150787,
"learning_rate": 0.0001871657754010695,
"loss": 0.4656,
"step": 197
},
{
"epoch": 0.19805570316651558,
"grad_norm": 0.18243315815925598,
"learning_rate": 0.00018709893048128343,
"loss": 0.4692,
"step": 198
},
{
"epoch": 0.1990559844956394,
"grad_norm": 0.17050983011722565,
"learning_rate": 0.00018703208556149735,
"loss": 0.4406,
"step": 199
},
{
"epoch": 0.2000562658247632,
"grad_norm": 0.17637942731380463,
"learning_rate": 0.00018696524064171124,
"loss": 0.455,
"step": 200
},
{
"epoch": 0.20105654715388704,
"grad_norm": 0.19478300213813782,
"learning_rate": 0.00018689839572192513,
"loss": 0.44,
"step": 201
},
{
"epoch": 0.20205682848301085,
"grad_norm": 0.17724725604057312,
"learning_rate": 0.00018683155080213905,
"loss": 0.4577,
"step": 202
},
{
"epoch": 0.20305710981213465,
"grad_norm": 0.18279367685317993,
"learning_rate": 0.00018676470588235297,
"loss": 0.4457,
"step": 203
},
{
"epoch": 0.20405739114125848,
"grad_norm": 0.19004391133785248,
"learning_rate": 0.00018669786096256686,
"loss": 0.4398,
"step": 204
},
{
"epoch": 0.20505767247038229,
"grad_norm": 0.20774249732494354,
"learning_rate": 0.00018663101604278075,
"loss": 0.4504,
"step": 205
},
{
"epoch": 0.20605795379950612,
"grad_norm": 0.18987542390823364,
"learning_rate": 0.00018656417112299467,
"loss": 0.4714,
"step": 206
},
{
"epoch": 0.20705823512862992,
"grad_norm": 0.2253541797399521,
"learning_rate": 0.00018649732620320856,
"loss": 0.4674,
"step": 207
},
{
"epoch": 0.20805851645775375,
"grad_norm": 0.19410820305347443,
"learning_rate": 0.00018643048128342247,
"loss": 0.4561,
"step": 208
},
{
"epoch": 0.20905879778687755,
"grad_norm": 0.17445877194404602,
"learning_rate": 0.00018636363636363636,
"loss": 0.4272,
"step": 209
},
{
"epoch": 0.21005907911600138,
"grad_norm": 0.18313977122306824,
"learning_rate": 0.00018629679144385028,
"loss": 0.4982,
"step": 210
},
{
"epoch": 0.2110593604451252,
"grad_norm": 0.1821029633283615,
"learning_rate": 0.00018622994652406417,
"loss": 0.4556,
"step": 211
},
{
"epoch": 0.21205964177424902,
"grad_norm": 0.1718428134918213,
"learning_rate": 0.0001861631016042781,
"loss": 0.4434,
"step": 212
},
{
"epoch": 0.21305992310337282,
"grad_norm": 0.17503920197486877,
"learning_rate": 0.000186096256684492,
"loss": 0.4518,
"step": 213
},
{
"epoch": 0.21406020443249665,
"grad_norm": 0.19129222631454468,
"learning_rate": 0.0001860294117647059,
"loss": 0.4337,
"step": 214
},
{
"epoch": 0.21506048576162046,
"grad_norm": 0.18414440751075745,
"learning_rate": 0.0001859625668449198,
"loss": 0.4476,
"step": 215
},
{
"epoch": 0.21606076709074426,
"grad_norm": 0.1825624704360962,
"learning_rate": 0.00018589572192513368,
"loss": 0.4602,
"step": 216
},
{
"epoch": 0.2170610484198681,
"grad_norm": 0.20369993150234222,
"learning_rate": 0.0001858288770053476,
"loss": 0.449,
"step": 217
},
{
"epoch": 0.2180613297489919,
"grad_norm": 0.18359532952308655,
"learning_rate": 0.00018576203208556152,
"loss": 0.4717,
"step": 218
},
{
"epoch": 0.21906161107811573,
"grad_norm": 0.1811211258172989,
"learning_rate": 0.0001856951871657754,
"loss": 0.4502,
"step": 219
},
{
"epoch": 0.22006189240723953,
"grad_norm": 0.1943439245223999,
"learning_rate": 0.0001856283422459893,
"loss": 0.4358,
"step": 220
},
{
"epoch": 0.22106217373636336,
"grad_norm": 0.17112450301647186,
"learning_rate": 0.00018556149732620322,
"loss": 0.4379,
"step": 221
},
{
"epoch": 0.22206245506548716,
"grad_norm": 0.17877739667892456,
"learning_rate": 0.00018549465240641713,
"loss": 0.4666,
"step": 222
},
{
"epoch": 0.223062736394611,
"grad_norm": 0.1775282621383667,
"learning_rate": 0.00018542780748663103,
"loss": 0.4614,
"step": 223
},
{
"epoch": 0.2240630177237348,
"grad_norm": 0.2399931252002716,
"learning_rate": 0.00018536096256684492,
"loss": 0.469,
"step": 224
},
{
"epoch": 0.22506329905285863,
"grad_norm": 0.18117383122444153,
"learning_rate": 0.00018529411764705883,
"loss": 0.4533,
"step": 225
},
{
"epoch": 0.22606358038198243,
"grad_norm": 0.17575931549072266,
"learning_rate": 0.00018522727272727273,
"loss": 0.4488,
"step": 226
},
{
"epoch": 0.22706386171110624,
"grad_norm": 0.1864219754934311,
"learning_rate": 0.00018516042780748664,
"loss": 0.4783,
"step": 227
},
{
"epoch": 0.22806414304023007,
"grad_norm": 0.1910594254732132,
"learning_rate": 0.00018509358288770053,
"loss": 0.457,
"step": 228
},
{
"epoch": 0.22906442436935387,
"grad_norm": 0.19613304734230042,
"learning_rate": 0.00018502673796791445,
"loss": 0.4257,
"step": 229
},
{
"epoch": 0.2300647056984777,
"grad_norm": 0.18423783779144287,
"learning_rate": 0.00018495989304812834,
"loss": 0.4526,
"step": 230
},
{
"epoch": 0.2310649870276015,
"grad_norm": 0.17522463202476501,
"learning_rate": 0.00018489304812834226,
"loss": 0.4154,
"step": 231
},
{
"epoch": 0.23206526835672533,
"grad_norm": 0.1805536448955536,
"learning_rate": 0.00018482620320855615,
"loss": 0.4538,
"step": 232
},
{
"epoch": 0.23306554968584914,
"grad_norm": 0.1862708181142807,
"learning_rate": 0.00018475935828877007,
"loss": 0.4379,
"step": 233
},
{
"epoch": 0.23406583101497297,
"grad_norm": 0.1823825240135193,
"learning_rate": 0.00018469251336898396,
"loss": 0.4555,
"step": 234
},
{
"epoch": 0.23506611234409677,
"grad_norm": 0.19339920580387115,
"learning_rate": 0.00018462566844919785,
"loss": 0.4434,
"step": 235
},
{
"epoch": 0.2360663936732206,
"grad_norm": 0.18247473239898682,
"learning_rate": 0.0001845588235294118,
"loss": 0.4502,
"step": 236
},
{
"epoch": 0.2370666750023444,
"grad_norm": 0.18202941119670868,
"learning_rate": 0.0001844919786096257,
"loss": 0.4523,
"step": 237
},
{
"epoch": 0.23806695633146824,
"grad_norm": 0.16963312029838562,
"learning_rate": 0.00018442513368983958,
"loss": 0.472,
"step": 238
},
{
"epoch": 0.23906723766059204,
"grad_norm": 0.186141699552536,
"learning_rate": 0.00018435828877005347,
"loss": 0.4555,
"step": 239
},
{
"epoch": 0.24006751898971584,
"grad_norm": 0.19084033370018005,
"learning_rate": 0.00018429144385026739,
"loss": 0.4571,
"step": 240
},
{
"epoch": 0.24106780031883968,
"grad_norm": 0.1802280694246292,
"learning_rate": 0.0001842245989304813,
"loss": 0.4394,
"step": 241
},
{
"epoch": 0.24206808164796348,
"grad_norm": 0.17549753189086914,
"learning_rate": 0.0001841577540106952,
"loss": 0.4621,
"step": 242
},
{
"epoch": 0.2430683629770873,
"grad_norm": 0.16524949669837952,
"learning_rate": 0.00018409090909090909,
"loss": 0.4437,
"step": 243
},
{
"epoch": 0.2440686443062111,
"grad_norm": 0.18108424544334412,
"learning_rate": 0.000184024064171123,
"loss": 0.4701,
"step": 244
},
{
"epoch": 0.24506892563533494,
"grad_norm": 0.17770162224769592,
"learning_rate": 0.00018395721925133692,
"loss": 0.4459,
"step": 245
},
{
"epoch": 0.24606920696445875,
"grad_norm": 0.180390864610672,
"learning_rate": 0.0001838903743315508,
"loss": 0.4381,
"step": 246
},
{
"epoch": 0.24706948829358258,
"grad_norm": 0.17641372978687286,
"learning_rate": 0.0001838235294117647,
"loss": 0.4242,
"step": 247
},
{
"epoch": 0.24806976962270638,
"grad_norm": 0.1810798943042755,
"learning_rate": 0.00018375668449197862,
"loss": 0.4744,
"step": 248
},
{
"epoch": 0.2490700509518302,
"grad_norm": 0.18043872714042664,
"learning_rate": 0.0001836898395721925,
"loss": 0.4639,
"step": 249
},
{
"epoch": 0.25007033228095404,
"grad_norm": 0.18257585167884827,
"learning_rate": 0.00018362299465240643,
"loss": 0.4434,
"step": 250
},
{
"epoch": 0.2510706136100778,
"grad_norm": 0.1873181015253067,
"learning_rate": 0.00018355614973262032,
"loss": 0.4416,
"step": 251
},
{
"epoch": 0.25207089493920165,
"grad_norm": 0.19883769750595093,
"learning_rate": 0.00018348930481283424,
"loss": 0.4484,
"step": 252
},
{
"epoch": 0.2530711762683255,
"grad_norm": 0.19196969270706177,
"learning_rate": 0.00018342245989304813,
"loss": 0.4466,
"step": 253
},
{
"epoch": 0.25407145759744926,
"grad_norm": 0.1771295964717865,
"learning_rate": 0.00018335561497326205,
"loss": 0.4452,
"step": 254
},
{
"epoch": 0.2550717389265731,
"grad_norm": 0.17763710021972656,
"learning_rate": 0.00018328877005347597,
"loss": 0.4485,
"step": 255
},
{
"epoch": 0.2560720202556969,
"grad_norm": 0.18344314396381378,
"learning_rate": 0.00018322192513368986,
"loss": 0.4499,
"step": 256
},
{
"epoch": 0.25707230158482075,
"grad_norm": 0.1725986897945404,
"learning_rate": 0.00018315508021390375,
"loss": 0.4582,
"step": 257
},
{
"epoch": 0.2580725829139445,
"grad_norm": 0.179547518491745,
"learning_rate": 0.00018308823529411764,
"loss": 0.4522,
"step": 258
},
{
"epoch": 0.25907286424306836,
"grad_norm": 0.17802830040454865,
"learning_rate": 0.00018302139037433158,
"loss": 0.451,
"step": 259
},
{
"epoch": 0.2600731455721922,
"grad_norm": 0.1829269975423813,
"learning_rate": 0.00018295454545454547,
"loss": 0.494,
"step": 260
},
{
"epoch": 0.261073426901316,
"grad_norm": 0.17635449767112732,
"learning_rate": 0.00018288770053475936,
"loss": 0.4434,
"step": 261
},
{
"epoch": 0.2620737082304398,
"grad_norm": 0.1768188774585724,
"learning_rate": 0.00018282085561497326,
"loss": 0.4262,
"step": 262
},
{
"epoch": 0.2630739895595636,
"grad_norm": 0.16978022456169128,
"learning_rate": 0.00018275401069518717,
"loss": 0.4131,
"step": 263
},
{
"epoch": 0.26407427088868746,
"grad_norm": 0.1829555630683899,
"learning_rate": 0.0001826871657754011,
"loss": 0.4738,
"step": 264
},
{
"epoch": 0.26507455221781123,
"grad_norm": 0.1790144294500351,
"learning_rate": 0.00018262032085561498,
"loss": 0.4591,
"step": 265
},
{
"epoch": 0.26607483354693506,
"grad_norm": 0.16714085638523102,
"learning_rate": 0.00018255347593582887,
"loss": 0.45,
"step": 266
},
{
"epoch": 0.2670751148760589,
"grad_norm": 0.1914021223783493,
"learning_rate": 0.0001824866310160428,
"loss": 0.4452,
"step": 267
},
{
"epoch": 0.2680753962051827,
"grad_norm": 0.1649700105190277,
"learning_rate": 0.0001824197860962567,
"loss": 0.412,
"step": 268
},
{
"epoch": 0.2690756775343065,
"grad_norm": 0.1769041270017624,
"learning_rate": 0.0001823529411764706,
"loss": 0.4507,
"step": 269
},
{
"epoch": 0.27007595886343033,
"grad_norm": 0.17330801486968994,
"learning_rate": 0.0001822860962566845,
"loss": 0.434,
"step": 270
},
{
"epoch": 0.27107624019255416,
"grad_norm": 0.17363230884075165,
"learning_rate": 0.0001822192513368984,
"loss": 0.4284,
"step": 271
},
{
"epoch": 0.272076521521678,
"grad_norm": 0.1773332804441452,
"learning_rate": 0.0001821524064171123,
"loss": 0.4501,
"step": 272
},
{
"epoch": 0.27307680285080177,
"grad_norm": 0.18393732607364655,
"learning_rate": 0.00018208556149732622,
"loss": 0.4541,
"step": 273
},
{
"epoch": 0.2740770841799256,
"grad_norm": 0.1747572124004364,
"learning_rate": 0.0001820187165775401,
"loss": 0.4458,
"step": 274
},
{
"epoch": 0.27507736550904943,
"grad_norm": 0.17748300731182098,
"learning_rate": 0.00018195187165775403,
"loss": 0.4446,
"step": 275
},
{
"epoch": 0.27607764683817326,
"grad_norm": 0.16832216084003448,
"learning_rate": 0.00018188502673796792,
"loss": 0.4394,
"step": 276
},
{
"epoch": 0.27707792816729704,
"grad_norm": 0.1661766767501831,
"learning_rate": 0.00018181818181818183,
"loss": 0.468,
"step": 277
},
{
"epoch": 0.27807820949642087,
"grad_norm": 0.17143194377422333,
"learning_rate": 0.00018175133689839575,
"loss": 0.4391,
"step": 278
},
{
"epoch": 0.2790784908255447,
"grad_norm": 0.18259485065937042,
"learning_rate": 0.00018168449197860964,
"loss": 0.4466,
"step": 279
},
{
"epoch": 0.2800787721546685,
"grad_norm": 0.17107920348644257,
"learning_rate": 0.00018161764705882353,
"loss": 0.4306,
"step": 280
},
{
"epoch": 0.2810790534837923,
"grad_norm": 0.17599613964557648,
"learning_rate": 0.00018155080213903742,
"loss": 0.4699,
"step": 281
},
{
"epoch": 0.28207933481291614,
"grad_norm": 0.1756802499294281,
"learning_rate": 0.00018148395721925134,
"loss": 0.4634,
"step": 282
},
{
"epoch": 0.28307961614203997,
"grad_norm": 0.17890664935112,
"learning_rate": 0.00018141711229946526,
"loss": 0.4705,
"step": 283
},
{
"epoch": 0.28407989747116374,
"grad_norm": 0.17714929580688477,
"learning_rate": 0.00018135026737967915,
"loss": 0.4599,
"step": 284
},
{
"epoch": 0.2850801788002876,
"grad_norm": 0.16694459319114685,
"learning_rate": 0.00018128342245989304,
"loss": 0.4342,
"step": 285
},
{
"epoch": 0.2860804601294114,
"grad_norm": 0.1781182587146759,
"learning_rate": 0.00018121657754010696,
"loss": 0.457,
"step": 286
},
{
"epoch": 0.28708074145853524,
"grad_norm": 0.17375454306602478,
"learning_rate": 0.00018114973262032088,
"loss": 0.4485,
"step": 287
},
{
"epoch": 0.288081022787659,
"grad_norm": 0.17337113618850708,
"learning_rate": 0.00018108288770053477,
"loss": 0.4567,
"step": 288
},
{
"epoch": 0.28908130411678284,
"grad_norm": 0.18448792397975922,
"learning_rate": 0.00018101604278074866,
"loss": 0.4494,
"step": 289
},
{
"epoch": 0.2900815854459067,
"grad_norm": 0.17240051925182343,
"learning_rate": 0.00018094919786096258,
"loss": 0.4401,
"step": 290
},
{
"epoch": 0.29108186677503045,
"grad_norm": 0.16623741388320923,
"learning_rate": 0.00018088235294117647,
"loss": 0.4243,
"step": 291
},
{
"epoch": 0.2920821481041543,
"grad_norm": 0.17461100220680237,
"learning_rate": 0.00018081550802139039,
"loss": 0.4369,
"step": 292
},
{
"epoch": 0.2930824294332781,
"grad_norm": 0.16360893845558167,
"learning_rate": 0.00018074866310160428,
"loss": 0.4424,
"step": 293
},
{
"epoch": 0.29408271076240194,
"grad_norm": 0.17972786724567413,
"learning_rate": 0.0001806818181818182,
"loss": 0.4625,
"step": 294
},
{
"epoch": 0.2950829920915257,
"grad_norm": 0.17173191905021667,
"learning_rate": 0.00018061497326203209,
"loss": 0.4336,
"step": 295
},
{
"epoch": 0.29608327342064955,
"grad_norm": 0.18299077451229095,
"learning_rate": 0.000180548128342246,
"loss": 0.4417,
"step": 296
},
{
"epoch": 0.2970835547497734,
"grad_norm": 0.1699686497449875,
"learning_rate": 0.00018048128342245992,
"loss": 0.4414,
"step": 297
},
{
"epoch": 0.2980838360788972,
"grad_norm": 0.1805889904499054,
"learning_rate": 0.0001804144385026738,
"loss": 0.4583,
"step": 298
},
{
"epoch": 0.299084117408021,
"grad_norm": 0.17077642679214478,
"learning_rate": 0.0001803475935828877,
"loss": 0.4176,
"step": 299
},
{
"epoch": 0.3000843987371448,
"grad_norm": 0.18295934796333313,
"learning_rate": 0.0001802807486631016,
"loss": 0.4667,
"step": 300
},
{
"epoch": 0.30108468006626865,
"grad_norm": 0.17283697426319122,
"learning_rate": 0.00018021390374331554,
"loss": 0.4537,
"step": 301
},
{
"epoch": 0.3020849613953925,
"grad_norm": 0.1809026598930359,
"learning_rate": 0.00018014705882352943,
"loss": 0.4606,
"step": 302
},
{
"epoch": 0.30308524272451626,
"grad_norm": 0.18748724460601807,
"learning_rate": 0.00018008021390374332,
"loss": 0.4711,
"step": 303
},
{
"epoch": 0.3040855240536401,
"grad_norm": 0.1709994077682495,
"learning_rate": 0.0001800133689839572,
"loss": 0.4234,
"step": 304
},
{
"epoch": 0.3050858053827639,
"grad_norm": 0.17212459444999695,
"learning_rate": 0.00017994652406417113,
"loss": 0.4399,
"step": 305
},
{
"epoch": 0.3060860867118877,
"grad_norm": 0.18580351769924164,
"learning_rate": 0.00017987967914438505,
"loss": 0.4528,
"step": 306
},
{
"epoch": 0.3070863680410115,
"grad_norm": 0.1647576242685318,
"learning_rate": 0.00017981283422459894,
"loss": 0.4306,
"step": 307
},
{
"epoch": 0.30808664937013536,
"grad_norm": 0.1691340208053589,
"learning_rate": 0.00017974598930481283,
"loss": 0.4366,
"step": 308
},
{
"epoch": 0.3090869306992592,
"grad_norm": 0.16741327941417694,
"learning_rate": 0.00017967914438502675,
"loss": 0.4165,
"step": 309
},
{
"epoch": 0.31008721202838296,
"grad_norm": 0.1716233640909195,
"learning_rate": 0.00017961229946524066,
"loss": 0.4187,
"step": 310
},
{
"epoch": 0.3110874933575068,
"grad_norm": 0.17521385848522186,
"learning_rate": 0.00017954545454545456,
"loss": 0.4236,
"step": 311
},
{
"epoch": 0.3120877746866306,
"grad_norm": 0.1715785562992096,
"learning_rate": 0.00017947860962566845,
"loss": 0.4273,
"step": 312
},
{
"epoch": 0.31308805601575446,
"grad_norm": 0.18492746353149414,
"learning_rate": 0.00017941176470588236,
"loss": 0.4489,
"step": 313
},
{
"epoch": 0.31408833734487823,
"grad_norm": 0.17954295873641968,
"learning_rate": 0.00017934491978609626,
"loss": 0.4094,
"step": 314
},
{
"epoch": 0.31508861867400206,
"grad_norm": 0.17855146527290344,
"learning_rate": 0.00017927807486631017,
"loss": 0.4233,
"step": 315
},
{
"epoch": 0.3160889000031259,
"grad_norm": 0.17364732921123505,
"learning_rate": 0.00017921122994652406,
"loss": 0.4335,
"step": 316
},
{
"epoch": 0.31708918133224967,
"grad_norm": 0.17259429395198822,
"learning_rate": 0.00017914438502673798,
"loss": 0.4449,
"step": 317
},
{
"epoch": 0.3180894626613735,
"grad_norm": 0.17149266600608826,
"learning_rate": 0.00017907754010695187,
"loss": 0.456,
"step": 318
},
{
"epoch": 0.31908974399049733,
"grad_norm": 0.1876770406961441,
"learning_rate": 0.0001790106951871658,
"loss": 0.4284,
"step": 319
},
{
"epoch": 0.32009002531962116,
"grad_norm": 0.20950675010681152,
"learning_rate": 0.0001789438502673797,
"loss": 0.4351,
"step": 320
},
{
"epoch": 0.32109030664874494,
"grad_norm": 0.16910240054130554,
"learning_rate": 0.0001788770053475936,
"loss": 0.4298,
"step": 321
},
{
"epoch": 0.32209058797786877,
"grad_norm": 0.18063318729400635,
"learning_rate": 0.0001788101604278075,
"loss": 0.4493,
"step": 322
},
{
"epoch": 0.3230908693069926,
"grad_norm": 0.23141996562480927,
"learning_rate": 0.00017874331550802138,
"loss": 0.4466,
"step": 323
},
{
"epoch": 0.32409115063611643,
"grad_norm": 0.18119311332702637,
"learning_rate": 0.0001786764705882353,
"loss": 0.4255,
"step": 324
},
{
"epoch": 0.3250914319652402,
"grad_norm": 0.1791078895330429,
"learning_rate": 0.00017860962566844922,
"loss": 0.4235,
"step": 325
},
{
"epoch": 0.32609171329436404,
"grad_norm": 0.1803896129131317,
"learning_rate": 0.0001785427807486631,
"loss": 0.4035,
"step": 326
},
{
"epoch": 0.32709199462348787,
"grad_norm": 0.19173625111579895,
"learning_rate": 0.000178475935828877,
"loss": 0.4676,
"step": 327
},
{
"epoch": 0.32809227595261165,
"grad_norm": 0.17149046063423157,
"learning_rate": 0.00017840909090909092,
"loss": 0.4358,
"step": 328
},
{
"epoch": 0.3290925572817355,
"grad_norm": 0.17859594523906708,
"learning_rate": 0.00017834224598930483,
"loss": 0.426,
"step": 329
},
{
"epoch": 0.3300928386108593,
"grad_norm": 0.1794040948152542,
"learning_rate": 0.00017827540106951872,
"loss": 0.415,
"step": 330
},
{
"epoch": 0.33109311993998314,
"grad_norm": 0.19304677844047546,
"learning_rate": 0.00017820855614973262,
"loss": 0.4618,
"step": 331
},
{
"epoch": 0.3320934012691069,
"grad_norm": 0.17321263253688812,
"learning_rate": 0.00017814171122994653,
"loss": 0.4568,
"step": 332
},
{
"epoch": 0.33309368259823074,
"grad_norm": 0.1944608837366104,
"learning_rate": 0.00017807486631016042,
"loss": 0.4331,
"step": 333
},
{
"epoch": 0.3340939639273546,
"grad_norm": 0.17461110651493073,
"learning_rate": 0.00017800802139037434,
"loss": 0.4561,
"step": 334
},
{
"epoch": 0.3350942452564784,
"grad_norm": 0.19736169278621674,
"learning_rate": 0.00017794117647058823,
"loss": 0.4541,
"step": 335
},
{
"epoch": 0.3360945265856022,
"grad_norm": 0.17889437079429626,
"learning_rate": 0.00017787433155080215,
"loss": 0.4124,
"step": 336
},
{
"epoch": 0.337094807914726,
"grad_norm": 0.17399004101753235,
"learning_rate": 0.00017780748663101604,
"loss": 0.4532,
"step": 337
},
{
"epoch": 0.33809508924384984,
"grad_norm": 0.18272772431373596,
"learning_rate": 0.00017774064171122996,
"loss": 0.4232,
"step": 338
},
{
"epoch": 0.3390953705729737,
"grad_norm": 0.19176781177520752,
"learning_rate": 0.00017767379679144388,
"loss": 0.4596,
"step": 339
},
{
"epoch": 0.34009565190209745,
"grad_norm": 0.18667066097259521,
"learning_rate": 0.00017760695187165777,
"loss": 0.4456,
"step": 340
},
{
"epoch": 0.3410959332312213,
"grad_norm": 0.17947258055210114,
"learning_rate": 0.00017754010695187166,
"loss": 0.4425,
"step": 341
},
{
"epoch": 0.3420962145603451,
"grad_norm": 0.176508367061615,
"learning_rate": 0.00017747326203208555,
"loss": 0.4376,
"step": 342
},
{
"epoch": 0.3430964958894689,
"grad_norm": 0.1660817414522171,
"learning_rate": 0.0001774064171122995,
"loss": 0.4451,
"step": 343
},
{
"epoch": 0.3440967772185927,
"grad_norm": 0.17813463509082794,
"learning_rate": 0.00017733957219251339,
"loss": 0.4719,
"step": 344
},
{
"epoch": 0.34509705854771655,
"grad_norm": 0.1736927628517151,
"learning_rate": 0.00017727272727272728,
"loss": 0.4535,
"step": 345
},
{
"epoch": 0.3460973398768404,
"grad_norm": 0.17061454057693481,
"learning_rate": 0.00017720588235294117,
"loss": 0.425,
"step": 346
},
{
"epoch": 0.34709762120596416,
"grad_norm": 0.18123167753219604,
"learning_rate": 0.00017713903743315509,
"loss": 0.4473,
"step": 347
},
{
"epoch": 0.348097902535088,
"grad_norm": 0.1735294610261917,
"learning_rate": 0.000177072192513369,
"loss": 0.4456,
"step": 348
},
{
"epoch": 0.3490981838642118,
"grad_norm": 0.16325166821479797,
"learning_rate": 0.0001770053475935829,
"loss": 0.4368,
"step": 349
},
{
"epoch": 0.35009846519333565,
"grad_norm": 0.16635017096996307,
"learning_rate": 0.00017693850267379679,
"loss": 0.4358,
"step": 350
},
{
"epoch": 0.3510987465224594,
"grad_norm": 0.16072800755500793,
"learning_rate": 0.0001768716577540107,
"loss": 0.4159,
"step": 351
},
{
"epoch": 0.35209902785158326,
"grad_norm": 0.17705607414245605,
"learning_rate": 0.00017680481283422462,
"loss": 0.4338,
"step": 352
},
{
"epoch": 0.3530993091807071,
"grad_norm": 0.19407153129577637,
"learning_rate": 0.0001767379679144385,
"loss": 0.4487,
"step": 353
},
{
"epoch": 0.35409959050983086,
"grad_norm": 0.16685500741004944,
"learning_rate": 0.0001766711229946524,
"loss": 0.4335,
"step": 354
},
{
"epoch": 0.3550998718389547,
"grad_norm": 0.1735745668411255,
"learning_rate": 0.00017660427807486632,
"loss": 0.4456,
"step": 355
},
{
"epoch": 0.3561001531680785,
"grad_norm": 0.16689899563789368,
"learning_rate": 0.0001765374331550802,
"loss": 0.4432,
"step": 356
},
{
"epoch": 0.35710043449720236,
"grad_norm": 0.16612571477890015,
"learning_rate": 0.00017647058823529413,
"loss": 0.4451,
"step": 357
},
{
"epoch": 0.35810071582632613,
"grad_norm": 0.17355671525001526,
"learning_rate": 0.00017640374331550802,
"loss": 0.428,
"step": 358
},
{
"epoch": 0.35910099715544996,
"grad_norm": 0.16404558718204498,
"learning_rate": 0.00017633689839572194,
"loss": 0.4178,
"step": 359
},
{
"epoch": 0.3601012784845738,
"grad_norm": 0.16635802388191223,
"learning_rate": 0.00017627005347593583,
"loss": 0.4171,
"step": 360
},
{
"epoch": 0.3611015598136976,
"grad_norm": 0.17166917026042938,
"learning_rate": 0.00017620320855614975,
"loss": 0.4354,
"step": 361
},
{
"epoch": 0.3621018411428214,
"grad_norm": 0.1794259250164032,
"learning_rate": 0.00017613636363636366,
"loss": 0.4425,
"step": 362
},
{
"epoch": 0.36310212247194523,
"grad_norm": 0.1819561868906021,
"learning_rate": 0.00017606951871657756,
"loss": 0.453,
"step": 363
},
{
"epoch": 0.36410240380106906,
"grad_norm": 0.178354874253273,
"learning_rate": 0.00017600267379679145,
"loss": 0.4436,
"step": 364
},
{
"epoch": 0.3651026851301929,
"grad_norm": 0.17876660823822021,
"learning_rate": 0.00017593582887700534,
"loss": 0.4525,
"step": 365
},
{
"epoch": 0.36610296645931667,
"grad_norm": 0.18647396564483643,
"learning_rate": 0.00017586898395721928,
"loss": 0.458,
"step": 366
},
{
"epoch": 0.3671032477884405,
"grad_norm": 0.16759748756885529,
"learning_rate": 0.00017580213903743317,
"loss": 0.422,
"step": 367
},
{
"epoch": 0.36810352911756433,
"grad_norm": 0.18720442056655884,
"learning_rate": 0.00017573529411764706,
"loss": 0.4482,
"step": 368
},
{
"epoch": 0.3691038104466881,
"grad_norm": 0.17658625543117523,
"learning_rate": 0.00017566844919786095,
"loss": 0.4303,
"step": 369
},
{
"epoch": 0.37010409177581194,
"grad_norm": 0.17517362534999847,
"learning_rate": 0.00017560160427807487,
"loss": 0.4577,
"step": 370
},
{
"epoch": 0.37110437310493577,
"grad_norm": 0.17305578291416168,
"learning_rate": 0.0001755347593582888,
"loss": 0.4162,
"step": 371
},
{
"epoch": 0.3721046544340596,
"grad_norm": 0.18148252367973328,
"learning_rate": 0.00017546791443850268,
"loss": 0.4323,
"step": 372
},
{
"epoch": 0.3731049357631834,
"grad_norm": 0.18457911908626556,
"learning_rate": 0.00017540106951871657,
"loss": 0.4302,
"step": 373
},
{
"epoch": 0.3741052170923072,
"grad_norm": 0.18693894147872925,
"learning_rate": 0.0001753342245989305,
"loss": 0.4458,
"step": 374
},
{
"epoch": 0.37510549842143104,
"grad_norm": 0.17343318462371826,
"learning_rate": 0.0001752673796791444,
"loss": 0.432,
"step": 375
},
{
"epoch": 0.37610577975055487,
"grad_norm": 0.170964777469635,
"learning_rate": 0.0001752005347593583,
"loss": 0.4302,
"step": 376
},
{
"epoch": 0.37710606107967864,
"grad_norm": 0.1706034541130066,
"learning_rate": 0.0001751336898395722,
"loss": 0.4528,
"step": 377
},
{
"epoch": 0.3781063424088025,
"grad_norm": 0.19066473841667175,
"learning_rate": 0.0001750668449197861,
"loss": 0.4529,
"step": 378
},
{
"epoch": 0.3791066237379263,
"grad_norm": 0.18090970814228058,
"learning_rate": 0.000175,
"loss": 0.4485,
"step": 379
},
{
"epoch": 0.3801069050670501,
"grad_norm": 0.18235009908676147,
"learning_rate": 0.00017493315508021392,
"loss": 0.4482,
"step": 380
},
{
"epoch": 0.3811071863961739,
"grad_norm": 0.17675139009952545,
"learning_rate": 0.00017486631016042783,
"loss": 0.4108,
"step": 381
},
{
"epoch": 0.38210746772529774,
"grad_norm": 0.17461447417736053,
"learning_rate": 0.00017479946524064172,
"loss": 0.4445,
"step": 382
},
{
"epoch": 0.3831077490544216,
"grad_norm": 0.1662529855966568,
"learning_rate": 0.00017473262032085562,
"loss": 0.406,
"step": 383
},
{
"epoch": 0.38410803038354535,
"grad_norm": 0.1768869012594223,
"learning_rate": 0.00017466577540106953,
"loss": 0.4205,
"step": 384
},
{
"epoch": 0.3851083117126692,
"grad_norm": 0.18918122351169586,
"learning_rate": 0.00017459893048128345,
"loss": 0.4807,
"step": 385
},
{
"epoch": 0.386108593041793,
"grad_norm": 0.18057717382907867,
"learning_rate": 0.00017453208556149734,
"loss": 0.4359,
"step": 386
},
{
"epoch": 0.38710887437091684,
"grad_norm": 0.17287477850914001,
"learning_rate": 0.00017446524064171123,
"loss": 0.4233,
"step": 387
},
{
"epoch": 0.3881091557000406,
"grad_norm": 0.1814192831516266,
"learning_rate": 0.00017439839572192512,
"loss": 0.4592,
"step": 388
},
{
"epoch": 0.38910943702916445,
"grad_norm": 0.17137302458286285,
"learning_rate": 0.00017433155080213904,
"loss": 0.4102,
"step": 389
},
{
"epoch": 0.3901097183582883,
"grad_norm": 0.17359258234500885,
"learning_rate": 0.00017426470588235296,
"loss": 0.4134,
"step": 390
},
{
"epoch": 0.39110999968741206,
"grad_norm": 0.17649279534816742,
"learning_rate": 0.00017419786096256685,
"loss": 0.4224,
"step": 391
},
{
"epoch": 0.3921102810165359,
"grad_norm": 0.1819760948419571,
"learning_rate": 0.00017413101604278074,
"loss": 0.4128,
"step": 392
},
{
"epoch": 0.3931105623456597,
"grad_norm": 0.17651373147964478,
"learning_rate": 0.00017406417112299466,
"loss": 0.4424,
"step": 393
},
{
"epoch": 0.39411084367478355,
"grad_norm": 0.18870307505130768,
"learning_rate": 0.00017399732620320858,
"loss": 0.4475,
"step": 394
},
{
"epoch": 0.3951111250039073,
"grad_norm": 0.17702838778495789,
"learning_rate": 0.00017393048128342247,
"loss": 0.4412,
"step": 395
},
{
"epoch": 0.39611140633303116,
"grad_norm": 0.17925161123275757,
"learning_rate": 0.00017386363636363636,
"loss": 0.4643,
"step": 396
},
{
"epoch": 0.397111687662155,
"grad_norm": 0.1825191229581833,
"learning_rate": 0.00017379679144385028,
"loss": 0.4309,
"step": 397
},
{
"epoch": 0.3981119689912788,
"grad_norm": 0.18673871457576752,
"learning_rate": 0.00017372994652406417,
"loss": 0.4395,
"step": 398
},
{
"epoch": 0.3991122503204026,
"grad_norm": 0.1835436075925827,
"learning_rate": 0.00017366310160427809,
"loss": 0.414,
"step": 399
},
{
"epoch": 0.4001125316495264,
"grad_norm": 0.17538726329803467,
"learning_rate": 0.000173596256684492,
"loss": 0.4544,
"step": 400
},
{
"epoch": 0.40111281297865026,
"grad_norm": 0.19464363157749176,
"learning_rate": 0.0001735294117647059,
"loss": 0.4288,
"step": 401
},
{
"epoch": 0.4021130943077741,
"grad_norm": 0.1836351454257965,
"learning_rate": 0.00017346256684491979,
"loss": 0.4205,
"step": 402
},
{
"epoch": 0.40311337563689786,
"grad_norm": 0.16461104154586792,
"learning_rate": 0.0001733957219251337,
"loss": 0.4642,
"step": 403
},
{
"epoch": 0.4041136569660217,
"grad_norm": 0.1738802194595337,
"learning_rate": 0.00017332887700534762,
"loss": 0.4529,
"step": 404
},
{
"epoch": 0.4051139382951455,
"grad_norm": 0.17489774525165558,
"learning_rate": 0.0001732620320855615,
"loss": 0.4581,
"step": 405
},
{
"epoch": 0.4061142196242693,
"grad_norm": 0.17276865243911743,
"learning_rate": 0.0001731951871657754,
"loss": 0.4546,
"step": 406
},
{
"epoch": 0.40711450095339313,
"grad_norm": 0.17556218802928925,
"learning_rate": 0.0001731283422459893,
"loss": 0.4435,
"step": 407
},
{
"epoch": 0.40811478228251696,
"grad_norm": 0.1763908714056015,
"learning_rate": 0.00017306149732620324,
"loss": 0.4279,
"step": 408
},
{
"epoch": 0.4091150636116408,
"grad_norm": 0.18071916699409485,
"learning_rate": 0.00017299465240641713,
"loss": 0.4058,
"step": 409
},
{
"epoch": 0.41011534494076457,
"grad_norm": 0.17551599442958832,
"learning_rate": 0.00017292780748663102,
"loss": 0.4368,
"step": 410
},
{
"epoch": 0.4111156262698884,
"grad_norm": 0.18373918533325195,
"learning_rate": 0.0001728609625668449,
"loss": 0.4376,
"step": 411
},
{
"epoch": 0.41211590759901223,
"grad_norm": 0.19964686036109924,
"learning_rate": 0.00017279411764705883,
"loss": 0.4556,
"step": 412
},
{
"epoch": 0.41311618892813606,
"grad_norm": 0.20226283371448517,
"learning_rate": 0.00017272727272727275,
"loss": 0.4216,
"step": 413
},
{
"epoch": 0.41411647025725984,
"grad_norm": 0.17443345487117767,
"learning_rate": 0.00017266042780748664,
"loss": 0.4299,
"step": 414
},
{
"epoch": 0.41511675158638367,
"grad_norm": 0.16941744089126587,
"learning_rate": 0.00017259358288770053,
"loss": 0.4352,
"step": 415
},
{
"epoch": 0.4161170329155075,
"grad_norm": 0.2127050906419754,
"learning_rate": 0.00017252673796791445,
"loss": 0.4499,
"step": 416
},
{
"epoch": 0.4171173142446313,
"grad_norm": 0.19113492965698242,
"learning_rate": 0.00017245989304812836,
"loss": 0.4335,
"step": 417
},
{
"epoch": 0.4181175955737551,
"grad_norm": 0.17924468219280243,
"learning_rate": 0.00017239304812834225,
"loss": 0.4076,
"step": 418
},
{
"epoch": 0.41911787690287894,
"grad_norm": 0.1749243140220642,
"learning_rate": 0.00017232620320855615,
"loss": 0.466,
"step": 419
},
{
"epoch": 0.42011815823200277,
"grad_norm": 0.18644505739212036,
"learning_rate": 0.00017225935828877006,
"loss": 0.4352,
"step": 420
},
{
"epoch": 0.42111843956112655,
"grad_norm": 0.17397917807102203,
"learning_rate": 0.00017219251336898395,
"loss": 0.4388,
"step": 421
},
{
"epoch": 0.4221187208902504,
"grad_norm": 0.17085954546928406,
"learning_rate": 0.00017212566844919787,
"loss": 0.4515,
"step": 422
},
{
"epoch": 0.4231190022193742,
"grad_norm": 0.16099511086940765,
"learning_rate": 0.0001720588235294118,
"loss": 0.4076,
"step": 423
},
{
"epoch": 0.42411928354849804,
"grad_norm": 0.17795774340629578,
"learning_rate": 0.00017199197860962568,
"loss": 0.4389,
"step": 424
},
{
"epoch": 0.4251195648776218,
"grad_norm": 0.18014943599700928,
"learning_rate": 0.00017192513368983957,
"loss": 0.4256,
"step": 425
},
{
"epoch": 0.42611984620674564,
"grad_norm": 0.18704374134540558,
"learning_rate": 0.0001718582887700535,
"loss": 0.4492,
"step": 426
},
{
"epoch": 0.4271201275358695,
"grad_norm": 0.16948561370372772,
"learning_rate": 0.0001717914438502674,
"loss": 0.4482,
"step": 427
},
{
"epoch": 0.4281204088649933,
"grad_norm": 0.17665095627307892,
"learning_rate": 0.0001717245989304813,
"loss": 0.4234,
"step": 428
},
{
"epoch": 0.4291206901941171,
"grad_norm": 0.19772249460220337,
"learning_rate": 0.0001716577540106952,
"loss": 0.4612,
"step": 429
},
{
"epoch": 0.4301209715232409,
"grad_norm": 0.17998537421226501,
"learning_rate": 0.00017159090909090908,
"loss": 0.4551,
"step": 430
},
{
"epoch": 0.43112125285236474,
"grad_norm": 0.17477943003177643,
"learning_rate": 0.000171524064171123,
"loss": 0.4464,
"step": 431
},
{
"epoch": 0.4321215341814885,
"grad_norm": 0.16711243987083435,
"learning_rate": 0.00017145721925133692,
"loss": 0.4215,
"step": 432
},
{
"epoch": 0.43312181551061235,
"grad_norm": 0.17099756002426147,
"learning_rate": 0.0001713903743315508,
"loss": 0.4178,
"step": 433
},
{
"epoch": 0.4341220968397362,
"grad_norm": 0.17479564249515533,
"learning_rate": 0.0001713235294117647,
"loss": 0.412,
"step": 434
},
{
"epoch": 0.43512237816886,
"grad_norm": 0.1868622601032257,
"learning_rate": 0.00017125668449197862,
"loss": 0.4246,
"step": 435
},
{
"epoch": 0.4361226594979838,
"grad_norm": 0.18481655418872833,
"learning_rate": 0.00017118983957219253,
"loss": 0.4274,
"step": 436
},
{
"epoch": 0.4371229408271076,
"grad_norm": 0.17644591629505157,
"learning_rate": 0.00017112299465240642,
"loss": 0.4475,
"step": 437
},
{
"epoch": 0.43812322215623145,
"grad_norm": 0.18062442541122437,
"learning_rate": 0.00017105614973262032,
"loss": 0.4413,
"step": 438
},
{
"epoch": 0.4391235034853553,
"grad_norm": 0.18078821897506714,
"learning_rate": 0.00017098930481283423,
"loss": 0.4315,
"step": 439
},
{
"epoch": 0.44012378481447906,
"grad_norm": 0.17297616600990295,
"learning_rate": 0.00017092245989304812,
"loss": 0.41,
"step": 440
},
{
"epoch": 0.4411240661436029,
"grad_norm": 0.18727439641952515,
"learning_rate": 0.00017085561497326204,
"loss": 0.4345,
"step": 441
},
{
"epoch": 0.4421243474727267,
"grad_norm": 0.18139448761940002,
"learning_rate": 0.00017078877005347596,
"loss": 0.4126,
"step": 442
},
{
"epoch": 0.4431246288018505,
"grad_norm": 0.17658720910549164,
"learning_rate": 0.00017072192513368985,
"loss": 0.4343,
"step": 443
},
{
"epoch": 0.4441249101309743,
"grad_norm": 0.17612750828266144,
"learning_rate": 0.00017065508021390374,
"loss": 0.4179,
"step": 444
},
{
"epoch": 0.44512519146009816,
"grad_norm": 0.19309662282466888,
"learning_rate": 0.00017058823529411766,
"loss": 0.4469,
"step": 445
},
{
"epoch": 0.446125472789222,
"grad_norm": 0.17980198562145233,
"learning_rate": 0.00017052139037433158,
"loss": 0.4114,
"step": 446
},
{
"epoch": 0.44712575411834576,
"grad_norm": 0.17348790168762207,
"learning_rate": 0.00017045454545454547,
"loss": 0.4234,
"step": 447
},
{
"epoch": 0.4481260354474696,
"grad_norm": 0.1644233614206314,
"learning_rate": 0.00017038770053475936,
"loss": 0.4137,
"step": 448
},
{
"epoch": 0.4491263167765934,
"grad_norm": 0.18324849009513855,
"learning_rate": 0.00017032085561497325,
"loss": 0.4148,
"step": 449
},
{
"epoch": 0.45012659810571726,
"grad_norm": 0.18216699361801147,
"learning_rate": 0.0001702540106951872,
"loss": 0.4248,
"step": 450
},
{
"epoch": 0.45112687943484103,
"grad_norm": 0.1735803186893463,
"learning_rate": 0.00017018716577540109,
"loss": 0.4042,
"step": 451
},
{
"epoch": 0.45212716076396486,
"grad_norm": 0.1867716908454895,
"learning_rate": 0.00017012032085561498,
"loss": 0.4005,
"step": 452
},
{
"epoch": 0.4531274420930887,
"grad_norm": 0.1966044008731842,
"learning_rate": 0.00017005347593582887,
"loss": 0.4372,
"step": 453
},
{
"epoch": 0.45412772342221247,
"grad_norm": 0.20946615934371948,
"learning_rate": 0.00016998663101604278,
"loss": 0.4619,
"step": 454
},
{
"epoch": 0.4551280047513363,
"grad_norm": 0.16564136743545532,
"learning_rate": 0.0001699197860962567,
"loss": 0.4189,
"step": 455
},
{
"epoch": 0.45612828608046013,
"grad_norm": 0.17898212373256683,
"learning_rate": 0.0001698529411764706,
"loss": 0.4326,
"step": 456
},
{
"epoch": 0.45712856740958396,
"grad_norm": 0.16770191490650177,
"learning_rate": 0.00016978609625668448,
"loss": 0.4216,
"step": 457
},
{
"epoch": 0.45812884873870774,
"grad_norm": 0.1793423444032669,
"learning_rate": 0.0001697192513368984,
"loss": 0.4035,
"step": 458
},
{
"epoch": 0.45912913006783157,
"grad_norm": 0.17702436447143555,
"learning_rate": 0.00016965240641711232,
"loss": 0.4351,
"step": 459
},
{
"epoch": 0.4601294113969554,
"grad_norm": 0.16669146716594696,
"learning_rate": 0.0001695855614973262,
"loss": 0.4261,
"step": 460
},
{
"epoch": 0.46112969272607923,
"grad_norm": 0.1851879209280014,
"learning_rate": 0.0001695187165775401,
"loss": 0.432,
"step": 461
},
{
"epoch": 0.462129974055203,
"grad_norm": 0.18338064849376678,
"learning_rate": 0.00016945187165775402,
"loss": 0.4399,
"step": 462
},
{
"epoch": 0.46313025538432684,
"grad_norm": 0.1746056079864502,
"learning_rate": 0.0001693850267379679,
"loss": 0.4243,
"step": 463
},
{
"epoch": 0.46413053671345067,
"grad_norm": 0.18128280341625214,
"learning_rate": 0.00016931818181818183,
"loss": 0.458,
"step": 464
},
{
"epoch": 0.4651308180425745,
"grad_norm": 0.17172598838806152,
"learning_rate": 0.00016925133689839575,
"loss": 0.4352,
"step": 465
},
{
"epoch": 0.4661310993716983,
"grad_norm": 0.16762162744998932,
"learning_rate": 0.00016918449197860964,
"loss": 0.4287,
"step": 466
},
{
"epoch": 0.4671313807008221,
"grad_norm": 0.17377620935440063,
"learning_rate": 0.00016911764705882353,
"loss": 0.3961,
"step": 467
},
{
"epoch": 0.46813166202994594,
"grad_norm": 0.16892342269420624,
"learning_rate": 0.00016905080213903745,
"loss": 0.42,
"step": 468
},
{
"epoch": 0.4691319433590697,
"grad_norm": 0.1723669320344925,
"learning_rate": 0.00016898395721925136,
"loss": 0.4261,
"step": 469
},
{
"epoch": 0.47013222468819355,
"grad_norm": 0.17638066411018372,
"learning_rate": 0.00016891711229946525,
"loss": 0.423,
"step": 470
},
{
"epoch": 0.4711325060173174,
"grad_norm": 0.17456413805484772,
"learning_rate": 0.00016885026737967915,
"loss": 0.4443,
"step": 471
},
{
"epoch": 0.4721327873464412,
"grad_norm": 0.1629389524459839,
"learning_rate": 0.00016878342245989304,
"loss": 0.4,
"step": 472
},
{
"epoch": 0.473133068675565,
"grad_norm": 0.17360949516296387,
"learning_rate": 0.00016871657754010698,
"loss": 0.4235,
"step": 473
},
{
"epoch": 0.4741333500046888,
"grad_norm": 0.17233876883983612,
"learning_rate": 0.00016864973262032087,
"loss": 0.429,
"step": 474
},
{
"epoch": 0.47513363133381264,
"grad_norm": 0.17649391293525696,
"learning_rate": 0.00016858288770053476,
"loss": 0.4278,
"step": 475
},
{
"epoch": 0.4761339126629365,
"grad_norm": 0.17380329966545105,
"learning_rate": 0.00016851604278074865,
"loss": 0.4399,
"step": 476
},
{
"epoch": 0.47713419399206025,
"grad_norm": 0.1689458042383194,
"learning_rate": 0.00016844919786096257,
"loss": 0.4163,
"step": 477
},
{
"epoch": 0.4781344753211841,
"grad_norm": 0.16643808782100677,
"learning_rate": 0.0001683823529411765,
"loss": 0.4297,
"step": 478
},
{
"epoch": 0.4791347566503079,
"grad_norm": 0.16798333823680878,
"learning_rate": 0.00016831550802139038,
"loss": 0.4328,
"step": 479
},
{
"epoch": 0.4801350379794317,
"grad_norm": 0.16696397960186005,
"learning_rate": 0.00016824866310160427,
"loss": 0.4374,
"step": 480
},
{
"epoch": 0.4811353193085555,
"grad_norm": 0.16460229456424713,
"learning_rate": 0.0001681818181818182,
"loss": 0.4474,
"step": 481
},
{
"epoch": 0.48213560063767935,
"grad_norm": 0.17829792201519012,
"learning_rate": 0.0001681149732620321,
"loss": 0.457,
"step": 482
},
{
"epoch": 0.4831358819668032,
"grad_norm": 0.16649176180362701,
"learning_rate": 0.000168048128342246,
"loss": 0.4198,
"step": 483
},
{
"epoch": 0.48413616329592696,
"grad_norm": 0.17545387148857117,
"learning_rate": 0.00016798128342245992,
"loss": 0.4472,
"step": 484
},
{
"epoch": 0.4851364446250508,
"grad_norm": 0.17688940465450287,
"learning_rate": 0.0001679144385026738,
"loss": 0.4283,
"step": 485
},
{
"epoch": 0.4861367259541746,
"grad_norm": 0.17023973166942596,
"learning_rate": 0.0001678475935828877,
"loss": 0.4163,
"step": 486
},
{
"epoch": 0.48713700728329845,
"grad_norm": 0.17296594381332397,
"learning_rate": 0.00016778074866310162,
"loss": 0.4154,
"step": 487
},
{
"epoch": 0.4881372886124222,
"grad_norm": 0.17880389094352722,
"learning_rate": 0.00016771390374331553,
"loss": 0.4237,
"step": 488
},
{
"epoch": 0.48913756994154606,
"grad_norm": 0.17438553273677826,
"learning_rate": 0.00016764705882352942,
"loss": 0.4164,
"step": 489
},
{
"epoch": 0.4901378512706699,
"grad_norm": 0.1780180037021637,
"learning_rate": 0.00016758021390374331,
"loss": 0.418,
"step": 490
},
{
"epoch": 0.4911381325997937,
"grad_norm": 0.18408729135990143,
"learning_rate": 0.00016751336898395723,
"loss": 0.4103,
"step": 491
},
{
"epoch": 0.4921384139289175,
"grad_norm": 0.17175129055976868,
"learning_rate": 0.00016744652406417115,
"loss": 0.4385,
"step": 492
},
{
"epoch": 0.4931386952580413,
"grad_norm": 0.1726345419883728,
"learning_rate": 0.00016737967914438504,
"loss": 0.3934,
"step": 493
},
{
"epoch": 0.49413897658716516,
"grad_norm": 0.1681516170501709,
"learning_rate": 0.00016731283422459893,
"loss": 0.4063,
"step": 494
},
{
"epoch": 0.49513925791628893,
"grad_norm": 0.1690843254327774,
"learning_rate": 0.00016724598930481282,
"loss": 0.4309,
"step": 495
},
{
"epoch": 0.49613953924541276,
"grad_norm": 0.1773832142353058,
"learning_rate": 0.00016717914438502674,
"loss": 0.4512,
"step": 496
},
{
"epoch": 0.4971398205745366,
"grad_norm": 0.16625097393989563,
"learning_rate": 0.00016711229946524066,
"loss": 0.4025,
"step": 497
},
{
"epoch": 0.4981401019036604,
"grad_norm": 0.17904406785964966,
"learning_rate": 0.00016704545454545455,
"loss": 0.4398,
"step": 498
},
{
"epoch": 0.4991403832327842,
"grad_norm": 0.16707228124141693,
"learning_rate": 0.00016697860962566844,
"loss": 0.4309,
"step": 499
},
{
"epoch": 0.5001406645619081,
"grad_norm": 0.16499480605125427,
"learning_rate": 0.00016691176470588236,
"loss": 0.4105,
"step": 500
},
{
"epoch": 0.5011409458910319,
"grad_norm": 0.1724577099084854,
"learning_rate": 0.00016684491978609628,
"loss": 0.4278,
"step": 501
},
{
"epoch": 0.5021412272201556,
"grad_norm": 0.18185541033744812,
"learning_rate": 0.00016677807486631017,
"loss": 0.4176,
"step": 502
},
{
"epoch": 0.5031415085492795,
"grad_norm": 0.1753641813993454,
"learning_rate": 0.00016671122994652406,
"loss": 0.4153,
"step": 503
},
{
"epoch": 0.5041417898784033,
"grad_norm": 0.17050912976264954,
"learning_rate": 0.00016664438502673798,
"loss": 0.4111,
"step": 504
},
{
"epoch": 0.5051420712075271,
"grad_norm": 0.17663219571113586,
"learning_rate": 0.00016657754010695187,
"loss": 0.4226,
"step": 505
},
{
"epoch": 0.506142352536651,
"grad_norm": 0.1756785362958908,
"learning_rate": 0.00016651069518716578,
"loss": 0.4269,
"step": 506
},
{
"epoch": 0.5071426338657747,
"grad_norm": 0.1745450794696808,
"learning_rate": 0.0001664438502673797,
"loss": 0.3996,
"step": 507
},
{
"epoch": 0.5081429151948985,
"grad_norm": 0.17337395250797272,
"learning_rate": 0.0001663770053475936,
"loss": 0.401,
"step": 508
},
{
"epoch": 0.5091431965240224,
"grad_norm": 0.18209293484687805,
"learning_rate": 0.00016631016042780748,
"loss": 0.4115,
"step": 509
},
{
"epoch": 0.5101434778531462,
"grad_norm": 0.18765206634998322,
"learning_rate": 0.0001662433155080214,
"loss": 0.4352,
"step": 510
},
{
"epoch": 0.5111437591822701,
"grad_norm": 0.1839272677898407,
"learning_rate": 0.00016617647058823532,
"loss": 0.3999,
"step": 511
},
{
"epoch": 0.5121440405113938,
"grad_norm": 0.18588875234127045,
"learning_rate": 0.0001661096256684492,
"loss": 0.4352,
"step": 512
},
{
"epoch": 0.5131443218405176,
"grad_norm": 0.175035297870636,
"learning_rate": 0.0001660427807486631,
"loss": 0.4256,
"step": 513
},
{
"epoch": 0.5141446031696415,
"grad_norm": 0.19355317950248718,
"learning_rate": 0.000165975935828877,
"loss": 0.4157,
"step": 514
},
{
"epoch": 0.5151448844987653,
"grad_norm": 0.18002134561538696,
"learning_rate": 0.00016590909090909094,
"loss": 0.426,
"step": 515
},
{
"epoch": 0.516145165827889,
"grad_norm": 0.16922691464424133,
"learning_rate": 0.00016584224598930483,
"loss": 0.421,
"step": 516
},
{
"epoch": 0.5171454471570129,
"grad_norm": 0.16930259764194489,
"learning_rate": 0.00016577540106951872,
"loss": 0.4082,
"step": 517
},
{
"epoch": 0.5181457284861367,
"grad_norm": 0.19758272171020508,
"learning_rate": 0.0001657085561497326,
"loss": 0.3815,
"step": 518
},
{
"epoch": 0.5191460098152605,
"grad_norm": 0.18100370466709137,
"learning_rate": 0.00016564171122994653,
"loss": 0.3952,
"step": 519
},
{
"epoch": 0.5201462911443844,
"grad_norm": 0.17010869085788727,
"learning_rate": 0.00016557486631016045,
"loss": 0.4332,
"step": 520
},
{
"epoch": 0.5211465724735082,
"grad_norm": 0.17811369895935059,
"learning_rate": 0.00016550802139037434,
"loss": 0.4138,
"step": 521
},
{
"epoch": 0.522146853802632,
"grad_norm": 0.16998940706253052,
"learning_rate": 0.00016544117647058823,
"loss": 0.4176,
"step": 522
},
{
"epoch": 0.5231471351317558,
"grad_norm": 0.16401931643486023,
"learning_rate": 0.00016537433155080215,
"loss": 0.4115,
"step": 523
},
{
"epoch": 0.5241474164608796,
"grad_norm": 0.16618947684764862,
"learning_rate": 0.00016530748663101606,
"loss": 0.4224,
"step": 524
},
{
"epoch": 0.5251476977900035,
"grad_norm": 0.16931407153606415,
"learning_rate": 0.00016524064171122995,
"loss": 0.4226,
"step": 525
},
{
"epoch": 0.5261479791191273,
"grad_norm": 0.16492187976837158,
"learning_rate": 0.00016517379679144387,
"loss": 0.4126,
"step": 526
},
{
"epoch": 0.527148260448251,
"grad_norm": 0.16903190314769745,
"learning_rate": 0.00016510695187165776,
"loss": 0.4212,
"step": 527
},
{
"epoch": 0.5281485417773749,
"grad_norm": 0.1670171618461609,
"learning_rate": 0.00016504010695187165,
"loss": 0.4071,
"step": 528
},
{
"epoch": 0.5291488231064987,
"grad_norm": 0.1678818166255951,
"learning_rate": 0.00016497326203208557,
"loss": 0.4588,
"step": 529
},
{
"epoch": 0.5301491044356225,
"grad_norm": 0.16249947249889374,
"learning_rate": 0.0001649064171122995,
"loss": 0.3993,
"step": 530
},
{
"epoch": 0.5311493857647464,
"grad_norm": 0.17632173001766205,
"learning_rate": 0.00016483957219251338,
"loss": 0.418,
"step": 531
},
{
"epoch": 0.5321496670938701,
"grad_norm": 0.17249086499214172,
"learning_rate": 0.00016477272727272727,
"loss": 0.409,
"step": 532
},
{
"epoch": 0.533149948422994,
"grad_norm": 0.1681407243013382,
"learning_rate": 0.0001647058823529412,
"loss": 0.4019,
"step": 533
},
{
"epoch": 0.5341502297521178,
"grad_norm": 0.16458339989185333,
"learning_rate": 0.0001646390374331551,
"loss": 0.4104,
"step": 534
},
{
"epoch": 0.5351505110812416,
"grad_norm": 0.17575211822986603,
"learning_rate": 0.000164572192513369,
"loss": 0.4006,
"step": 535
},
{
"epoch": 0.5361507924103655,
"grad_norm": 0.17218567430973053,
"learning_rate": 0.0001645053475935829,
"loss": 0.422,
"step": 536
},
{
"epoch": 0.5371510737394892,
"grad_norm": 0.1740548014640808,
"learning_rate": 0.00016443850267379678,
"loss": 0.4138,
"step": 537
},
{
"epoch": 0.538151355068613,
"grad_norm": 0.18501082062721252,
"learning_rate": 0.0001643716577540107,
"loss": 0.4371,
"step": 538
},
{
"epoch": 0.5391516363977369,
"grad_norm": 0.17697785794734955,
"learning_rate": 0.00016430481283422462,
"loss": 0.4163,
"step": 539
},
{
"epoch": 0.5401519177268607,
"grad_norm": 0.1623258739709854,
"learning_rate": 0.0001642379679144385,
"loss": 0.3901,
"step": 540
},
{
"epoch": 0.5411521990559846,
"grad_norm": 0.1694764792919159,
"learning_rate": 0.0001641711229946524,
"loss": 0.4227,
"step": 541
},
{
"epoch": 0.5421524803851083,
"grad_norm": 0.16928663849830627,
"learning_rate": 0.00016410427807486631,
"loss": 0.3991,
"step": 542
},
{
"epoch": 0.5431527617142321,
"grad_norm": 0.17353345453739166,
"learning_rate": 0.00016403743315508023,
"loss": 0.396,
"step": 543
},
{
"epoch": 0.544153043043356,
"grad_norm": 0.19051139056682587,
"learning_rate": 0.00016397058823529412,
"loss": 0.4305,
"step": 544
},
{
"epoch": 0.5451533243724798,
"grad_norm": 0.18180570006370544,
"learning_rate": 0.00016390374331550801,
"loss": 0.4121,
"step": 545
},
{
"epoch": 0.5461536057016035,
"grad_norm": 0.1944774091243744,
"learning_rate": 0.00016383689839572193,
"loss": 0.4219,
"step": 546
},
{
"epoch": 0.5471538870307274,
"grad_norm": 0.1738865077495575,
"learning_rate": 0.00016377005347593582,
"loss": 0.4152,
"step": 547
},
{
"epoch": 0.5481541683598512,
"grad_norm": 0.17925100028514862,
"learning_rate": 0.00016370320855614974,
"loss": 0.4306,
"step": 548
},
{
"epoch": 0.549154449688975,
"grad_norm": 0.1889563798904419,
"learning_rate": 0.00016363636363636366,
"loss": 0.429,
"step": 549
},
{
"epoch": 0.5501547310180989,
"grad_norm": 0.19363461434841156,
"learning_rate": 0.00016356951871657755,
"loss": 0.431,
"step": 550
},
{
"epoch": 0.5511550123472226,
"grad_norm": 0.17269189655780792,
"learning_rate": 0.00016350267379679144,
"loss": 0.4177,
"step": 551
},
{
"epoch": 0.5521552936763465,
"grad_norm": 0.1760244518518448,
"learning_rate": 0.00016343582887700536,
"loss": 0.4369,
"step": 552
},
{
"epoch": 0.5531555750054703,
"grad_norm": 0.1785270869731903,
"learning_rate": 0.00016336898395721928,
"loss": 0.42,
"step": 553
},
{
"epoch": 0.5541558563345941,
"grad_norm": 0.18411394953727722,
"learning_rate": 0.00016330213903743317,
"loss": 0.4166,
"step": 554
},
{
"epoch": 0.555156137663718,
"grad_norm": 0.17528562247753143,
"learning_rate": 0.00016323529411764706,
"loss": 0.4415,
"step": 555
},
{
"epoch": 0.5561564189928417,
"grad_norm": 0.1735825389623642,
"learning_rate": 0.00016316844919786095,
"loss": 0.4038,
"step": 556
},
{
"epoch": 0.5571567003219655,
"grad_norm": 0.1755310446023941,
"learning_rate": 0.0001631016042780749,
"loss": 0.4119,
"step": 557
},
{
"epoch": 0.5581569816510894,
"grad_norm": 0.18940363824367523,
"learning_rate": 0.00016303475935828878,
"loss": 0.417,
"step": 558
},
{
"epoch": 0.5591572629802132,
"grad_norm": 0.18386200070381165,
"learning_rate": 0.00016296791443850268,
"loss": 0.4166,
"step": 559
},
{
"epoch": 0.560157544309337,
"grad_norm": 0.17416515946388245,
"learning_rate": 0.00016290106951871657,
"loss": 0.4006,
"step": 560
},
{
"epoch": 0.5611578256384608,
"grad_norm": 0.1847766488790512,
"learning_rate": 0.00016283422459893048,
"loss": 0.4399,
"step": 561
},
{
"epoch": 0.5621581069675846,
"grad_norm": 0.1844712197780609,
"learning_rate": 0.0001627673796791444,
"loss": 0.4153,
"step": 562
},
{
"epoch": 0.5631583882967085,
"grad_norm": 0.18717245757579803,
"learning_rate": 0.0001627005347593583,
"loss": 0.4587,
"step": 563
},
{
"epoch": 0.5641586696258323,
"grad_norm": 0.1745343953371048,
"learning_rate": 0.00016263368983957218,
"loss": 0.4271,
"step": 564
},
{
"epoch": 0.565158950954956,
"grad_norm": 0.17303511500358582,
"learning_rate": 0.0001625668449197861,
"loss": 0.4232,
"step": 565
},
{
"epoch": 0.5661592322840799,
"grad_norm": 0.18203690648078918,
"learning_rate": 0.00016250000000000002,
"loss": 0.4285,
"step": 566
},
{
"epoch": 0.5671595136132037,
"grad_norm": 0.179796501994133,
"learning_rate": 0.0001624331550802139,
"loss": 0.4043,
"step": 567
},
{
"epoch": 0.5681597949423275,
"grad_norm": 0.16504420340061188,
"learning_rate": 0.00016236631016042783,
"loss": 0.3814,
"step": 568
},
{
"epoch": 0.5691600762714514,
"grad_norm": 0.1814291626214981,
"learning_rate": 0.00016229946524064172,
"loss": 0.4174,
"step": 569
},
{
"epoch": 0.5701603576005752,
"grad_norm": 0.18535339832305908,
"learning_rate": 0.0001622326203208556,
"loss": 0.4347,
"step": 570
},
{
"epoch": 0.5711606389296989,
"grad_norm": 0.17747478187084198,
"learning_rate": 0.00016216577540106953,
"loss": 0.4079,
"step": 571
},
{
"epoch": 0.5721609202588228,
"grad_norm": 0.1741618514060974,
"learning_rate": 0.00016209893048128345,
"loss": 0.4227,
"step": 572
},
{
"epoch": 0.5731612015879466,
"grad_norm": 0.17553062736988068,
"learning_rate": 0.00016203208556149734,
"loss": 0.429,
"step": 573
},
{
"epoch": 0.5741614829170705,
"grad_norm": 0.19394700229167938,
"learning_rate": 0.00016196524064171123,
"loss": 0.4382,
"step": 574
},
{
"epoch": 0.5751617642461943,
"grad_norm": 0.17279013991355896,
"learning_rate": 0.00016189839572192515,
"loss": 0.4438,
"step": 575
},
{
"epoch": 0.576162045575318,
"grad_norm": 0.1781262457370758,
"learning_rate": 0.00016183155080213906,
"loss": 0.4095,
"step": 576
},
{
"epoch": 0.5771623269044419,
"grad_norm": 0.16312651336193085,
"learning_rate": 0.00016176470588235295,
"loss": 0.4036,
"step": 577
},
{
"epoch": 0.5781626082335657,
"grad_norm": 0.17451059818267822,
"learning_rate": 0.00016169786096256684,
"loss": 0.3971,
"step": 578
},
{
"epoch": 0.5791628895626895,
"grad_norm": 0.185821533203125,
"learning_rate": 0.00016163101604278074,
"loss": 0.4266,
"step": 579
},
{
"epoch": 0.5801631708918134,
"grad_norm": 0.1637738049030304,
"learning_rate": 0.00016156417112299468,
"loss": 0.4039,
"step": 580
},
{
"epoch": 0.5811634522209371,
"grad_norm": 0.17572863399982452,
"learning_rate": 0.00016149732620320857,
"loss": 0.4154,
"step": 581
},
{
"epoch": 0.5821637335500609,
"grad_norm": 0.17879387736320496,
"learning_rate": 0.00016143048128342246,
"loss": 0.4208,
"step": 582
},
{
"epoch": 0.5831640148791848,
"grad_norm": 0.17145629227161407,
"learning_rate": 0.00016136363636363635,
"loss": 0.4061,
"step": 583
},
{
"epoch": 0.5841642962083086,
"grad_norm": 0.17503105103969574,
"learning_rate": 0.00016129679144385027,
"loss": 0.4173,
"step": 584
},
{
"epoch": 0.5851645775374325,
"grad_norm": 0.17581413686275482,
"learning_rate": 0.0001612299465240642,
"loss": 0.4212,
"step": 585
},
{
"epoch": 0.5861648588665562,
"grad_norm": 0.17574447393417358,
"learning_rate": 0.00016116310160427808,
"loss": 0.4023,
"step": 586
},
{
"epoch": 0.58716514019568,
"grad_norm": 0.17156046628952026,
"learning_rate": 0.000161096256684492,
"loss": 0.4115,
"step": 587
},
{
"epoch": 0.5881654215248039,
"grad_norm": 0.1805231273174286,
"learning_rate": 0.0001610294117647059,
"loss": 0.4235,
"step": 588
},
{
"epoch": 0.5891657028539277,
"grad_norm": 0.17837534844875336,
"learning_rate": 0.0001609625668449198,
"loss": 0.4429,
"step": 589
},
{
"epoch": 0.5901659841830514,
"grad_norm": 0.16711211204528809,
"learning_rate": 0.0001608957219251337,
"loss": 0.4129,
"step": 590
},
{
"epoch": 0.5911662655121753,
"grad_norm": 0.182355597615242,
"learning_rate": 0.00016082887700534762,
"loss": 0.4417,
"step": 591
},
{
"epoch": 0.5921665468412991,
"grad_norm": 0.17298145592212677,
"learning_rate": 0.0001607620320855615,
"loss": 0.4206,
"step": 592
},
{
"epoch": 0.5931668281704229,
"grad_norm": 0.16272862255573273,
"learning_rate": 0.0001606951871657754,
"loss": 0.4032,
"step": 593
},
{
"epoch": 0.5941671094995468,
"grad_norm": 0.170026957988739,
"learning_rate": 0.00016062834224598931,
"loss": 0.399,
"step": 594
},
{
"epoch": 0.5951673908286705,
"grad_norm": 0.1715223640203476,
"learning_rate": 0.00016056149732620323,
"loss": 0.4024,
"step": 595
},
{
"epoch": 0.5961676721577944,
"grad_norm": 0.18015970289707184,
"learning_rate": 0.00016049465240641712,
"loss": 0.4189,
"step": 596
},
{
"epoch": 0.5971679534869182,
"grad_norm": 0.16714516282081604,
"learning_rate": 0.00016042780748663101,
"loss": 0.4135,
"step": 597
},
{
"epoch": 0.598168234816042,
"grad_norm": 0.17524467408657074,
"learning_rate": 0.00016036096256684493,
"loss": 0.3866,
"step": 598
},
{
"epoch": 0.5991685161451659,
"grad_norm": 0.1763477623462677,
"learning_rate": 0.00016029411764705885,
"loss": 0.3972,
"step": 599
},
{
"epoch": 0.6001687974742896,
"grad_norm": 0.1930224597454071,
"learning_rate": 0.00016022727272727274,
"loss": 0.4334,
"step": 600
},
{
"epoch": 0.6011690788034134,
"grad_norm": 0.1898859143257141,
"learning_rate": 0.00016016042780748663,
"loss": 0.4082,
"step": 601
},
{
"epoch": 0.6021693601325373,
"grad_norm": 0.18250437080860138,
"learning_rate": 0.00016009358288770052,
"loss": 0.4356,
"step": 602
},
{
"epoch": 0.6031696414616611,
"grad_norm": 0.16886284947395325,
"learning_rate": 0.00016002673796791444,
"loss": 0.4097,
"step": 603
},
{
"epoch": 0.604169922790785,
"grad_norm": 0.18321110308170319,
"learning_rate": 0.00015995989304812836,
"loss": 0.4082,
"step": 604
},
{
"epoch": 0.6051702041199087,
"grad_norm": 0.18324923515319824,
"learning_rate": 0.00015989304812834225,
"loss": 0.4378,
"step": 605
},
{
"epoch": 0.6061704854490325,
"grad_norm": 0.17818456888198853,
"learning_rate": 0.00015982620320855614,
"loss": 0.4212,
"step": 606
},
{
"epoch": 0.6071707667781564,
"grad_norm": 0.17111051082611084,
"learning_rate": 0.00015975935828877006,
"loss": 0.3938,
"step": 607
},
{
"epoch": 0.6081710481072802,
"grad_norm": 0.171490877866745,
"learning_rate": 0.00015969251336898398,
"loss": 0.4357,
"step": 608
},
{
"epoch": 0.609171329436404,
"grad_norm": 0.1783282458782196,
"learning_rate": 0.00015962566844919787,
"loss": 0.4241,
"step": 609
},
{
"epoch": 0.6101716107655278,
"grad_norm": 0.1665242314338684,
"learning_rate": 0.00015955882352941178,
"loss": 0.3968,
"step": 610
},
{
"epoch": 0.6111718920946516,
"grad_norm": 0.17004479467868805,
"learning_rate": 0.00015949197860962568,
"loss": 0.4215,
"step": 611
},
{
"epoch": 0.6121721734237754,
"grad_norm": 0.190244659781456,
"learning_rate": 0.00015942513368983957,
"loss": 0.4211,
"step": 612
},
{
"epoch": 0.6131724547528993,
"grad_norm": 0.17213775217533112,
"learning_rate": 0.00015935828877005348,
"loss": 0.4124,
"step": 613
},
{
"epoch": 0.614172736082023,
"grad_norm": 0.18273095786571503,
"learning_rate": 0.0001592914438502674,
"loss": 0.4075,
"step": 614
},
{
"epoch": 0.6151730174111469,
"grad_norm": 0.1769276261329651,
"learning_rate": 0.0001592245989304813,
"loss": 0.4242,
"step": 615
},
{
"epoch": 0.6161732987402707,
"grad_norm": 0.18576711416244507,
"learning_rate": 0.00015915775401069518,
"loss": 0.4247,
"step": 616
},
{
"epoch": 0.6171735800693945,
"grad_norm": 0.1755998581647873,
"learning_rate": 0.0001590909090909091,
"loss": 0.401,
"step": 617
},
{
"epoch": 0.6181738613985184,
"grad_norm": 0.17052628099918365,
"learning_rate": 0.00015902406417112302,
"loss": 0.4184,
"step": 618
},
{
"epoch": 0.6191741427276422,
"grad_norm": 0.16688649356365204,
"learning_rate": 0.0001589572192513369,
"loss": 0.4058,
"step": 619
},
{
"epoch": 0.6201744240567659,
"grad_norm": 0.15762940049171448,
"learning_rate": 0.0001588903743315508,
"loss": 0.4139,
"step": 620
},
{
"epoch": 0.6211747053858898,
"grad_norm": 0.1699974089860916,
"learning_rate": 0.0001588235294117647,
"loss": 0.4342,
"step": 621
},
{
"epoch": 0.6221749867150136,
"grad_norm": 0.1690402776002884,
"learning_rate": 0.00015875668449197864,
"loss": 0.4102,
"step": 622
},
{
"epoch": 0.6231752680441374,
"grad_norm": 0.17223213613033295,
"learning_rate": 0.00015868983957219253,
"loss": 0.417,
"step": 623
},
{
"epoch": 0.6241755493732613,
"grad_norm": 0.18006610870361328,
"learning_rate": 0.00015862299465240642,
"loss": 0.4183,
"step": 624
},
{
"epoch": 0.625175830702385,
"grad_norm": 0.16844414174556732,
"learning_rate": 0.0001585561497326203,
"loss": 0.4084,
"step": 625
},
{
"epoch": 0.6261761120315089,
"grad_norm": 0.17007195949554443,
"learning_rate": 0.00015848930481283423,
"loss": 0.3945,
"step": 626
},
{
"epoch": 0.6271763933606327,
"grad_norm": 0.17890439927577972,
"learning_rate": 0.00015842245989304815,
"loss": 0.3977,
"step": 627
},
{
"epoch": 0.6281766746897565,
"grad_norm": 0.17793171107769012,
"learning_rate": 0.00015835561497326204,
"loss": 0.397,
"step": 628
},
{
"epoch": 0.6291769560188804,
"grad_norm": 0.18395744264125824,
"learning_rate": 0.00015828877005347595,
"loss": 0.4228,
"step": 629
},
{
"epoch": 0.6301772373480041,
"grad_norm": 0.17771929502487183,
"learning_rate": 0.00015822192513368984,
"loss": 0.4093,
"step": 630
},
{
"epoch": 0.6311775186771279,
"grad_norm": 0.1839386522769928,
"learning_rate": 0.00015815508021390376,
"loss": 0.4028,
"step": 631
},
{
"epoch": 0.6321778000062518,
"grad_norm": 0.16454514861106873,
"learning_rate": 0.00015808823529411765,
"loss": 0.3936,
"step": 632
},
{
"epoch": 0.6331780813353756,
"grad_norm": 0.1736559122800827,
"learning_rate": 0.00015802139037433157,
"loss": 0.416,
"step": 633
},
{
"epoch": 0.6341783626644993,
"grad_norm": 0.1678173691034317,
"learning_rate": 0.00015795454545454546,
"loss": 0.4278,
"step": 634
},
{
"epoch": 0.6351786439936232,
"grad_norm": 0.17004351317882538,
"learning_rate": 0.00015788770053475935,
"loss": 0.4212,
"step": 635
},
{
"epoch": 0.636178925322747,
"grad_norm": 0.17825426161289215,
"learning_rate": 0.00015782085561497327,
"loss": 0.3898,
"step": 636
},
{
"epoch": 0.6371792066518709,
"grad_norm": 0.16482678055763245,
"learning_rate": 0.0001577540106951872,
"loss": 0.4055,
"step": 637
},
{
"epoch": 0.6381794879809947,
"grad_norm": 0.1691804975271225,
"learning_rate": 0.00015768716577540108,
"loss": 0.4137,
"step": 638
},
{
"epoch": 0.6391797693101184,
"grad_norm": 0.16740399599075317,
"learning_rate": 0.00015762032085561497,
"loss": 0.3891,
"step": 639
},
{
"epoch": 0.6401800506392423,
"grad_norm": 0.1827842891216278,
"learning_rate": 0.0001575534759358289,
"loss": 0.4156,
"step": 640
},
{
"epoch": 0.6411803319683661,
"grad_norm": 0.1753103882074356,
"learning_rate": 0.0001574866310160428,
"loss": 0.4046,
"step": 641
},
{
"epoch": 0.6421806132974899,
"grad_norm": 0.18446987867355347,
"learning_rate": 0.0001574197860962567,
"loss": 0.4195,
"step": 642
},
{
"epoch": 0.6431808946266138,
"grad_norm": 0.17731043696403503,
"learning_rate": 0.0001573529411764706,
"loss": 0.4227,
"step": 643
},
{
"epoch": 0.6441811759557375,
"grad_norm": 0.1867007315158844,
"learning_rate": 0.00015728609625668448,
"loss": 0.4386,
"step": 644
},
{
"epoch": 0.6451814572848613,
"grad_norm": 0.18152248859405518,
"learning_rate": 0.0001572192513368984,
"loss": 0.4237,
"step": 645
},
{
"epoch": 0.6461817386139852,
"grad_norm": 0.18429970741271973,
"learning_rate": 0.00015715240641711231,
"loss": 0.4346,
"step": 646
},
{
"epoch": 0.647182019943109,
"grad_norm": 0.16958941519260406,
"learning_rate": 0.0001570855614973262,
"loss": 0.4136,
"step": 647
},
{
"epoch": 0.6481823012722329,
"grad_norm": 0.17754653096199036,
"learning_rate": 0.0001570187165775401,
"loss": 0.4054,
"step": 648
},
{
"epoch": 0.6491825826013566,
"grad_norm": 0.1706933081150055,
"learning_rate": 0.00015695187165775401,
"loss": 0.3872,
"step": 649
},
{
"epoch": 0.6501828639304804,
"grad_norm": 0.1693519949913025,
"learning_rate": 0.00015688502673796793,
"loss": 0.4023,
"step": 650
},
{
"epoch": 0.6511831452596043,
"grad_norm": 0.18293803930282593,
"learning_rate": 0.00015681818181818182,
"loss": 0.4426,
"step": 651
},
{
"epoch": 0.6521834265887281,
"grad_norm": 0.18341802060604095,
"learning_rate": 0.00015675133689839574,
"loss": 0.4348,
"step": 652
},
{
"epoch": 0.6531837079178519,
"grad_norm": 0.17068350315093994,
"learning_rate": 0.00015668449197860963,
"loss": 0.3951,
"step": 653
},
{
"epoch": 0.6541839892469757,
"grad_norm": 0.1712416708469391,
"learning_rate": 0.00015661764705882352,
"loss": 0.4018,
"step": 654
},
{
"epoch": 0.6551842705760995,
"grad_norm": 0.17853021621704102,
"learning_rate": 0.00015655080213903744,
"loss": 0.4425,
"step": 655
},
{
"epoch": 0.6561845519052233,
"grad_norm": 0.17284280061721802,
"learning_rate": 0.00015648395721925136,
"loss": 0.4306,
"step": 656
},
{
"epoch": 0.6571848332343472,
"grad_norm": 0.17899803817272186,
"learning_rate": 0.00015641711229946525,
"loss": 0.4023,
"step": 657
},
{
"epoch": 0.658185114563471,
"grad_norm": 0.1919001191854477,
"learning_rate": 0.00015635026737967914,
"loss": 0.4415,
"step": 658
},
{
"epoch": 0.6591853958925948,
"grad_norm": 0.1722707599401474,
"learning_rate": 0.00015628342245989306,
"loss": 0.4176,
"step": 659
},
{
"epoch": 0.6601856772217186,
"grad_norm": 0.17643126845359802,
"learning_rate": 0.00015621657754010698,
"loss": 0.4014,
"step": 660
},
{
"epoch": 0.6611859585508424,
"grad_norm": 0.18269233405590057,
"learning_rate": 0.00015614973262032087,
"loss": 0.4113,
"step": 661
},
{
"epoch": 0.6621862398799663,
"grad_norm": 0.16375958919525146,
"learning_rate": 0.00015608288770053476,
"loss": 0.367,
"step": 662
},
{
"epoch": 0.66318652120909,
"grad_norm": 0.18737877905368805,
"learning_rate": 0.00015601604278074865,
"loss": 0.4145,
"step": 663
},
{
"epoch": 0.6641868025382138,
"grad_norm": 0.17648738622665405,
"learning_rate": 0.0001559491978609626,
"loss": 0.3852,
"step": 664
},
{
"epoch": 0.6651870838673377,
"grad_norm": 0.18149302899837494,
"learning_rate": 0.00015588235294117648,
"loss": 0.4187,
"step": 665
},
{
"epoch": 0.6661873651964615,
"grad_norm": 0.17750906944274902,
"learning_rate": 0.00015581550802139037,
"loss": 0.4126,
"step": 666
},
{
"epoch": 0.6671876465255854,
"grad_norm": 0.17197686433792114,
"learning_rate": 0.00015574866310160427,
"loss": 0.4182,
"step": 667
},
{
"epoch": 0.6681879278547092,
"grad_norm": 0.1686813086271286,
"learning_rate": 0.00015568181818181818,
"loss": 0.3887,
"step": 668
},
{
"epoch": 0.6691882091838329,
"grad_norm": 0.1689004749059677,
"learning_rate": 0.0001556149732620321,
"loss": 0.4121,
"step": 669
},
{
"epoch": 0.6701884905129568,
"grad_norm": 0.18247756361961365,
"learning_rate": 0.000155548128342246,
"loss": 0.4158,
"step": 670
},
{
"epoch": 0.6711887718420806,
"grad_norm": 0.18172405660152435,
"learning_rate": 0.0001554812834224599,
"loss": 0.4221,
"step": 671
},
{
"epoch": 0.6721890531712044,
"grad_norm": 0.18004614114761353,
"learning_rate": 0.0001554144385026738,
"loss": 0.4157,
"step": 672
},
{
"epoch": 0.6731893345003283,
"grad_norm": 0.1740560680627823,
"learning_rate": 0.00015534759358288772,
"loss": 0.3958,
"step": 673
},
{
"epoch": 0.674189615829452,
"grad_norm": 0.1756337434053421,
"learning_rate": 0.0001552807486631016,
"loss": 0.4217,
"step": 674
},
{
"epoch": 0.6751898971585758,
"grad_norm": 0.16236595809459686,
"learning_rate": 0.00015521390374331553,
"loss": 0.4009,
"step": 675
},
{
"epoch": 0.6761901784876997,
"grad_norm": 0.17603887617588043,
"learning_rate": 0.00015514705882352942,
"loss": 0.4351,
"step": 676
},
{
"epoch": 0.6771904598168235,
"grad_norm": 0.1763356328010559,
"learning_rate": 0.0001550802139037433,
"loss": 0.4334,
"step": 677
},
{
"epoch": 0.6781907411459474,
"grad_norm": 0.17445343732833862,
"learning_rate": 0.00015501336898395723,
"loss": 0.4199,
"step": 678
},
{
"epoch": 0.6791910224750711,
"grad_norm": 0.17688532173633575,
"learning_rate": 0.00015494652406417115,
"loss": 0.4314,
"step": 679
},
{
"epoch": 0.6801913038041949,
"grad_norm": 0.17520317435264587,
"learning_rate": 0.00015487967914438504,
"loss": 0.4128,
"step": 680
},
{
"epoch": 0.6811915851333188,
"grad_norm": 0.17253170907497406,
"learning_rate": 0.00015481283422459893,
"loss": 0.4095,
"step": 681
},
{
"epoch": 0.6821918664624426,
"grad_norm": 0.17683500051498413,
"learning_rate": 0.00015474598930481284,
"loss": 0.4037,
"step": 682
},
{
"epoch": 0.6831921477915663,
"grad_norm": 0.16928136348724365,
"learning_rate": 0.00015467914438502676,
"loss": 0.397,
"step": 683
},
{
"epoch": 0.6841924291206902,
"grad_norm": 0.18264882266521454,
"learning_rate": 0.00015461229946524065,
"loss": 0.4387,
"step": 684
},
{
"epoch": 0.685192710449814,
"grad_norm": 0.18944154679775238,
"learning_rate": 0.00015454545454545454,
"loss": 0.4114,
"step": 685
},
{
"epoch": 0.6861929917789378,
"grad_norm": 0.17461170256137848,
"learning_rate": 0.00015447860962566844,
"loss": 0.4068,
"step": 686
},
{
"epoch": 0.6871932731080617,
"grad_norm": 0.16725128889083862,
"learning_rate": 0.00015441176470588238,
"loss": 0.3937,
"step": 687
},
{
"epoch": 0.6881935544371854,
"grad_norm": 0.1661311835050583,
"learning_rate": 0.00015434491978609627,
"loss": 0.4102,
"step": 688
},
{
"epoch": 0.6891938357663093,
"grad_norm": 0.17577211558818817,
"learning_rate": 0.00015427807486631016,
"loss": 0.4033,
"step": 689
},
{
"epoch": 0.6901941170954331,
"grad_norm": 0.17130760848522186,
"learning_rate": 0.00015421122994652405,
"loss": 0.412,
"step": 690
},
{
"epoch": 0.6911943984245569,
"grad_norm": 0.1697661131620407,
"learning_rate": 0.00015414438502673797,
"loss": 0.3953,
"step": 691
},
{
"epoch": 0.6921946797536808,
"grad_norm": 0.17887946963310242,
"learning_rate": 0.0001540775401069519,
"loss": 0.431,
"step": 692
},
{
"epoch": 0.6931949610828045,
"grad_norm": 0.1887810230255127,
"learning_rate": 0.00015401069518716578,
"loss": 0.4294,
"step": 693
},
{
"epoch": 0.6941952424119283,
"grad_norm": 0.19596914947032928,
"learning_rate": 0.0001539438502673797,
"loss": 0.4292,
"step": 694
},
{
"epoch": 0.6951955237410522,
"grad_norm": 0.18208423256874084,
"learning_rate": 0.0001538770053475936,
"loss": 0.4102,
"step": 695
},
{
"epoch": 0.696195805070176,
"grad_norm": 0.1716201901435852,
"learning_rate": 0.0001538101604278075,
"loss": 0.4067,
"step": 696
},
{
"epoch": 0.6971960863992998,
"grad_norm": 0.18010923266410828,
"learning_rate": 0.0001537433155080214,
"loss": 0.4016,
"step": 697
},
{
"epoch": 0.6981963677284236,
"grad_norm": 0.1718294769525528,
"learning_rate": 0.00015367647058823531,
"loss": 0.3937,
"step": 698
},
{
"epoch": 0.6991966490575474,
"grad_norm": 0.16809529066085815,
"learning_rate": 0.0001536096256684492,
"loss": 0.3989,
"step": 699
},
{
"epoch": 0.7001969303866713,
"grad_norm": 0.17799147963523865,
"learning_rate": 0.0001535427807486631,
"loss": 0.4316,
"step": 700
},
{
"epoch": 0.7011972117157951,
"grad_norm": 0.16764914989471436,
"learning_rate": 0.00015347593582887701,
"loss": 0.3922,
"step": 701
},
{
"epoch": 0.7021974930449189,
"grad_norm": 0.1715439260005951,
"learning_rate": 0.00015340909090909093,
"loss": 0.3947,
"step": 702
},
{
"epoch": 0.7031977743740427,
"grad_norm": 0.17045070230960846,
"learning_rate": 0.00015334224598930482,
"loss": 0.3825,
"step": 703
},
{
"epoch": 0.7041980557031665,
"grad_norm": 0.16872522234916687,
"learning_rate": 0.00015327540106951871,
"loss": 0.4129,
"step": 704
},
{
"epoch": 0.7051983370322903,
"grad_norm": 0.18743546307086945,
"learning_rate": 0.00015320855614973263,
"loss": 0.42,
"step": 705
},
{
"epoch": 0.7061986183614142,
"grad_norm": 0.1783863753080368,
"learning_rate": 0.00015314171122994655,
"loss": 0.4162,
"step": 706
},
{
"epoch": 0.707198899690538,
"grad_norm": 0.17544718086719513,
"learning_rate": 0.00015307486631016044,
"loss": 0.4107,
"step": 707
},
{
"epoch": 0.7081991810196617,
"grad_norm": 0.16872859001159668,
"learning_rate": 0.00015300802139037433,
"loss": 0.4039,
"step": 708
},
{
"epoch": 0.7091994623487856,
"grad_norm": 0.16296930611133575,
"learning_rate": 0.00015294117647058822,
"loss": 0.3857,
"step": 709
},
{
"epoch": 0.7101997436779094,
"grad_norm": 0.16989687085151672,
"learning_rate": 0.00015287433155080214,
"loss": 0.4097,
"step": 710
},
{
"epoch": 0.7112000250070333,
"grad_norm": 0.17646653950214386,
"learning_rate": 0.00015280748663101606,
"loss": 0.4004,
"step": 711
},
{
"epoch": 0.712200306336157,
"grad_norm": 0.16280825436115265,
"learning_rate": 0.00015274064171122995,
"loss": 0.386,
"step": 712
},
{
"epoch": 0.7132005876652808,
"grad_norm": 0.18282656371593475,
"learning_rate": 0.00015267379679144387,
"loss": 0.4271,
"step": 713
},
{
"epoch": 0.7142008689944047,
"grad_norm": 0.17502710223197937,
"learning_rate": 0.00015260695187165776,
"loss": 0.4426,
"step": 714
},
{
"epoch": 0.7152011503235285,
"grad_norm": 0.16774967312812805,
"learning_rate": 0.00015254010695187168,
"loss": 0.3889,
"step": 715
},
{
"epoch": 0.7162014316526523,
"grad_norm": 0.17474064230918884,
"learning_rate": 0.00015247326203208557,
"loss": 0.4182,
"step": 716
},
{
"epoch": 0.7172017129817762,
"grad_norm": 0.1727539449930191,
"learning_rate": 0.00015240641711229948,
"loss": 0.3914,
"step": 717
},
{
"epoch": 0.7182019943108999,
"grad_norm": 0.17711150646209717,
"learning_rate": 0.00015233957219251337,
"loss": 0.3967,
"step": 718
},
{
"epoch": 0.7192022756400237,
"grad_norm": 0.17282328009605408,
"learning_rate": 0.00015227272727272727,
"loss": 0.4186,
"step": 719
},
{
"epoch": 0.7202025569691476,
"grad_norm": 0.18157802522182465,
"learning_rate": 0.00015220588235294118,
"loss": 0.4219,
"step": 720
},
{
"epoch": 0.7212028382982714,
"grad_norm": 0.1840929538011551,
"learning_rate": 0.0001521390374331551,
"loss": 0.4004,
"step": 721
},
{
"epoch": 0.7222031196273953,
"grad_norm": 0.17365121841430664,
"learning_rate": 0.000152072192513369,
"loss": 0.399,
"step": 722
},
{
"epoch": 0.723203400956519,
"grad_norm": 0.1816079169511795,
"learning_rate": 0.00015200534759358288,
"loss": 0.4073,
"step": 723
},
{
"epoch": 0.7242036822856428,
"grad_norm": 0.17313836514949799,
"learning_rate": 0.0001519385026737968,
"loss": 0.3875,
"step": 724
},
{
"epoch": 0.7252039636147667,
"grad_norm": 0.16790300607681274,
"learning_rate": 0.00015187165775401072,
"loss": 0.4075,
"step": 725
},
{
"epoch": 0.7262042449438905,
"grad_norm": 0.16900648176670074,
"learning_rate": 0.0001518048128342246,
"loss": 0.4126,
"step": 726
},
{
"epoch": 0.7272045262730142,
"grad_norm": 0.16767656803131104,
"learning_rate": 0.0001517379679144385,
"loss": 0.409,
"step": 727
},
{
"epoch": 0.7282048076021381,
"grad_norm": 0.17568565905094147,
"learning_rate": 0.0001516711229946524,
"loss": 0.4075,
"step": 728
},
{
"epoch": 0.7292050889312619,
"grad_norm": 0.17152975499629974,
"learning_rate": 0.00015160427807486634,
"loss": 0.4199,
"step": 729
},
{
"epoch": 0.7302053702603858,
"grad_norm": 0.17456263303756714,
"learning_rate": 0.00015153743315508023,
"loss": 0.4115,
"step": 730
},
{
"epoch": 0.7312056515895096,
"grad_norm": 0.1764313280582428,
"learning_rate": 0.00015147058823529412,
"loss": 0.4074,
"step": 731
},
{
"epoch": 0.7322059329186333,
"grad_norm": 0.17887412011623383,
"learning_rate": 0.000151403743315508,
"loss": 0.4105,
"step": 732
},
{
"epoch": 0.7332062142477572,
"grad_norm": 0.18053936958312988,
"learning_rate": 0.00015133689839572193,
"loss": 0.4224,
"step": 733
},
{
"epoch": 0.734206495576881,
"grad_norm": 0.1864321231842041,
"learning_rate": 0.00015127005347593584,
"loss": 0.3945,
"step": 734
},
{
"epoch": 0.7352067769060048,
"grad_norm": 0.17719848453998566,
"learning_rate": 0.00015120320855614974,
"loss": 0.4173,
"step": 735
},
{
"epoch": 0.7362070582351287,
"grad_norm": 0.17382051050662994,
"learning_rate": 0.00015113636363636365,
"loss": 0.412,
"step": 736
},
{
"epoch": 0.7372073395642524,
"grad_norm": 0.17831949889659882,
"learning_rate": 0.00015106951871657754,
"loss": 0.3926,
"step": 737
},
{
"epoch": 0.7382076208933762,
"grad_norm": 0.19203975796699524,
"learning_rate": 0.00015100267379679146,
"loss": 0.4061,
"step": 738
},
{
"epoch": 0.7392079022225001,
"grad_norm": 0.1851518303155899,
"learning_rate": 0.00015093582887700535,
"loss": 0.4254,
"step": 739
},
{
"epoch": 0.7402081835516239,
"grad_norm": 0.17518197000026703,
"learning_rate": 0.00015086898395721927,
"loss": 0.4094,
"step": 740
},
{
"epoch": 0.7412084648807478,
"grad_norm": 0.19005051255226135,
"learning_rate": 0.00015080213903743316,
"loss": 0.435,
"step": 741
},
{
"epoch": 0.7422087462098715,
"grad_norm": 0.17512501776218414,
"learning_rate": 0.00015073529411764705,
"loss": 0.3818,
"step": 742
},
{
"epoch": 0.7432090275389953,
"grad_norm": 0.17218168079853058,
"learning_rate": 0.00015066844919786097,
"loss": 0.3938,
"step": 743
},
{
"epoch": 0.7442093088681192,
"grad_norm": 0.16856470704078674,
"learning_rate": 0.0001506016042780749,
"loss": 0.4186,
"step": 744
},
{
"epoch": 0.745209590197243,
"grad_norm": 0.185384601354599,
"learning_rate": 0.00015053475935828878,
"loss": 0.3965,
"step": 745
},
{
"epoch": 0.7462098715263668,
"grad_norm": 0.18854860961437225,
"learning_rate": 0.00015046791443850267,
"loss": 0.4281,
"step": 746
},
{
"epoch": 0.7472101528554906,
"grad_norm": 0.17106805741786957,
"learning_rate": 0.0001504010695187166,
"loss": 0.403,
"step": 747
},
{
"epoch": 0.7482104341846144,
"grad_norm": 0.17635445296764374,
"learning_rate": 0.0001503342245989305,
"loss": 0.4045,
"step": 748
},
{
"epoch": 0.7492107155137382,
"grad_norm": 0.17554466426372528,
"learning_rate": 0.0001502673796791444,
"loss": 0.4216,
"step": 749
},
{
"epoch": 0.7502109968428621,
"grad_norm": 0.16902974247932434,
"learning_rate": 0.0001502005347593583,
"loss": 0.3842,
"step": 750
},
{
"epoch": 0.7512112781719859,
"grad_norm": 0.17811493575572968,
"learning_rate": 0.00015013368983957218,
"loss": 0.4085,
"step": 751
},
{
"epoch": 0.7522115595011097,
"grad_norm": 0.17670631408691406,
"learning_rate": 0.0001500668449197861,
"loss": 0.4142,
"step": 752
},
{
"epoch": 0.7532118408302335,
"grad_norm": 0.1737818568944931,
"learning_rate": 0.00015000000000000001,
"loss": 0.4171,
"step": 753
},
{
"epoch": 0.7542121221593573,
"grad_norm": 0.17644794285297394,
"learning_rate": 0.0001499331550802139,
"loss": 0.41,
"step": 754
},
{
"epoch": 0.7552124034884812,
"grad_norm": 0.17215976119041443,
"learning_rate": 0.00014986631016042782,
"loss": 0.397,
"step": 755
},
{
"epoch": 0.756212684817605,
"grad_norm": 0.17125418782234192,
"learning_rate": 0.0001497994652406417,
"loss": 0.42,
"step": 756
},
{
"epoch": 0.7572129661467287,
"grad_norm": 0.19707784056663513,
"learning_rate": 0.00014973262032085563,
"loss": 0.4085,
"step": 757
},
{
"epoch": 0.7582132474758526,
"grad_norm": 0.17742498219013214,
"learning_rate": 0.00014966577540106952,
"loss": 0.4297,
"step": 758
},
{
"epoch": 0.7592135288049764,
"grad_norm": 0.17373313009738922,
"learning_rate": 0.00014959893048128344,
"loss": 0.4203,
"step": 759
},
{
"epoch": 0.7602138101341002,
"grad_norm": 0.17742851376533508,
"learning_rate": 0.00014953208556149733,
"loss": 0.4225,
"step": 760
},
{
"epoch": 0.761214091463224,
"grad_norm": 0.18568547070026398,
"learning_rate": 0.00014946524064171122,
"loss": 0.4175,
"step": 761
},
{
"epoch": 0.7622143727923478,
"grad_norm": 0.16926662623882294,
"learning_rate": 0.00014939839572192514,
"loss": 0.4182,
"step": 762
},
{
"epoch": 0.7632146541214717,
"grad_norm": 0.1670301854610443,
"learning_rate": 0.00014933155080213906,
"loss": 0.4032,
"step": 763
},
{
"epoch": 0.7642149354505955,
"grad_norm": 0.17339031398296356,
"learning_rate": 0.00014926470588235295,
"loss": 0.43,
"step": 764
},
{
"epoch": 0.7652152167797193,
"grad_norm": 0.17211312055587769,
"learning_rate": 0.00014919786096256684,
"loss": 0.4086,
"step": 765
},
{
"epoch": 0.7662154981088432,
"grad_norm": 0.17297977209091187,
"learning_rate": 0.00014913101604278076,
"loss": 0.3909,
"step": 766
},
{
"epoch": 0.7672157794379669,
"grad_norm": 0.18003138899803162,
"learning_rate": 0.00014906417112299468,
"loss": 0.4073,
"step": 767
},
{
"epoch": 0.7682160607670907,
"grad_norm": 0.18356819450855255,
"learning_rate": 0.00014899732620320857,
"loss": 0.4105,
"step": 768
},
{
"epoch": 0.7692163420962146,
"grad_norm": 0.1751221865415573,
"learning_rate": 0.00014893048128342246,
"loss": 0.4033,
"step": 769
},
{
"epoch": 0.7702166234253384,
"grad_norm": 0.1809505969285965,
"learning_rate": 0.00014886363636363635,
"loss": 0.4033,
"step": 770
},
{
"epoch": 0.7712169047544621,
"grad_norm": 0.17458176612854004,
"learning_rate": 0.0001487967914438503,
"loss": 0.415,
"step": 771
},
{
"epoch": 0.772217186083586,
"grad_norm": 0.17972098290920258,
"learning_rate": 0.00014872994652406418,
"loss": 0.4304,
"step": 772
},
{
"epoch": 0.7732174674127098,
"grad_norm": 0.17451392114162445,
"learning_rate": 0.00014866310160427807,
"loss": 0.3945,
"step": 773
},
{
"epoch": 0.7742177487418337,
"grad_norm": 0.18923582136631012,
"learning_rate": 0.000148596256684492,
"loss": 0.4039,
"step": 774
},
{
"epoch": 0.7752180300709575,
"grad_norm": 0.17579270899295807,
"learning_rate": 0.00014852941176470588,
"loss": 0.4115,
"step": 775
},
{
"epoch": 0.7762183114000812,
"grad_norm": 0.18641281127929688,
"learning_rate": 0.0001484625668449198,
"loss": 0.4028,
"step": 776
},
{
"epoch": 0.7772185927292051,
"grad_norm": 0.1804158091545105,
"learning_rate": 0.0001483957219251337,
"loss": 0.363,
"step": 777
},
{
"epoch": 0.7782188740583289,
"grad_norm": 0.1881331503391266,
"learning_rate": 0.0001483288770053476,
"loss": 0.3875,
"step": 778
},
{
"epoch": 0.7792191553874527,
"grad_norm": 0.17641088366508484,
"learning_rate": 0.0001482620320855615,
"loss": 0.4124,
"step": 779
},
{
"epoch": 0.7802194367165766,
"grad_norm": 0.18274515867233276,
"learning_rate": 0.00014819518716577542,
"loss": 0.4048,
"step": 780
},
{
"epoch": 0.7812197180457003,
"grad_norm": 0.17852523922920227,
"learning_rate": 0.0001481283422459893,
"loss": 0.4094,
"step": 781
},
{
"epoch": 0.7822199993748241,
"grad_norm": 0.1843167096376419,
"learning_rate": 0.00014806149732620323,
"loss": 0.4222,
"step": 782
},
{
"epoch": 0.783220280703948,
"grad_norm": 0.19953066110610962,
"learning_rate": 0.00014799465240641712,
"loss": 0.3951,
"step": 783
},
{
"epoch": 0.7842205620330718,
"grad_norm": 0.1708746999502182,
"learning_rate": 0.000147927807486631,
"loss": 0.4331,
"step": 784
},
{
"epoch": 0.7852208433621957,
"grad_norm": 0.16868069767951965,
"learning_rate": 0.00014786096256684493,
"loss": 0.3986,
"step": 785
},
{
"epoch": 0.7862211246913194,
"grad_norm": 0.17892426252365112,
"learning_rate": 0.00014779411764705884,
"loss": 0.4108,
"step": 786
},
{
"epoch": 0.7872214060204432,
"grad_norm": 0.16507907211780548,
"learning_rate": 0.00014772727272727274,
"loss": 0.3735,
"step": 787
},
{
"epoch": 0.7882216873495671,
"grad_norm": 0.17571504414081573,
"learning_rate": 0.00014766042780748663,
"loss": 0.4135,
"step": 788
},
{
"epoch": 0.7892219686786909,
"grad_norm": 0.16615907847881317,
"learning_rate": 0.00014759358288770054,
"loss": 0.4048,
"step": 789
},
{
"epoch": 0.7902222500078147,
"grad_norm": 0.18569040298461914,
"learning_rate": 0.00014752673796791446,
"loss": 0.427,
"step": 790
},
{
"epoch": 0.7912225313369385,
"grad_norm": 0.17103785276412964,
"learning_rate": 0.00014745989304812835,
"loss": 0.3877,
"step": 791
},
{
"epoch": 0.7922228126660623,
"grad_norm": 0.1752656102180481,
"learning_rate": 0.00014739304812834224,
"loss": 0.393,
"step": 792
},
{
"epoch": 0.7932230939951862,
"grad_norm": 0.17465591430664062,
"learning_rate": 0.00014732620320855613,
"loss": 0.4149,
"step": 793
},
{
"epoch": 0.79422337532431,
"grad_norm": 0.18063925206661224,
"learning_rate": 0.00014725935828877008,
"loss": 0.3961,
"step": 794
},
{
"epoch": 0.7952236566534338,
"grad_norm": 0.1805940717458725,
"learning_rate": 0.00014719251336898397,
"loss": 0.4112,
"step": 795
},
{
"epoch": 0.7962239379825576,
"grad_norm": 0.17591600120067596,
"learning_rate": 0.00014712566844919786,
"loss": 0.3919,
"step": 796
},
{
"epoch": 0.7972242193116814,
"grad_norm": 0.17145437002182007,
"learning_rate": 0.00014705882352941178,
"loss": 0.4263,
"step": 797
},
{
"epoch": 0.7982245006408052,
"grad_norm": 0.17273344099521637,
"learning_rate": 0.00014699197860962567,
"loss": 0.3945,
"step": 798
},
{
"epoch": 0.7992247819699291,
"grad_norm": 0.16053451597690582,
"learning_rate": 0.0001469251336898396,
"loss": 0.3728,
"step": 799
},
{
"epoch": 0.8002250632990529,
"grad_norm": 0.1720341145992279,
"learning_rate": 0.00014685828877005348,
"loss": 0.4029,
"step": 800
},
{
"epoch": 0.8012253446281766,
"grad_norm": 0.1686064451932907,
"learning_rate": 0.0001467914438502674,
"loss": 0.3996,
"step": 801
},
{
"epoch": 0.8022256259573005,
"grad_norm": 0.17165598273277283,
"learning_rate": 0.0001467245989304813,
"loss": 0.3785,
"step": 802
},
{
"epoch": 0.8032259072864243,
"grad_norm": 0.17315958440303802,
"learning_rate": 0.0001466577540106952,
"loss": 0.3989,
"step": 803
},
{
"epoch": 0.8042261886155482,
"grad_norm": 0.17488616704940796,
"learning_rate": 0.0001465909090909091,
"loss": 0.4087,
"step": 804
},
{
"epoch": 0.805226469944672,
"grad_norm": 0.17673259973526,
"learning_rate": 0.00014652406417112301,
"loss": 0.4126,
"step": 805
},
{
"epoch": 0.8062267512737957,
"grad_norm": 0.17650727927684784,
"learning_rate": 0.0001464572192513369,
"loss": 0.4115,
"step": 806
},
{
"epoch": 0.8072270326029196,
"grad_norm": 0.17311464250087738,
"learning_rate": 0.0001463903743315508,
"loss": 0.4175,
"step": 807
},
{
"epoch": 0.8082273139320434,
"grad_norm": 0.18814238905906677,
"learning_rate": 0.0001463235294117647,
"loss": 0.4082,
"step": 808
},
{
"epoch": 0.8092275952611672,
"grad_norm": 0.16839633882045746,
"learning_rate": 0.00014625668449197863,
"loss": 0.3909,
"step": 809
},
{
"epoch": 0.810227876590291,
"grad_norm": 0.1769394725561142,
"learning_rate": 0.00014618983957219252,
"loss": 0.3913,
"step": 810
},
{
"epoch": 0.8112281579194148,
"grad_norm": 0.17125903069972992,
"learning_rate": 0.0001461229946524064,
"loss": 0.3914,
"step": 811
},
{
"epoch": 0.8122284392485386,
"grad_norm": 0.17443816363811493,
"learning_rate": 0.00014605614973262033,
"loss": 0.4183,
"step": 812
},
{
"epoch": 0.8132287205776625,
"grad_norm": 0.18099425733089447,
"learning_rate": 0.00014598930481283425,
"loss": 0.4043,
"step": 813
},
{
"epoch": 0.8142290019067863,
"grad_norm": 0.17355240881443024,
"learning_rate": 0.00014592245989304814,
"loss": 0.4082,
"step": 814
},
{
"epoch": 0.8152292832359102,
"grad_norm": 0.16909727454185486,
"learning_rate": 0.00014585561497326203,
"loss": 0.4056,
"step": 815
},
{
"epoch": 0.8162295645650339,
"grad_norm": 0.1898905634880066,
"learning_rate": 0.00014578877005347595,
"loss": 0.4352,
"step": 816
},
{
"epoch": 0.8172298458941577,
"grad_norm": 0.17900870740413666,
"learning_rate": 0.00014572192513368984,
"loss": 0.4205,
"step": 817
},
{
"epoch": 0.8182301272232816,
"grad_norm": 0.17821773886680603,
"learning_rate": 0.00014565508021390376,
"loss": 0.4248,
"step": 818
},
{
"epoch": 0.8192304085524054,
"grad_norm": 0.19676046073436737,
"learning_rate": 0.00014558823529411765,
"loss": 0.4096,
"step": 819
},
{
"epoch": 0.8202306898815291,
"grad_norm": 0.17447529733181,
"learning_rate": 0.00014552139037433157,
"loss": 0.4089,
"step": 820
},
{
"epoch": 0.821230971210653,
"grad_norm": 0.1961289793252945,
"learning_rate": 0.00014545454545454546,
"loss": 0.4046,
"step": 821
},
{
"epoch": 0.8222312525397768,
"grad_norm": 0.18811632692813873,
"learning_rate": 0.00014538770053475937,
"loss": 0.4265,
"step": 822
},
{
"epoch": 0.8232315338689006,
"grad_norm": 0.16100798547267914,
"learning_rate": 0.00014532085561497327,
"loss": 0.3986,
"step": 823
},
{
"epoch": 0.8242318151980245,
"grad_norm": 0.1786595582962036,
"learning_rate": 0.00014525401069518718,
"loss": 0.4095,
"step": 824
},
{
"epoch": 0.8252320965271482,
"grad_norm": 0.17071866989135742,
"learning_rate": 0.00014518716577540107,
"loss": 0.3784,
"step": 825
},
{
"epoch": 0.8262323778562721,
"grad_norm": 0.18947453796863556,
"learning_rate": 0.00014512032085561496,
"loss": 0.4101,
"step": 826
},
{
"epoch": 0.8272326591853959,
"grad_norm": 0.17812004685401917,
"learning_rate": 0.00014505347593582888,
"loss": 0.4102,
"step": 827
},
{
"epoch": 0.8282329405145197,
"grad_norm": 0.17082251608371735,
"learning_rate": 0.0001449866310160428,
"loss": 0.3902,
"step": 828
},
{
"epoch": 0.8292332218436436,
"grad_norm": 0.19824115931987762,
"learning_rate": 0.0001449197860962567,
"loss": 0.4108,
"step": 829
},
{
"epoch": 0.8302335031727673,
"grad_norm": 0.19035981595516205,
"learning_rate": 0.00014485294117647058,
"loss": 0.4299,
"step": 830
},
{
"epoch": 0.8312337845018911,
"grad_norm": 0.17072008550167084,
"learning_rate": 0.0001447860962566845,
"loss": 0.3892,
"step": 831
},
{
"epoch": 0.832234065831015,
"grad_norm": 0.16947698593139648,
"learning_rate": 0.00014471925133689842,
"loss": 0.3799,
"step": 832
},
{
"epoch": 0.8332343471601388,
"grad_norm": 0.173639714717865,
"learning_rate": 0.0001446524064171123,
"loss": 0.3889,
"step": 833
},
{
"epoch": 0.8342346284892626,
"grad_norm": 0.17889994382858276,
"learning_rate": 0.0001445855614973262,
"loss": 0.4097,
"step": 834
},
{
"epoch": 0.8352349098183864,
"grad_norm": 0.17331556975841522,
"learning_rate": 0.0001445187165775401,
"loss": 0.4041,
"step": 835
},
{
"epoch": 0.8362351911475102,
"grad_norm": 0.17438872158527374,
"learning_rate": 0.00014445187165775404,
"loss": 0.3966,
"step": 836
},
{
"epoch": 0.8372354724766341,
"grad_norm": 0.1759490966796875,
"learning_rate": 0.00014438502673796793,
"loss": 0.4043,
"step": 837
},
{
"epoch": 0.8382357538057579,
"grad_norm": 0.16946400701999664,
"learning_rate": 0.00014431818181818182,
"loss": 0.4282,
"step": 838
},
{
"epoch": 0.8392360351348817,
"grad_norm": 0.16447113454341888,
"learning_rate": 0.00014425133689839574,
"loss": 0.3731,
"step": 839
},
{
"epoch": 0.8402363164640055,
"grad_norm": 0.1674252301454544,
"learning_rate": 0.00014418449197860963,
"loss": 0.3879,
"step": 840
},
{
"epoch": 0.8412365977931293,
"grad_norm": 0.1837848722934723,
"learning_rate": 0.00014411764705882354,
"loss": 0.4217,
"step": 841
},
{
"epoch": 0.8422368791222531,
"grad_norm": 0.1785300076007843,
"learning_rate": 0.00014405080213903743,
"loss": 0.4125,
"step": 842
},
{
"epoch": 0.843237160451377,
"grad_norm": 0.20810818672180176,
"learning_rate": 0.00014398395721925135,
"loss": 0.409,
"step": 843
},
{
"epoch": 0.8442374417805008,
"grad_norm": 0.17327488958835602,
"learning_rate": 0.00014391711229946524,
"loss": 0.3981,
"step": 844
},
{
"epoch": 0.8452377231096245,
"grad_norm": 0.2608954608440399,
"learning_rate": 0.00014385026737967916,
"loss": 0.3927,
"step": 845
},
{
"epoch": 0.8462380044387484,
"grad_norm": 0.18573161959648132,
"learning_rate": 0.00014378342245989305,
"loss": 0.4287,
"step": 846
},
{
"epoch": 0.8472382857678722,
"grad_norm": 0.1860964000225067,
"learning_rate": 0.00014371657754010697,
"loss": 0.3982,
"step": 847
},
{
"epoch": 0.8482385670969961,
"grad_norm": 0.17765875160694122,
"learning_rate": 0.00014364973262032086,
"loss": 0.3898,
"step": 848
},
{
"epoch": 0.8492388484261199,
"grad_norm": 0.17560985684394836,
"learning_rate": 0.00014358288770053475,
"loss": 0.4192,
"step": 849
},
{
"epoch": 0.8502391297552436,
"grad_norm": 0.17350338399410248,
"learning_rate": 0.00014351604278074867,
"loss": 0.4087,
"step": 850
},
{
"epoch": 0.8512394110843675,
"grad_norm": 0.17332197725772858,
"learning_rate": 0.0001434491978609626,
"loss": 0.4037,
"step": 851
},
{
"epoch": 0.8522396924134913,
"grad_norm": 0.17164768278598785,
"learning_rate": 0.00014338235294117648,
"loss": 0.4132,
"step": 852
},
{
"epoch": 0.8532399737426151,
"grad_norm": 0.17493771016597748,
"learning_rate": 0.00014331550802139037,
"loss": 0.3898,
"step": 853
},
{
"epoch": 0.854240255071739,
"grad_norm": 0.1740700900554657,
"learning_rate": 0.0001432486631016043,
"loss": 0.3857,
"step": 854
},
{
"epoch": 0.8552405364008627,
"grad_norm": 0.17457234859466553,
"learning_rate": 0.0001431818181818182,
"loss": 0.401,
"step": 855
},
{
"epoch": 0.8562408177299866,
"grad_norm": 0.1648370772600174,
"learning_rate": 0.0001431149732620321,
"loss": 0.3838,
"step": 856
},
{
"epoch": 0.8572410990591104,
"grad_norm": 0.1766262650489807,
"learning_rate": 0.000143048128342246,
"loss": 0.4083,
"step": 857
},
{
"epoch": 0.8582413803882342,
"grad_norm": 0.16761666536331177,
"learning_rate": 0.0001429812834224599,
"loss": 0.3771,
"step": 858
},
{
"epoch": 0.859241661717358,
"grad_norm": 0.1663917452096939,
"learning_rate": 0.0001429144385026738,
"loss": 0.4198,
"step": 859
},
{
"epoch": 0.8602419430464818,
"grad_norm": 0.17130149900913239,
"learning_rate": 0.0001428475935828877,
"loss": 0.3755,
"step": 860
},
{
"epoch": 0.8612422243756056,
"grad_norm": 0.16928842663764954,
"learning_rate": 0.0001427807486631016,
"loss": 0.3922,
"step": 861
},
{
"epoch": 0.8622425057047295,
"grad_norm": 0.1808835119009018,
"learning_rate": 0.00014271390374331552,
"loss": 0.3831,
"step": 862
},
{
"epoch": 0.8632427870338533,
"grad_norm": 0.17441269755363464,
"learning_rate": 0.0001426470588235294,
"loss": 0.4159,
"step": 863
},
{
"epoch": 0.864243068362977,
"grad_norm": 0.17643436789512634,
"learning_rate": 0.00014258021390374333,
"loss": 0.4119,
"step": 864
},
{
"epoch": 0.8652433496921009,
"grad_norm": 0.1707470417022705,
"learning_rate": 0.00014251336898395722,
"loss": 0.3818,
"step": 865
},
{
"epoch": 0.8662436310212247,
"grad_norm": 0.16326646506786346,
"learning_rate": 0.00014244652406417114,
"loss": 0.3755,
"step": 866
},
{
"epoch": 0.8672439123503486,
"grad_norm": 0.17867451906204224,
"learning_rate": 0.00014237967914438503,
"loss": 0.4158,
"step": 867
},
{
"epoch": 0.8682441936794724,
"grad_norm": 0.17721618711948395,
"learning_rate": 0.00014231283422459892,
"loss": 0.4329,
"step": 868
},
{
"epoch": 0.8692444750085961,
"grad_norm": 0.16564089059829712,
"learning_rate": 0.00014224598930481284,
"loss": 0.4011,
"step": 869
},
{
"epoch": 0.87024475633772,
"grad_norm": 0.174989715218544,
"learning_rate": 0.00014217914438502676,
"loss": 0.389,
"step": 870
},
{
"epoch": 0.8712450376668438,
"grad_norm": 0.17899653315544128,
"learning_rate": 0.00014211229946524065,
"loss": 0.4123,
"step": 871
},
{
"epoch": 0.8722453189959676,
"grad_norm": 0.17878752946853638,
"learning_rate": 0.00014204545454545454,
"loss": 0.4248,
"step": 872
},
{
"epoch": 0.8732456003250915,
"grad_norm": 0.17918603122234344,
"learning_rate": 0.00014197860962566846,
"loss": 0.4108,
"step": 873
},
{
"epoch": 0.8742458816542152,
"grad_norm": 0.16828817129135132,
"learning_rate": 0.00014191176470588237,
"loss": 0.4016,
"step": 874
},
{
"epoch": 0.875246162983339,
"grad_norm": 0.17155155539512634,
"learning_rate": 0.00014184491978609627,
"loss": 0.4061,
"step": 875
},
{
"epoch": 0.8762464443124629,
"grad_norm": 0.1909843236207962,
"learning_rate": 0.00014177807486631016,
"loss": 0.4019,
"step": 876
},
{
"epoch": 0.8772467256415867,
"grad_norm": 0.17694242298603058,
"learning_rate": 0.00014171122994652405,
"loss": 0.4005,
"step": 877
},
{
"epoch": 0.8782470069707106,
"grad_norm": 0.18169617652893066,
"learning_rate": 0.000141644385026738,
"loss": 0.395,
"step": 878
},
{
"epoch": 0.8792472882998343,
"grad_norm": 0.18381339311599731,
"learning_rate": 0.00014157754010695188,
"loss": 0.3825,
"step": 879
},
{
"epoch": 0.8802475696289581,
"grad_norm": 0.16577623784542084,
"learning_rate": 0.00014151069518716577,
"loss": 0.3815,
"step": 880
},
{
"epoch": 0.881247850958082,
"grad_norm": 0.19959154725074768,
"learning_rate": 0.0001414438502673797,
"loss": 0.3994,
"step": 881
},
{
"epoch": 0.8822481322872058,
"grad_norm": 0.19829948246479034,
"learning_rate": 0.00014137700534759358,
"loss": 0.412,
"step": 882
},
{
"epoch": 0.8832484136163296,
"grad_norm": 0.17270344495773315,
"learning_rate": 0.0001413101604278075,
"loss": 0.408,
"step": 883
},
{
"epoch": 0.8842486949454534,
"grad_norm": 0.17711827158927917,
"learning_rate": 0.0001412433155080214,
"loss": 0.3942,
"step": 884
},
{
"epoch": 0.8852489762745772,
"grad_norm": 0.20845437049865723,
"learning_rate": 0.0001411764705882353,
"loss": 0.4221,
"step": 885
},
{
"epoch": 0.886249257603701,
"grad_norm": 0.1848880648612976,
"learning_rate": 0.0001411096256684492,
"loss": 0.4029,
"step": 886
},
{
"epoch": 0.8872495389328249,
"grad_norm": 0.16786153614521027,
"learning_rate": 0.00014104278074866312,
"loss": 0.3785,
"step": 887
},
{
"epoch": 0.8882498202619487,
"grad_norm": 0.16691294312477112,
"learning_rate": 0.000140975935828877,
"loss": 0.4067,
"step": 888
},
{
"epoch": 0.8892501015910725,
"grad_norm": 0.17728787660598755,
"learning_rate": 0.00014090909090909093,
"loss": 0.397,
"step": 889
},
{
"epoch": 0.8902503829201963,
"grad_norm": 0.1761750876903534,
"learning_rate": 0.00014084224598930482,
"loss": 0.3979,
"step": 890
},
{
"epoch": 0.8912506642493201,
"grad_norm": 0.1840093433856964,
"learning_rate": 0.0001407754010695187,
"loss": 0.4029,
"step": 891
},
{
"epoch": 0.892250945578444,
"grad_norm": 0.17632125318050385,
"learning_rate": 0.00014070855614973263,
"loss": 0.3997,
"step": 892
},
{
"epoch": 0.8932512269075678,
"grad_norm": 0.17613720893859863,
"learning_rate": 0.00014064171122994654,
"loss": 0.3784,
"step": 893
},
{
"epoch": 0.8942515082366915,
"grad_norm": 0.18408696353435516,
"learning_rate": 0.00014057486631016043,
"loss": 0.4104,
"step": 894
},
{
"epoch": 0.8952517895658154,
"grad_norm": 0.18052643537521362,
"learning_rate": 0.00014050802139037433,
"loss": 0.4014,
"step": 895
},
{
"epoch": 0.8962520708949392,
"grad_norm": 0.18135356903076172,
"learning_rate": 0.00014044117647058824,
"loss": 0.4188,
"step": 896
},
{
"epoch": 0.897252352224063,
"grad_norm": 0.1768425852060318,
"learning_rate": 0.00014037433155080216,
"loss": 0.4026,
"step": 897
},
{
"epoch": 0.8982526335531869,
"grad_norm": 0.17669373750686646,
"learning_rate": 0.00014030748663101605,
"loss": 0.4249,
"step": 898
},
{
"epoch": 0.8992529148823106,
"grad_norm": 0.1866510659456253,
"learning_rate": 0.00014024064171122994,
"loss": 0.447,
"step": 899
},
{
"epoch": 0.9002531962114345,
"grad_norm": 0.18631310760974884,
"learning_rate": 0.00014017379679144386,
"loss": 0.4072,
"step": 900
},
{
"epoch": 0.9012534775405583,
"grad_norm": 0.1782829463481903,
"learning_rate": 0.00014010695187165778,
"loss": 0.4176,
"step": 901
},
{
"epoch": 0.9022537588696821,
"grad_norm": 0.16321073472499847,
"learning_rate": 0.00014004010695187167,
"loss": 0.3762,
"step": 902
},
{
"epoch": 0.903254040198806,
"grad_norm": 0.1676628589630127,
"learning_rate": 0.00013997326203208556,
"loss": 0.3868,
"step": 903
},
{
"epoch": 0.9042543215279297,
"grad_norm": 0.18179528415203094,
"learning_rate": 0.00013990641711229948,
"loss": 0.4031,
"step": 904
},
{
"epoch": 0.9052546028570535,
"grad_norm": 0.17398111522197723,
"learning_rate": 0.00013983957219251337,
"loss": 0.3926,
"step": 905
},
{
"epoch": 0.9062548841861774,
"grad_norm": 0.17181190848350525,
"learning_rate": 0.0001397727272727273,
"loss": 0.4142,
"step": 906
},
{
"epoch": 0.9072551655153012,
"grad_norm": 0.17833548784255981,
"learning_rate": 0.00013970588235294118,
"loss": 0.4167,
"step": 907
},
{
"epoch": 0.9082554468444249,
"grad_norm": 0.17353063821792603,
"learning_rate": 0.0001396390374331551,
"loss": 0.4221,
"step": 908
},
{
"epoch": 0.9092557281735488,
"grad_norm": 0.17322175204753876,
"learning_rate": 0.000139572192513369,
"loss": 0.4298,
"step": 909
},
{
"epoch": 0.9102560095026726,
"grad_norm": 0.17294059693813324,
"learning_rate": 0.0001395053475935829,
"loss": 0.3897,
"step": 910
},
{
"epoch": 0.9112562908317965,
"grad_norm": 0.17554591596126556,
"learning_rate": 0.0001394385026737968,
"loss": 0.3742,
"step": 911
},
{
"epoch": 0.9122565721609203,
"grad_norm": 0.1788209229707718,
"learning_rate": 0.0001393716577540107,
"loss": 0.4048,
"step": 912
},
{
"epoch": 0.913256853490044,
"grad_norm": 0.1844097226858139,
"learning_rate": 0.0001393048128342246,
"loss": 0.4148,
"step": 913
},
{
"epoch": 0.9142571348191679,
"grad_norm": 0.18024982511997223,
"learning_rate": 0.0001392379679144385,
"loss": 0.4042,
"step": 914
},
{
"epoch": 0.9152574161482917,
"grad_norm": 0.18731427192687988,
"learning_rate": 0.0001391711229946524,
"loss": 0.3902,
"step": 915
},
{
"epoch": 0.9162576974774155,
"grad_norm": 0.17596861720085144,
"learning_rate": 0.00013910427807486633,
"loss": 0.3782,
"step": 916
},
{
"epoch": 0.9172579788065394,
"grad_norm": 0.18986783921718597,
"learning_rate": 0.00013903743315508022,
"loss": 0.4147,
"step": 917
},
{
"epoch": 0.9182582601356631,
"grad_norm": 0.16781824827194214,
"learning_rate": 0.0001389705882352941,
"loss": 0.4042,
"step": 918
},
{
"epoch": 0.919258541464787,
"grad_norm": 0.17034654319286346,
"learning_rate": 0.00013890374331550803,
"loss": 0.3906,
"step": 919
},
{
"epoch": 0.9202588227939108,
"grad_norm": 0.16829004883766174,
"learning_rate": 0.00013883689839572195,
"loss": 0.4141,
"step": 920
},
{
"epoch": 0.9212591041230346,
"grad_norm": 0.19006861746311188,
"learning_rate": 0.00013877005347593584,
"loss": 0.4561,
"step": 921
},
{
"epoch": 0.9222593854521585,
"grad_norm": 0.16582091152668,
"learning_rate": 0.00013870320855614973,
"loss": 0.4091,
"step": 922
},
{
"epoch": 0.9232596667812822,
"grad_norm": 0.17240959405899048,
"learning_rate": 0.00013863636363636365,
"loss": 0.4028,
"step": 923
},
{
"epoch": 0.924259948110406,
"grad_norm": 0.169050931930542,
"learning_rate": 0.00013856951871657754,
"loss": 0.3787,
"step": 924
},
{
"epoch": 0.9252602294395299,
"grad_norm": 0.18316373229026794,
"learning_rate": 0.00013850267379679146,
"loss": 0.4219,
"step": 925
},
{
"epoch": 0.9262605107686537,
"grad_norm": 0.1770108938217163,
"learning_rate": 0.00013843582887700535,
"loss": 0.3965,
"step": 926
},
{
"epoch": 0.9272607920977775,
"grad_norm": 0.17968682944774628,
"learning_rate": 0.00013836898395721927,
"loss": 0.3796,
"step": 927
},
{
"epoch": 0.9282610734269013,
"grad_norm": 0.17519252002239227,
"learning_rate": 0.00013830213903743316,
"loss": 0.3973,
"step": 928
},
{
"epoch": 0.9292613547560251,
"grad_norm": 0.17505863308906555,
"learning_rate": 0.00013823529411764707,
"loss": 0.3923,
"step": 929
},
{
"epoch": 0.930261636085149,
"grad_norm": 0.1817988157272339,
"learning_rate": 0.00013816844919786096,
"loss": 0.4159,
"step": 930
},
{
"epoch": 0.9312619174142728,
"grad_norm": 0.18939583003520966,
"learning_rate": 0.00013810160427807488,
"loss": 0.3866,
"step": 931
},
{
"epoch": 0.9322621987433966,
"grad_norm": 0.17609402537345886,
"learning_rate": 0.00013803475935828877,
"loss": 0.3984,
"step": 932
},
{
"epoch": 0.9332624800725204,
"grad_norm": 0.17766056954860687,
"learning_rate": 0.00013796791443850266,
"loss": 0.4082,
"step": 933
},
{
"epoch": 0.9342627614016442,
"grad_norm": 0.17946885526180267,
"learning_rate": 0.00013790106951871658,
"loss": 0.3907,
"step": 934
},
{
"epoch": 0.935263042730768,
"grad_norm": 0.17798246443271637,
"learning_rate": 0.0001378342245989305,
"loss": 0.4033,
"step": 935
},
{
"epoch": 0.9362633240598919,
"grad_norm": 0.17503587901592255,
"learning_rate": 0.0001377673796791444,
"loss": 0.3984,
"step": 936
},
{
"epoch": 0.9372636053890157,
"grad_norm": 0.18128138780593872,
"learning_rate": 0.00013770053475935828,
"loss": 0.3902,
"step": 937
},
{
"epoch": 0.9382638867181394,
"grad_norm": 0.17769305408000946,
"learning_rate": 0.0001376336898395722,
"loss": 0.4268,
"step": 938
},
{
"epoch": 0.9392641680472633,
"grad_norm": 0.178322896361351,
"learning_rate": 0.00013756684491978612,
"loss": 0.4037,
"step": 939
},
{
"epoch": 0.9402644493763871,
"grad_norm": 0.17999599874019623,
"learning_rate": 0.0001375,
"loss": 0.4092,
"step": 940
},
{
"epoch": 0.941264730705511,
"grad_norm": 0.1779446005821228,
"learning_rate": 0.0001374331550802139,
"loss": 0.3998,
"step": 941
},
{
"epoch": 0.9422650120346348,
"grad_norm": 0.17861850559711456,
"learning_rate": 0.00013736631016042782,
"loss": 0.4,
"step": 942
},
{
"epoch": 0.9432652933637585,
"grad_norm": 0.1744145005941391,
"learning_rate": 0.00013729946524064173,
"loss": 0.384,
"step": 943
},
{
"epoch": 0.9442655746928824,
"grad_norm": 0.17079755663871765,
"learning_rate": 0.00013723262032085563,
"loss": 0.4012,
"step": 944
},
{
"epoch": 0.9452658560220062,
"grad_norm": 0.17308571934700012,
"learning_rate": 0.00013716577540106952,
"loss": 0.4116,
"step": 945
},
{
"epoch": 0.94626613735113,
"grad_norm": 0.1663939356803894,
"learning_rate": 0.00013709893048128343,
"loss": 0.3813,
"step": 946
},
{
"epoch": 0.9472664186802539,
"grad_norm": 0.1654575616121292,
"learning_rate": 0.00013703208556149733,
"loss": 0.3903,
"step": 947
},
{
"epoch": 0.9482667000093776,
"grad_norm": 0.17540469765663147,
"learning_rate": 0.00013696524064171124,
"loss": 0.401,
"step": 948
},
{
"epoch": 0.9492669813385014,
"grad_norm": 0.17629101872444153,
"learning_rate": 0.00013689839572192513,
"loss": 0.3975,
"step": 949
},
{
"epoch": 0.9502672626676253,
"grad_norm": 0.170530304312706,
"learning_rate": 0.00013683155080213905,
"loss": 0.3889,
"step": 950
},
{
"epoch": 0.9512675439967491,
"grad_norm": 0.1654834896326065,
"learning_rate": 0.00013676470588235294,
"loss": 0.3861,
"step": 951
},
{
"epoch": 0.952267825325873,
"grad_norm": 0.17197169363498688,
"learning_rate": 0.00013669786096256686,
"loss": 0.4027,
"step": 952
},
{
"epoch": 0.9532681066549967,
"grad_norm": 0.1747899055480957,
"learning_rate": 0.00013663101604278075,
"loss": 0.3811,
"step": 953
},
{
"epoch": 0.9542683879841205,
"grad_norm": 0.17641963064670563,
"learning_rate": 0.00013656417112299467,
"loss": 0.4064,
"step": 954
},
{
"epoch": 0.9552686693132444,
"grad_norm": 0.18137142062187195,
"learning_rate": 0.00013649732620320856,
"loss": 0.3985,
"step": 955
},
{
"epoch": 0.9562689506423682,
"grad_norm": 0.1665244996547699,
"learning_rate": 0.00013643048128342245,
"loss": 0.4019,
"step": 956
},
{
"epoch": 0.9572692319714919,
"grad_norm": 0.17225103080272675,
"learning_rate": 0.00013636363636363637,
"loss": 0.401,
"step": 957
},
{
"epoch": 0.9582695133006158,
"grad_norm": 0.17319133877754211,
"learning_rate": 0.0001362967914438503,
"loss": 0.4115,
"step": 958
},
{
"epoch": 0.9592697946297396,
"grad_norm": 0.1801668256521225,
"learning_rate": 0.00013622994652406418,
"loss": 0.4055,
"step": 959
},
{
"epoch": 0.9602700759588634,
"grad_norm": 0.1737590879201889,
"learning_rate": 0.00013616310160427807,
"loss": 0.3978,
"step": 960
},
{
"epoch": 0.9612703572879873,
"grad_norm": 0.17786799371242523,
"learning_rate": 0.00013609625668449199,
"loss": 0.4066,
"step": 961
},
{
"epoch": 0.962270638617111,
"grad_norm": 0.17783451080322266,
"learning_rate": 0.0001360294117647059,
"loss": 0.423,
"step": 962
},
{
"epoch": 0.9632709199462349,
"grad_norm": 0.17744936048984528,
"learning_rate": 0.0001359625668449198,
"loss": 0.3976,
"step": 963
},
{
"epoch": 0.9642712012753587,
"grad_norm": 0.1820724457502365,
"learning_rate": 0.00013589572192513369,
"loss": 0.3925,
"step": 964
},
{
"epoch": 0.9652714826044825,
"grad_norm": 0.16475753486156464,
"learning_rate": 0.0001358288770053476,
"loss": 0.3804,
"step": 965
},
{
"epoch": 0.9662717639336064,
"grad_norm": 0.17936855554580688,
"learning_rate": 0.0001357620320855615,
"loss": 0.4008,
"step": 966
},
{
"epoch": 0.9672720452627301,
"grad_norm": 0.17178383469581604,
"learning_rate": 0.0001356951871657754,
"loss": 0.4043,
"step": 967
},
{
"epoch": 0.9682723265918539,
"grad_norm": 0.18698321282863617,
"learning_rate": 0.0001356283422459893,
"loss": 0.4209,
"step": 968
},
{
"epoch": 0.9692726079209778,
"grad_norm": 0.16066111624240875,
"learning_rate": 0.00013556149732620322,
"loss": 0.3693,
"step": 969
},
{
"epoch": 0.9702728892501016,
"grad_norm": 0.17552712559700012,
"learning_rate": 0.0001354946524064171,
"loss": 0.3985,
"step": 970
},
{
"epoch": 0.9712731705792254,
"grad_norm": 0.1760571002960205,
"learning_rate": 0.00013542780748663103,
"loss": 0.3913,
"step": 971
},
{
"epoch": 0.9722734519083492,
"grad_norm": 0.1769057661294937,
"learning_rate": 0.00013536096256684492,
"loss": 0.4154,
"step": 972
},
{
"epoch": 0.973273733237473,
"grad_norm": 0.1754721999168396,
"learning_rate": 0.00013529411764705884,
"loss": 0.3763,
"step": 973
},
{
"epoch": 0.9742740145665969,
"grad_norm": 0.1725761741399765,
"learning_rate": 0.00013522727272727273,
"loss": 0.3847,
"step": 974
},
{
"epoch": 0.9752742958957207,
"grad_norm": 0.18031996488571167,
"learning_rate": 0.00013516042780748662,
"loss": 0.3996,
"step": 975
},
{
"epoch": 0.9762745772248445,
"grad_norm": 0.17676669359207153,
"learning_rate": 0.00013509358288770054,
"loss": 0.4087,
"step": 976
},
{
"epoch": 0.9772748585539683,
"grad_norm": 0.17073461413383484,
"learning_rate": 0.00013502673796791446,
"loss": 0.4114,
"step": 977
},
{
"epoch": 0.9782751398830921,
"grad_norm": 0.17377017438411713,
"learning_rate": 0.00013495989304812835,
"loss": 0.406,
"step": 978
},
{
"epoch": 0.9792754212122159,
"grad_norm": 0.1675245761871338,
"learning_rate": 0.00013489304812834224,
"loss": 0.3855,
"step": 979
},
{
"epoch": 0.9802757025413398,
"grad_norm": 0.17281413078308105,
"learning_rate": 0.00013482620320855616,
"loss": 0.3893,
"step": 980
},
{
"epoch": 0.9812759838704636,
"grad_norm": 0.17831061780452728,
"learning_rate": 0.00013475935828877007,
"loss": 0.4151,
"step": 981
},
{
"epoch": 0.9822762651995874,
"grad_norm": 0.18597757816314697,
"learning_rate": 0.00013469251336898396,
"loss": 0.4072,
"step": 982
},
{
"epoch": 0.9832765465287112,
"grad_norm": 0.185447558760643,
"learning_rate": 0.00013462566844919786,
"loss": 0.4194,
"step": 983
},
{
"epoch": 0.984276827857835,
"grad_norm": 0.16914793848991394,
"learning_rate": 0.00013455882352941177,
"loss": 0.3941,
"step": 984
},
{
"epoch": 0.9852771091869589,
"grad_norm": 0.17165033519268036,
"learning_rate": 0.0001344919786096257,
"loss": 0.4108,
"step": 985
},
{
"epoch": 0.9862773905160827,
"grad_norm": 0.17159344255924225,
"learning_rate": 0.00013442513368983958,
"loss": 0.3675,
"step": 986
},
{
"epoch": 0.9872776718452064,
"grad_norm": 0.16202309727668762,
"learning_rate": 0.00013435828877005347,
"loss": 0.3808,
"step": 987
},
{
"epoch": 0.9882779531743303,
"grad_norm": 0.18378107249736786,
"learning_rate": 0.0001342914438502674,
"loss": 0.4001,
"step": 988
},
{
"epoch": 0.9892782345034541,
"grad_norm": 0.17740057408809662,
"learning_rate": 0.00013422459893048128,
"loss": 0.3983,
"step": 989
},
{
"epoch": 0.9902785158325779,
"grad_norm": 0.18121828138828278,
"learning_rate": 0.0001341577540106952,
"loss": 0.3996,
"step": 990
},
{
"epoch": 0.9912787971617018,
"grad_norm": 0.17484596371650696,
"learning_rate": 0.0001340909090909091,
"loss": 0.3984,
"step": 991
},
{
"epoch": 0.9922790784908255,
"grad_norm": 0.1730891317129135,
"learning_rate": 0.000134024064171123,
"loss": 0.392,
"step": 992
},
{
"epoch": 0.9932793598199494,
"grad_norm": 0.1800169050693512,
"learning_rate": 0.0001339572192513369,
"loss": 0.4135,
"step": 993
},
{
"epoch": 0.9942796411490732,
"grad_norm": 0.17655795812606812,
"learning_rate": 0.00013389037433155082,
"loss": 0.4061,
"step": 994
},
{
"epoch": 0.995279922478197,
"grad_norm": 0.17109893262386322,
"learning_rate": 0.0001338235294117647,
"loss": 0.394,
"step": 995
},
{
"epoch": 0.9962802038073209,
"grad_norm": 0.16945064067840576,
"learning_rate": 0.00013375668449197863,
"loss": 0.368,
"step": 996
},
{
"epoch": 0.9972804851364446,
"grad_norm": 0.17989976704120636,
"learning_rate": 0.00013368983957219252,
"loss": 0.382,
"step": 997
},
{
"epoch": 0.9982807664655684,
"grad_norm": 0.1649048626422882,
"learning_rate": 0.0001336229946524064,
"loss": 0.4012,
"step": 998
},
{
"epoch": 0.9992810477946923,
"grad_norm": 0.17571307718753815,
"learning_rate": 0.00013355614973262033,
"loss": 0.4168,
"step": 999
}
],
"logging_steps": 1,
"max_steps": 2997,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.934039715057172e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}