gptsw3_translate_1.3B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.999947295374291,
"eval_steps": 500,
"global_step": 42690,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007027283427908856,
"grad_norm": 0.5592142939567566,
"learning_rate": 0.0001995548787630315,
"loss": 1.0457,
"step": 100
},
{
"epoch": 0.014054566855817713,
"grad_norm": 0.6605722308158875,
"learning_rate": 0.00019908633009253836,
"loss": 0.8075,
"step": 200
},
{
"epoch": 0.02108185028372657,
"grad_norm": 0.5018069744110107,
"learning_rate": 0.00019861778142204523,
"loss": 0.7927,
"step": 300
},
{
"epoch": 0.028109133711635426,
"grad_norm": 0.5429800152778625,
"learning_rate": 0.00019814923275155208,
"loss": 0.7648,
"step": 400
},
{
"epoch": 0.03513641713954428,
"grad_norm": 0.5747570395469666,
"learning_rate": 0.00019768068408105893,
"loss": 0.7732,
"step": 500
},
{
"epoch": 0.03513641713954428,
"eval_loss": 0.7362164855003357,
"eval_runtime": 118.3556,
"eval_samples_per_second": 106.873,
"eval_steps_per_second": 13.367,
"step": 500
},
{
"epoch": 0.04216370056745314,
"grad_norm": 0.6291443109512329,
"learning_rate": 0.00019721213541056578,
"loss": 0.7614,
"step": 600
},
{
"epoch": 0.049190983995361995,
"grad_norm": 0.6160371899604797,
"learning_rate": 0.00019674358674007263,
"loss": 0.7489,
"step": 700
},
{
"epoch": 0.05621826742327085,
"grad_norm": 0.5804228782653809,
"learning_rate": 0.00019627503806957949,
"loss": 0.7399,
"step": 800
},
{
"epoch": 0.06324555085117971,
"grad_norm": 0.6000151634216309,
"learning_rate": 0.00019580648939908634,
"loss": 0.7349,
"step": 900
},
{
"epoch": 0.07027283427908856,
"grad_norm": 0.5746223330497742,
"learning_rate": 0.00019533794072859319,
"loss": 0.7282,
"step": 1000
},
{
"epoch": 0.07027283427908856,
"eval_loss": 0.7145671248435974,
"eval_runtime": 118.2812,
"eval_samples_per_second": 106.94,
"eval_steps_per_second": 13.375,
"step": 1000
},
{
"epoch": 0.07730011770699742,
"grad_norm": 0.6321994066238403,
"learning_rate": 0.00019486939205810004,
"loss": 0.7241,
"step": 1100
},
{
"epoch": 0.08432740113490628,
"grad_norm": 0.7580232620239258,
"learning_rate": 0.00019440084338760689,
"loss": 0.7254,
"step": 1200
},
{
"epoch": 0.09135468456281513,
"grad_norm": 0.6181788444519043,
"learning_rate": 0.00019393229471711376,
"loss": 0.7349,
"step": 1300
},
{
"epoch": 0.09838196799072399,
"grad_norm": 0.6435703635215759,
"learning_rate": 0.00019346374604662061,
"loss": 0.7082,
"step": 1400
},
{
"epoch": 0.10540925141863285,
"grad_norm": 0.5934786200523376,
"learning_rate": 0.00019299519737612746,
"loss": 0.7145,
"step": 1500
},
{
"epoch": 0.10540925141863285,
"eval_loss": 0.6975060701370239,
"eval_runtime": 118.4288,
"eval_samples_per_second": 106.807,
"eval_steps_per_second": 13.358,
"step": 1500
},
{
"epoch": 0.1124365348465417,
"grad_norm": 0.4486166536808014,
"learning_rate": 0.00019252664870563431,
"loss": 0.7226,
"step": 1600
},
{
"epoch": 0.11946381827445056,
"grad_norm": 0.4868922531604767,
"learning_rate": 0.00019205810003514116,
"loss": 0.6951,
"step": 1700
},
{
"epoch": 0.12649110170235942,
"grad_norm": 0.5752139687538147,
"learning_rate": 0.00019158955136464801,
"loss": 0.7117,
"step": 1800
},
{
"epoch": 0.13351838513026826,
"grad_norm": 0.5520344972610474,
"learning_rate": 0.00019112100269415486,
"loss": 0.7111,
"step": 1900
},
{
"epoch": 0.14054566855817713,
"grad_norm": 0.5359894633293152,
"learning_rate": 0.00019065245402366172,
"loss": 0.7093,
"step": 2000
},
{
"epoch": 0.14054566855817713,
"eval_loss": 0.687991201877594,
"eval_runtime": 118.7631,
"eval_samples_per_second": 106.506,
"eval_steps_per_second": 13.321,
"step": 2000
},
{
"epoch": 0.14757295198608597,
"grad_norm": 0.5415408611297607,
"learning_rate": 0.00019018390535316857,
"loss": 0.7117,
"step": 2100
},
{
"epoch": 0.15460023541399484,
"grad_norm": 0.6338439583778381,
"learning_rate": 0.00018971535668267542,
"loss": 0.7253,
"step": 2200
},
{
"epoch": 0.16162751884190368,
"grad_norm": 0.6666418313980103,
"learning_rate": 0.00018924680801218227,
"loss": 0.7058,
"step": 2300
},
{
"epoch": 0.16865480226981255,
"grad_norm": 0.5344674587249756,
"learning_rate": 0.00018877825934168914,
"loss": 0.7036,
"step": 2400
},
{
"epoch": 0.1756820856977214,
"grad_norm": 0.5522785186767578,
"learning_rate": 0.000188309710671196,
"loss": 0.6809,
"step": 2500
},
{
"epoch": 0.1756820856977214,
"eval_loss": 0.679720938205719,
"eval_runtime": 118.6143,
"eval_samples_per_second": 106.64,
"eval_steps_per_second": 13.337,
"step": 2500
},
{
"epoch": 0.18270936912563027,
"grad_norm": 0.7211841344833374,
"learning_rate": 0.00018784116200070284,
"loss": 0.6972,
"step": 2600
},
{
"epoch": 0.1897366525535391,
"grad_norm": 0.5469601154327393,
"learning_rate": 0.00018737261333020967,
"loss": 0.6996,
"step": 2700
},
{
"epoch": 0.19676393598144798,
"grad_norm": 0.5906969904899597,
"learning_rate": 0.00018690406465971652,
"loss": 0.6828,
"step": 2800
},
{
"epoch": 0.20379121940935682,
"grad_norm": 0.703484833240509,
"learning_rate": 0.0001864355159892234,
"loss": 0.6906,
"step": 2900
},
{
"epoch": 0.2108185028372657,
"grad_norm": 0.5544711947441101,
"learning_rate": 0.00018596696731873024,
"loss": 0.6891,
"step": 3000
},
{
"epoch": 0.2108185028372657,
"eval_loss": 0.6728695034980774,
"eval_runtime": 118.909,
"eval_samples_per_second": 106.375,
"eval_steps_per_second": 13.304,
"step": 3000
},
{
"epoch": 0.21784578626517453,
"grad_norm": 0.5521793365478516,
"learning_rate": 0.0001854984186482371,
"loss": 0.6777,
"step": 3100
},
{
"epoch": 0.2248730696930834,
"grad_norm": 0.4634329080581665,
"learning_rate": 0.00018502986997774394,
"loss": 0.6744,
"step": 3200
},
{
"epoch": 0.23190035312099225,
"grad_norm": 0.7366927266120911,
"learning_rate": 0.0001845613213072508,
"loss": 0.6642,
"step": 3300
},
{
"epoch": 0.23892763654890112,
"grad_norm": 0.5567039251327515,
"learning_rate": 0.00018409277263675767,
"loss": 0.6895,
"step": 3400
},
{
"epoch": 0.24595491997680996,
"grad_norm": 0.6940245628356934,
"learning_rate": 0.0001836242239662645,
"loss": 0.6708,
"step": 3500
},
{
"epoch": 0.24595491997680996,
"eval_loss": 0.6706892848014832,
"eval_runtime": 118.4648,
"eval_samples_per_second": 106.774,
"eval_steps_per_second": 13.354,
"step": 3500
},
{
"epoch": 0.25298220340471883,
"grad_norm": 0.5577242970466614,
"learning_rate": 0.00018315567529577135,
"loss": 0.679,
"step": 3600
},
{
"epoch": 0.2600094868326277,
"grad_norm": 0.6587842702865601,
"learning_rate": 0.0001826871266252782,
"loss": 0.6918,
"step": 3700
},
{
"epoch": 0.2670367702605365,
"grad_norm": 0.6200099587440491,
"learning_rate": 0.00018221857795478505,
"loss": 0.7002,
"step": 3800
},
{
"epoch": 0.2740640536884454,
"grad_norm": 0.5648295879364014,
"learning_rate": 0.00018175002928429192,
"loss": 0.6791,
"step": 3900
},
{
"epoch": 0.28109133711635426,
"grad_norm": 0.5908897519111633,
"learning_rate": 0.00018128148061379877,
"loss": 0.6839,
"step": 4000
},
{
"epoch": 0.28109133711635426,
"eval_loss": 0.664506196975708,
"eval_runtime": 119.1229,
"eval_samples_per_second": 106.184,
"eval_steps_per_second": 13.28,
"step": 4000
},
{
"epoch": 0.2881186205442631,
"grad_norm": 0.5778653621673584,
"learning_rate": 0.00018081293194330562,
"loss": 0.6654,
"step": 4100
},
{
"epoch": 0.29514590397217194,
"grad_norm": 0.700835645198822,
"learning_rate": 0.00018034438327281247,
"loss": 0.6897,
"step": 4200
},
{
"epoch": 0.3021731874000808,
"grad_norm": 0.5250533819198608,
"learning_rate": 0.00017987583460231932,
"loss": 0.6934,
"step": 4300
},
{
"epoch": 0.3092004708279897,
"grad_norm": 0.725397527217865,
"learning_rate": 0.00017940728593182617,
"loss": 0.6781,
"step": 4400
},
{
"epoch": 0.31622775425589855,
"grad_norm": 0.5805392265319824,
"learning_rate": 0.00017893873726133302,
"loss": 0.6866,
"step": 4500
},
{
"epoch": 0.31622775425589855,
"eval_loss": 0.65822434425354,
"eval_runtime": 118.7959,
"eval_samples_per_second": 106.477,
"eval_steps_per_second": 13.317,
"step": 4500
},
{
"epoch": 0.32325503768380737,
"grad_norm": 0.6944029331207275,
"learning_rate": 0.00017847018859083988,
"loss": 0.6683,
"step": 4600
},
{
"epoch": 0.33028232111171624,
"grad_norm": 0.5891593098640442,
"learning_rate": 0.00017800163992034673,
"loss": 0.6817,
"step": 4700
},
{
"epoch": 0.3373096045396251,
"grad_norm": 0.6220216751098633,
"learning_rate": 0.00017753309124985358,
"loss": 0.6845,
"step": 4800
},
{
"epoch": 0.344336887967534,
"grad_norm": 0.7002474665641785,
"learning_rate": 0.00017706454257936045,
"loss": 0.6551,
"step": 4900
},
{
"epoch": 0.3513641713954428,
"grad_norm": 0.5766635537147522,
"learning_rate": 0.0001765959939088673,
"loss": 0.6741,
"step": 5000
},
{
"epoch": 0.3513641713954428,
"eval_loss": 0.6550154685974121,
"eval_runtime": 118.5512,
"eval_samples_per_second": 106.697,
"eval_steps_per_second": 13.344,
"step": 5000
},
{
"epoch": 0.35839145482335166,
"grad_norm": 0.5734873414039612,
"learning_rate": 0.00017612744523837415,
"loss": 0.6583,
"step": 5100
},
{
"epoch": 0.36541873825126053,
"grad_norm": 0.4953276515007019,
"learning_rate": 0.000175658896567881,
"loss": 0.6621,
"step": 5200
},
{
"epoch": 0.37244602167916935,
"grad_norm": 0.7837636470794678,
"learning_rate": 0.00017519034789738785,
"loss": 0.6846,
"step": 5300
},
{
"epoch": 0.3794733051070782,
"grad_norm": 0.718715488910675,
"learning_rate": 0.0001747217992268947,
"loss": 0.6776,
"step": 5400
},
{
"epoch": 0.3865005885349871,
"grad_norm": 0.5844186544418335,
"learning_rate": 0.00017425325055640155,
"loss": 0.6627,
"step": 5500
},
{
"epoch": 0.3865005885349871,
"eval_loss": 0.6532958149909973,
"eval_runtime": 118.7133,
"eval_samples_per_second": 106.551,
"eval_steps_per_second": 13.326,
"step": 5500
},
{
"epoch": 0.39352787196289596,
"grad_norm": 0.4426696300506592,
"learning_rate": 0.0001737847018859084,
"loss": 0.6546,
"step": 5600
},
{
"epoch": 0.4005551553908048,
"grad_norm": 0.5954882502555847,
"learning_rate": 0.00017331615321541525,
"loss": 0.6448,
"step": 5700
},
{
"epoch": 0.40758243881871364,
"grad_norm": 0.601349413394928,
"learning_rate": 0.0001728476045449221,
"loss": 0.6637,
"step": 5800
},
{
"epoch": 0.4146097222466225,
"grad_norm": 0.6108406782150269,
"learning_rate": 0.00017237905587442898,
"loss": 0.6709,
"step": 5900
},
{
"epoch": 0.4216370056745314,
"grad_norm": 0.6442033052444458,
"learning_rate": 0.00017191050720393583,
"loss": 0.6588,
"step": 6000
},
{
"epoch": 0.4216370056745314,
"eval_loss": 0.6498988270759583,
"eval_runtime": 118.8665,
"eval_samples_per_second": 106.413,
"eval_steps_per_second": 13.309,
"step": 6000
},
{
"epoch": 0.4286642891024402,
"grad_norm": 0.4642776846885681,
"learning_rate": 0.00017144195853344268,
"loss": 0.6706,
"step": 6100
},
{
"epoch": 0.43569157253034907,
"grad_norm": 0.6703388690948486,
"learning_rate": 0.0001709734098629495,
"loss": 0.6675,
"step": 6200
},
{
"epoch": 0.44271885595825794,
"grad_norm": 0.6001936793327332,
"learning_rate": 0.00017050486119245636,
"loss": 0.6545,
"step": 6300
},
{
"epoch": 0.4497461393861668,
"grad_norm": 0.7159720063209534,
"learning_rate": 0.00017003631252196323,
"loss": 0.6684,
"step": 6400
},
{
"epoch": 0.4567734228140756,
"grad_norm": 0.5892972350120544,
"learning_rate": 0.00016956776385147008,
"loss": 0.6509,
"step": 6500
},
{
"epoch": 0.4567734228140756,
"eval_loss": 0.6456841230392456,
"eval_runtime": 118.8557,
"eval_samples_per_second": 106.423,
"eval_steps_per_second": 13.31,
"step": 6500
},
{
"epoch": 0.4638007062419845,
"grad_norm": 0.6351083517074585,
"learning_rate": 0.00016909921518097693,
"loss": 0.634,
"step": 6600
},
{
"epoch": 0.47082798966989337,
"grad_norm": 0.7939039468765259,
"learning_rate": 0.00016863066651048378,
"loss": 0.6595,
"step": 6700
},
{
"epoch": 0.47785527309780224,
"grad_norm": 0.5831073522567749,
"learning_rate": 0.00016816211783999063,
"loss": 0.6444,
"step": 6800
},
{
"epoch": 0.48488255652571105,
"grad_norm": 0.6314815282821655,
"learning_rate": 0.0001676935691694975,
"loss": 0.6622,
"step": 6900
},
{
"epoch": 0.4919098399536199,
"grad_norm": 0.5781182646751404,
"learning_rate": 0.00016722502049900433,
"loss": 0.6448,
"step": 7000
},
{
"epoch": 0.4919098399536199,
"eval_loss": 0.6440666317939758,
"eval_runtime": 118.5556,
"eval_samples_per_second": 106.693,
"eval_steps_per_second": 13.344,
"step": 7000
},
{
"epoch": 0.4989371233815288,
"grad_norm": 0.7241762280464172,
"learning_rate": 0.00016675647182851119,
"loss": 0.6727,
"step": 7100
},
{
"epoch": 0.5059644068094377,
"grad_norm": 0.7668181657791138,
"learning_rate": 0.00016628792315801804,
"loss": 0.6564,
"step": 7200
},
{
"epoch": 0.5129916902373465,
"grad_norm": 0.5727465152740479,
"learning_rate": 0.00016581937448752489,
"loss": 0.6348,
"step": 7300
},
{
"epoch": 0.5200189736652554,
"grad_norm": 0.6217190027236938,
"learning_rate": 0.00016535082581703176,
"loss": 0.6434,
"step": 7400
},
{
"epoch": 0.5270462570931642,
"grad_norm": 0.6559625864028931,
"learning_rate": 0.0001648822771465386,
"loss": 0.6551,
"step": 7500
},
{
"epoch": 0.5270462570931642,
"eval_loss": 0.6428527235984802,
"eval_runtime": 118.6739,
"eval_samples_per_second": 106.586,
"eval_steps_per_second": 13.331,
"step": 7500
},
{
"epoch": 0.534073540521073,
"grad_norm": 0.6792352795600891,
"learning_rate": 0.00016441372847604546,
"loss": 0.6622,
"step": 7600
},
{
"epoch": 0.541100823948982,
"grad_norm": 0.6426942944526672,
"learning_rate": 0.00016394517980555231,
"loss": 0.6529,
"step": 7700
},
{
"epoch": 0.5481281073768908,
"grad_norm": 0.8337587118148804,
"learning_rate": 0.00016347663113505916,
"loss": 0.6506,
"step": 7800
},
{
"epoch": 0.5551553908047996,
"grad_norm": 0.5555398464202881,
"learning_rate": 0.00016300808246456601,
"loss": 0.6572,
"step": 7900
},
{
"epoch": 0.5621826742327085,
"grad_norm": 0.6205873489379883,
"learning_rate": 0.00016253953379407286,
"loss": 0.6545,
"step": 8000
},
{
"epoch": 0.5621826742327085,
"eval_loss": 0.639687716960907,
"eval_runtime": 118.9175,
"eval_samples_per_second": 106.368,
"eval_steps_per_second": 13.303,
"step": 8000
},
{
"epoch": 0.5692099576606173,
"grad_norm": 0.7055862545967102,
"learning_rate": 0.00016207098512357971,
"loss": 0.6428,
"step": 8100
},
{
"epoch": 0.5762372410885263,
"grad_norm": 0.5852298140525818,
"learning_rate": 0.00016160243645308656,
"loss": 0.6404,
"step": 8200
},
{
"epoch": 0.5832645245164351,
"grad_norm": 0.551387369632721,
"learning_rate": 0.00016113388778259341,
"loss": 0.6618,
"step": 8300
},
{
"epoch": 0.5902918079443439,
"grad_norm": 0.7349231839179993,
"learning_rate": 0.0001606653391121003,
"loss": 0.6592,
"step": 8400
},
{
"epoch": 0.5973190913722528,
"grad_norm": 0.502613365650177,
"learning_rate": 0.00016019679044160714,
"loss": 0.6467,
"step": 8500
},
{
"epoch": 0.5973190913722528,
"eval_loss": 0.6377580165863037,
"eval_runtime": 118.7487,
"eval_samples_per_second": 106.519,
"eval_steps_per_second": 13.322,
"step": 8500
},
{
"epoch": 0.6043463748001616,
"grad_norm": 0.7003266215324402,
"learning_rate": 0.000159728241771114,
"loss": 0.6531,
"step": 8600
},
{
"epoch": 0.6113736582280704,
"grad_norm": 0.42152824997901917,
"learning_rate": 0.00015925969310062084,
"loss": 0.647,
"step": 8700
},
{
"epoch": 0.6184009416559794,
"grad_norm": 0.7964949607849121,
"learning_rate": 0.0001587911444301277,
"loss": 0.6623,
"step": 8800
},
{
"epoch": 0.6254282250838882,
"grad_norm": 0.6723759770393372,
"learning_rate": 0.00015832259575963452,
"loss": 0.6528,
"step": 8900
},
{
"epoch": 0.6324555085117971,
"grad_norm": 0.6479921936988831,
"learning_rate": 0.0001578540470891414,
"loss": 0.6615,
"step": 9000
},
{
"epoch": 0.6324555085117971,
"eval_loss": 0.634124755859375,
"eval_runtime": 118.6171,
"eval_samples_per_second": 106.637,
"eval_steps_per_second": 13.337,
"step": 9000
},
{
"epoch": 0.6394827919397059,
"grad_norm": 0.6251150965690613,
"learning_rate": 0.00015738549841864824,
"loss": 0.6575,
"step": 9100
},
{
"epoch": 0.6465100753676147,
"grad_norm": 0.6354021430015564,
"learning_rate": 0.0001569169497481551,
"loss": 0.6371,
"step": 9200
},
{
"epoch": 0.6535373587955237,
"grad_norm": 0.6997053027153015,
"learning_rate": 0.00015644840107766194,
"loss": 0.6724,
"step": 9300
},
{
"epoch": 0.6605646422234325,
"grad_norm": 0.6767123341560364,
"learning_rate": 0.0001559798524071688,
"loss": 0.6382,
"step": 9400
},
{
"epoch": 0.6675919256513413,
"grad_norm": 0.5579701662063599,
"learning_rate": 0.00015551130373667567,
"loss": 0.6362,
"step": 9500
},
{
"epoch": 0.6675919256513413,
"eval_loss": 0.6337981224060059,
"eval_runtime": 118.7227,
"eval_samples_per_second": 106.542,
"eval_steps_per_second": 13.325,
"step": 9500
},
{
"epoch": 0.6746192090792502,
"grad_norm": 0.6185320019721985,
"learning_rate": 0.00015504275506618252,
"loss": 0.638,
"step": 9600
},
{
"epoch": 0.681646492507159,
"grad_norm": 0.5869000554084778,
"learning_rate": 0.00015457420639568935,
"loss": 0.6557,
"step": 9700
},
{
"epoch": 0.688673775935068,
"grad_norm": 0.6124538779258728,
"learning_rate": 0.0001541056577251962,
"loss": 0.6394,
"step": 9800
},
{
"epoch": 0.6957010593629768,
"grad_norm": 1.008245587348938,
"learning_rate": 0.00015363710905470305,
"loss": 0.6516,
"step": 9900
},
{
"epoch": 0.7027283427908856,
"grad_norm": 0.5377674698829651,
"learning_rate": 0.00015316856038420992,
"loss": 0.6089,
"step": 10000
},
{
"epoch": 0.7027283427908856,
"eval_loss": 0.6323862671852112,
"eval_runtime": 118.7128,
"eval_samples_per_second": 106.551,
"eval_steps_per_second": 13.326,
"step": 10000
},
{
"epoch": 0.7097556262187945,
"grad_norm": 0.6718229651451111,
"learning_rate": 0.00015270001171371677,
"loss": 0.6463,
"step": 10100
},
{
"epoch": 0.7167829096467033,
"grad_norm": 0.613488495349884,
"learning_rate": 0.00015223146304322362,
"loss": 0.6513,
"step": 10200
},
{
"epoch": 0.7238101930746121,
"grad_norm": 0.6889612674713135,
"learning_rate": 0.00015176291437273047,
"loss": 0.644,
"step": 10300
},
{
"epoch": 0.7308374765025211,
"grad_norm": 0.845743715763092,
"learning_rate": 0.00015129436570223732,
"loss": 0.6299,
"step": 10400
},
{
"epoch": 0.7378647599304299,
"grad_norm": 0.8277881145477295,
"learning_rate": 0.00015082581703174417,
"loss": 0.6592,
"step": 10500
},
{
"epoch": 0.7378647599304299,
"eval_loss": 0.629069983959198,
"eval_runtime": 118.7997,
"eval_samples_per_second": 106.473,
"eval_steps_per_second": 13.317,
"step": 10500
},
{
"epoch": 0.7448920433583387,
"grad_norm": 0.5254293084144592,
"learning_rate": 0.00015035726836125102,
"loss": 0.6634,
"step": 10600
},
{
"epoch": 0.7519193267862476,
"grad_norm": 0.7291231155395508,
"learning_rate": 0.00014988871969075787,
"loss": 0.653,
"step": 10700
},
{
"epoch": 0.7589466102141564,
"grad_norm": 0.5473717451095581,
"learning_rate": 0.00014942017102026472,
"loss": 0.6477,
"step": 10800
},
{
"epoch": 0.7659738936420654,
"grad_norm": 0.717761218547821,
"learning_rate": 0.00014895162234977158,
"loss": 0.6481,
"step": 10900
},
{
"epoch": 0.7730011770699742,
"grad_norm": 0.5327322483062744,
"learning_rate": 0.00014848307367927845,
"loss": 0.6581,
"step": 11000
},
{
"epoch": 0.7730011770699742,
"eval_loss": 0.6284623146057129,
"eval_runtime": 119.1069,
"eval_samples_per_second": 106.199,
"eval_steps_per_second": 13.282,
"step": 11000
},
{
"epoch": 0.780028460497883,
"grad_norm": 0.5596719980239868,
"learning_rate": 0.0001480145250087853,
"loss": 0.6372,
"step": 11100
},
{
"epoch": 0.7870557439257919,
"grad_norm": 0.56830894947052,
"learning_rate": 0.00014754597633829215,
"loss": 0.6353,
"step": 11200
},
{
"epoch": 0.7940830273537007,
"grad_norm": 0.6329615712165833,
"learning_rate": 0.000147077427667799,
"loss": 0.6365,
"step": 11300
},
{
"epoch": 0.8011103107816095,
"grad_norm": 0.8399169445037842,
"learning_rate": 0.00014660887899730585,
"loss": 0.6212,
"step": 11400
},
{
"epoch": 0.8081375942095185,
"grad_norm": 0.7220659255981445,
"learning_rate": 0.0001461403303268127,
"loss": 0.6438,
"step": 11500
},
{
"epoch": 0.8081375942095185,
"eval_loss": 0.6267364621162415,
"eval_runtime": 118.3852,
"eval_samples_per_second": 106.846,
"eval_steps_per_second": 13.363,
"step": 11500
},
{
"epoch": 0.8151648776374273,
"grad_norm": 0.5614886283874512,
"learning_rate": 0.00014567178165631955,
"loss": 0.6262,
"step": 11600
},
{
"epoch": 0.8221921610653362,
"grad_norm": 0.7679696679115295,
"learning_rate": 0.0001452032329858264,
"loss": 0.6473,
"step": 11700
},
{
"epoch": 0.829219444493245,
"grad_norm": 0.6253560185432434,
"learning_rate": 0.00014473468431533325,
"loss": 0.6483,
"step": 11800
},
{
"epoch": 0.8362467279211538,
"grad_norm": 0.5834682583808899,
"learning_rate": 0.0001442661356448401,
"loss": 0.6411,
"step": 11900
},
{
"epoch": 0.8432740113490628,
"grad_norm": 0.4512103497982025,
"learning_rate": 0.00014379758697434698,
"loss": 0.6321,
"step": 12000
},
{
"epoch": 0.8432740113490628,
"eval_loss": 0.6246777772903442,
"eval_runtime": 118.7689,
"eval_samples_per_second": 106.501,
"eval_steps_per_second": 13.32,
"step": 12000
},
{
"epoch": 0.8503012947769716,
"grad_norm": 0.6617989540100098,
"learning_rate": 0.00014332903830385383,
"loss": 0.6485,
"step": 12100
},
{
"epoch": 0.8573285782048804,
"grad_norm": 0.6235445737838745,
"learning_rate": 0.00014286048963336068,
"loss": 0.6254,
"step": 12200
},
{
"epoch": 0.8643558616327893,
"grad_norm": 0.612450122833252,
"learning_rate": 0.00014239194096286753,
"loss": 0.649,
"step": 12300
},
{
"epoch": 0.8713831450606981,
"grad_norm": 0.7379807829856873,
"learning_rate": 0.00014192339229237436,
"loss": 0.6284,
"step": 12400
},
{
"epoch": 0.8784104284886071,
"grad_norm": 0.8035106658935547,
"learning_rate": 0.00014145484362188123,
"loss": 0.6466,
"step": 12500
},
{
"epoch": 0.8784104284886071,
"eval_loss": 0.6244432330131531,
"eval_runtime": 118.6995,
"eval_samples_per_second": 106.563,
"eval_steps_per_second": 13.328,
"step": 12500
},
{
"epoch": 0.8854377119165159,
"grad_norm": 0.6433550715446472,
"learning_rate": 0.00014098629495138808,
"loss": 0.6324,
"step": 12600
},
{
"epoch": 0.8924649953444247,
"grad_norm": 0.7102698087692261,
"learning_rate": 0.00014051774628089493,
"loss": 0.6536,
"step": 12700
},
{
"epoch": 0.8994922787723336,
"grad_norm": 0.7628334164619446,
"learning_rate": 0.00014004919761040178,
"loss": 0.6434,
"step": 12800
},
{
"epoch": 0.9065195622002424,
"grad_norm": 0.5142523050308228,
"learning_rate": 0.00013958064893990863,
"loss": 0.6406,
"step": 12900
},
{
"epoch": 0.9135468456281512,
"grad_norm": 0.8540221452713013,
"learning_rate": 0.0001391121002694155,
"loss": 0.6317,
"step": 13000
},
{
"epoch": 0.9135468456281512,
"eval_loss": 0.6225576996803284,
"eval_runtime": 118.6623,
"eval_samples_per_second": 106.597,
"eval_steps_per_second": 13.332,
"step": 13000
},
{
"epoch": 0.9205741290560602,
"grad_norm": 0.6770111918449402,
"learning_rate": 0.00013864355159892236,
"loss": 0.6381,
"step": 13100
},
{
"epoch": 0.927601412483969,
"grad_norm": 0.7313960194587708,
"learning_rate": 0.00013817500292842918,
"loss": 0.643,
"step": 13200
},
{
"epoch": 0.9346286959118779,
"grad_norm": 0.8158569931983948,
"learning_rate": 0.00013770645425793603,
"loss": 0.6381,
"step": 13300
},
{
"epoch": 0.9416559793397867,
"grad_norm": 0.6563596725463867,
"learning_rate": 0.00013723790558744289,
"loss": 0.6445,
"step": 13400
},
{
"epoch": 0.9486832627676955,
"grad_norm": 0.6007642149925232,
"learning_rate": 0.00013676935691694976,
"loss": 0.6165,
"step": 13500
},
{
"epoch": 0.9486832627676955,
"eval_loss": 0.6213079690933228,
"eval_runtime": 118.8856,
"eval_samples_per_second": 106.396,
"eval_steps_per_second": 13.307,
"step": 13500
},
{
"epoch": 0.9557105461956045,
"grad_norm": 0.7704166173934937,
"learning_rate": 0.0001363008082464566,
"loss": 0.6475,
"step": 13600
},
{
"epoch": 0.9627378296235133,
"grad_norm": 0.6467058658599854,
"learning_rate": 0.00013583225957596346,
"loss": 0.626,
"step": 13700
},
{
"epoch": 0.9697651130514221,
"grad_norm": 0.5320102572441101,
"learning_rate": 0.0001353637109054703,
"loss": 0.6283,
"step": 13800
},
{
"epoch": 0.976792396479331,
"grad_norm": 0.6444761157035828,
"learning_rate": 0.00013489516223497716,
"loss": 0.6381,
"step": 13900
},
{
"epoch": 0.9838196799072398,
"grad_norm": 0.7598044872283936,
"learning_rate": 0.00013442661356448404,
"loss": 0.6305,
"step": 14000
},
{
"epoch": 0.9838196799072398,
"eval_loss": 0.6203290820121765,
"eval_runtime": 118.8928,
"eval_samples_per_second": 106.39,
"eval_steps_per_second": 13.306,
"step": 14000
},
{
"epoch": 0.9908469633351488,
"grad_norm": 0.735137403011322,
"learning_rate": 0.00013395806489399086,
"loss": 0.6378,
"step": 14100
},
{
"epoch": 0.9978742467630576,
"grad_norm": 0.758840799331665,
"learning_rate": 0.00013348951622349771,
"loss": 0.6469,
"step": 14200
},
{
"epoch": 1.0049015301909665,
"grad_norm": 0.5422857403755188,
"learning_rate": 0.00013302096755300456,
"loss": 0.586,
"step": 14300
},
{
"epoch": 1.0119288136188753,
"grad_norm": 0.5595451593399048,
"learning_rate": 0.00013255241888251141,
"loss": 0.5967,
"step": 14400
},
{
"epoch": 1.0189560970467841,
"grad_norm": 0.6194477081298828,
"learning_rate": 0.0001320838702120183,
"loss": 0.602,
"step": 14500
},
{
"epoch": 1.0189560970467841,
"eval_loss": 0.6205016374588013,
"eval_runtime": 118.9575,
"eval_samples_per_second": 106.332,
"eval_steps_per_second": 13.299,
"step": 14500
},
{
"epoch": 1.025983380474693,
"grad_norm": 0.710670530796051,
"learning_rate": 0.00013161532154152514,
"loss": 0.5954,
"step": 14600
},
{
"epoch": 1.0330106639026018,
"grad_norm": 0.613923192024231,
"learning_rate": 0.000131146772871032,
"loss": 0.5855,
"step": 14700
},
{
"epoch": 1.0400379473305108,
"grad_norm": 0.7411431670188904,
"learning_rate": 0.00013067822420053884,
"loss": 0.5881,
"step": 14800
},
{
"epoch": 1.0470652307584196,
"grad_norm": 0.7143989205360413,
"learning_rate": 0.0001302096755300457,
"loss": 0.5891,
"step": 14900
},
{
"epoch": 1.0540925141863284,
"grad_norm": 0.6597478985786438,
"learning_rate": 0.00012974112685955254,
"loss": 0.5915,
"step": 15000
},
{
"epoch": 1.0540925141863284,
"eval_loss": 0.6205988526344299,
"eval_runtime": 118.9337,
"eval_samples_per_second": 106.353,
"eval_steps_per_second": 13.302,
"step": 15000
},
{
"epoch": 1.0611197976142372,
"grad_norm": 0.5813501477241516,
"learning_rate": 0.0001292725781890594,
"loss": 0.5877,
"step": 15100
},
{
"epoch": 1.068147081042146,
"grad_norm": 0.5610823631286621,
"learning_rate": 0.00012880402951856624,
"loss": 0.5911,
"step": 15200
},
{
"epoch": 1.075174364470055,
"grad_norm": 0.5839988589286804,
"learning_rate": 0.0001283354808480731,
"loss": 0.5903,
"step": 15300
},
{
"epoch": 1.082201647897964,
"grad_norm": 0.6421522498130798,
"learning_rate": 0.00012786693217757994,
"loss": 0.591,
"step": 15400
},
{
"epoch": 1.0892289313258727,
"grad_norm": 0.6854695081710815,
"learning_rate": 0.0001273983835070868,
"loss": 0.5998,
"step": 15500
},
{
"epoch": 1.0892289313258727,
"eval_loss": 0.6208451986312866,
"eval_runtime": 118.6829,
"eval_samples_per_second": 106.578,
"eval_steps_per_second": 13.33,
"step": 15500
},
{
"epoch": 1.0962562147537815,
"grad_norm": 0.7333750128746033,
"learning_rate": 0.00012692983483659367,
"loss": 0.5916,
"step": 15600
},
{
"epoch": 1.1032834981816904,
"grad_norm": 0.5983602404594421,
"learning_rate": 0.00012646128616610052,
"loss": 0.581,
"step": 15700
},
{
"epoch": 1.1103107816095992,
"grad_norm": 0.5112642645835876,
"learning_rate": 0.00012599273749560737,
"loss": 0.5984,
"step": 15800
},
{
"epoch": 1.1173380650375082,
"grad_norm": 0.5569522976875305,
"learning_rate": 0.0001255241888251142,
"loss": 0.5983,
"step": 15900
},
{
"epoch": 1.124365348465417,
"grad_norm": 0.6089062690734863,
"learning_rate": 0.00012505564015462105,
"loss": 0.6019,
"step": 16000
},
{
"epoch": 1.124365348465417,
"eval_loss": 0.6191478967666626,
"eval_runtime": 118.6608,
"eval_samples_per_second": 106.598,
"eval_steps_per_second": 13.332,
"step": 16000
},
{
"epoch": 1.1313926318933258,
"grad_norm": 0.5710394978523254,
"learning_rate": 0.00012458709148412792,
"loss": 0.5907,
"step": 16100
},
{
"epoch": 1.1384199153212347,
"grad_norm": 0.7836496233940125,
"learning_rate": 0.00012411854281363477,
"loss": 0.5818,
"step": 16200
},
{
"epoch": 1.1454471987491435,
"grad_norm": 0.600236177444458,
"learning_rate": 0.00012364999414314162,
"loss": 0.5834,
"step": 16300
},
{
"epoch": 1.1524744821770523,
"grad_norm": 0.7090241312980652,
"learning_rate": 0.00012318144547264847,
"loss": 0.5963,
"step": 16400
},
{
"epoch": 1.1595017656049613,
"grad_norm": 0.5439143180847168,
"learning_rate": 0.00012271289680215532,
"loss": 0.5939,
"step": 16500
},
{
"epoch": 1.1595017656049613,
"eval_loss": 0.6187065243721008,
"eval_runtime": 118.7823,
"eval_samples_per_second": 106.489,
"eval_steps_per_second": 13.318,
"step": 16500
},
{
"epoch": 1.1665290490328701,
"grad_norm": 0.6133089661598206,
"learning_rate": 0.0001222443481316622,
"loss": 0.583,
"step": 16600
},
{
"epoch": 1.173556332460779,
"grad_norm": 0.6947652101516724,
"learning_rate": 0.00012177579946116904,
"loss": 0.5851,
"step": 16700
},
{
"epoch": 1.1805836158886878,
"grad_norm": 0.799213707447052,
"learning_rate": 0.00012130725079067589,
"loss": 0.5815,
"step": 16800
},
{
"epoch": 1.1876108993165966,
"grad_norm": 0.6443912982940674,
"learning_rate": 0.00012083870212018274,
"loss": 0.5794,
"step": 16900
},
{
"epoch": 1.1946381827445056,
"grad_norm": 0.8435219526290894,
"learning_rate": 0.00012037015344968959,
"loss": 0.5747,
"step": 17000
},
{
"epoch": 1.1946381827445056,
"eval_loss": 0.6187562942504883,
"eval_runtime": 118.6879,
"eval_samples_per_second": 106.574,
"eval_steps_per_second": 13.329,
"step": 17000
},
{
"epoch": 1.2016654661724144,
"grad_norm": 0.7162328958511353,
"learning_rate": 0.00011990160477919645,
"loss": 0.596,
"step": 17100
},
{
"epoch": 1.2086927496003232,
"grad_norm": 0.6594322919845581,
"learning_rate": 0.0001194330561087033,
"loss": 0.6054,
"step": 17200
},
{
"epoch": 1.215720033028232,
"grad_norm": 0.5395209193229675,
"learning_rate": 0.00011896450743821015,
"loss": 0.5835,
"step": 17300
},
{
"epoch": 1.2227473164561409,
"grad_norm": 0.7208767533302307,
"learning_rate": 0.000118495958767717,
"loss": 0.595,
"step": 17400
},
{
"epoch": 1.22977459988405,
"grad_norm": 0.6752803921699524,
"learning_rate": 0.00011802741009722384,
"loss": 0.5937,
"step": 17500
},
{
"epoch": 1.22977459988405,
"eval_loss": 0.6167559623718262,
"eval_runtime": 118.8202,
"eval_samples_per_second": 106.455,
"eval_steps_per_second": 13.314,
"step": 17500
},
{
"epoch": 1.2368018833119587,
"grad_norm": 0.7853017449378967,
"learning_rate": 0.00011755886142673072,
"loss": 0.6035,
"step": 17600
},
{
"epoch": 1.2438291667398675,
"grad_norm": 0.6420643329620361,
"learning_rate": 0.00011709031275623757,
"loss": 0.5834,
"step": 17700
},
{
"epoch": 1.2508564501677764,
"grad_norm": 0.8449912667274475,
"learning_rate": 0.00011662176408574442,
"loss": 0.5964,
"step": 17800
},
{
"epoch": 1.2578837335956852,
"grad_norm": 0.6199436783790588,
"learning_rate": 0.00011615321541525125,
"loss": 0.5771,
"step": 17900
},
{
"epoch": 1.2649110170235942,
"grad_norm": 1.0329114198684692,
"learning_rate": 0.0001156846667447581,
"loss": 0.5678,
"step": 18000
},
{
"epoch": 1.2649110170235942,
"eval_loss": 0.6171479821205139,
"eval_runtime": 118.9379,
"eval_samples_per_second": 106.35,
"eval_steps_per_second": 13.301,
"step": 18000
},
{
"epoch": 1.271938300451503,
"grad_norm": 0.7668006420135498,
"learning_rate": 0.00011521611807426498,
"loss": 0.5895,
"step": 18100
},
{
"epoch": 1.2789655838794118,
"grad_norm": 0.661259651184082,
"learning_rate": 0.00011474756940377183,
"loss": 0.5951,
"step": 18200
},
{
"epoch": 1.2859928673073207,
"grad_norm": 0.5448057055473328,
"learning_rate": 0.00011427902073327867,
"loss": 0.5958,
"step": 18300
},
{
"epoch": 1.2930201507352295,
"grad_norm": 0.5419151782989502,
"learning_rate": 0.00011381047206278552,
"loss": 0.5788,
"step": 18400
},
{
"epoch": 1.3000474341631385,
"grad_norm": 0.595245361328125,
"learning_rate": 0.00011334192339229237,
"loss": 0.5849,
"step": 18500
},
{
"epoch": 1.3000474341631385,
"eval_loss": 0.6158913969993591,
"eval_runtime": 118.7534,
"eval_samples_per_second": 106.515,
"eval_steps_per_second": 13.322,
"step": 18500
},
{
"epoch": 1.3070747175910473,
"grad_norm": 0.6495450139045715,
"learning_rate": 0.00011287337472179925,
"loss": 0.5849,
"step": 18600
},
{
"epoch": 1.3141020010189561,
"grad_norm": 0.686590313911438,
"learning_rate": 0.00011240482605130608,
"loss": 0.5679,
"step": 18700
},
{
"epoch": 1.321129284446865,
"grad_norm": 0.60063636302948,
"learning_rate": 0.00011193627738081293,
"loss": 0.5805,
"step": 18800
},
{
"epoch": 1.3281565678747738,
"grad_norm": 0.6396400332450867,
"learning_rate": 0.00011146772871031978,
"loss": 0.5996,
"step": 18900
},
{
"epoch": 1.3351838513026828,
"grad_norm": 0.6631867289543152,
"learning_rate": 0.00011099918003982663,
"loss": 0.5926,
"step": 19000
},
{
"epoch": 1.3351838513026828,
"eval_loss": 0.6149775981903076,
"eval_runtime": 118.2284,
"eval_samples_per_second": 106.988,
"eval_steps_per_second": 13.381,
"step": 19000
},
{
"epoch": 1.3422111347305914,
"grad_norm": 0.6868234276771545,
"learning_rate": 0.0001105306313693335,
"loss": 0.5826,
"step": 19100
},
{
"epoch": 1.3492384181585004,
"grad_norm": 0.6282151937484741,
"learning_rate": 0.00011006208269884035,
"loss": 0.5927,
"step": 19200
},
{
"epoch": 1.3562657015864092,
"grad_norm": 0.6498789191246033,
"learning_rate": 0.0001095935340283472,
"loss": 0.598,
"step": 19300
},
{
"epoch": 1.363292985014318,
"grad_norm": 0.7654560804367065,
"learning_rate": 0.00010912498535785405,
"loss": 0.5819,
"step": 19400
},
{
"epoch": 1.3703202684422269,
"grad_norm": 0.6079320907592773,
"learning_rate": 0.0001086564366873609,
"loss": 0.5941,
"step": 19500
},
{
"epoch": 1.3703202684422269,
"eval_loss": 0.6134491562843323,
"eval_runtime": 118.1186,
"eval_samples_per_second": 107.087,
"eval_steps_per_second": 13.393,
"step": 19500
},
{
"epoch": 1.3773475518701357,
"grad_norm": 0.7858242392539978,
"learning_rate": 0.00010818788801686776,
"loss": 0.6035,
"step": 19600
},
{
"epoch": 1.3843748352980447,
"grad_norm": 0.46175357699394226,
"learning_rate": 0.00010771933934637461,
"loss": 0.6054,
"step": 19700
},
{
"epoch": 1.3914021187259535,
"grad_norm": 0.817308783531189,
"learning_rate": 0.00010725079067588146,
"loss": 0.5844,
"step": 19800
},
{
"epoch": 1.3984294021538624,
"grad_norm": 0.7891727685928345,
"learning_rate": 0.00010678224200538831,
"loss": 0.5885,
"step": 19900
},
{
"epoch": 1.4054566855817712,
"grad_norm": 0.7089536786079407,
"learning_rate": 0.00010631369333489516,
"loss": 0.6108,
"step": 20000
},
{
"epoch": 1.4054566855817712,
"eval_loss": 0.6131945252418518,
"eval_runtime": 119.3154,
"eval_samples_per_second": 106.013,
"eval_steps_per_second": 13.259,
"step": 20000
},
{
"epoch": 1.41248396900968,
"grad_norm": 0.6867943406105042,
"learning_rate": 0.00010584514466440203,
"loss": 0.5863,
"step": 20100
},
{
"epoch": 1.419511252437589,
"grad_norm": 0.9261388182640076,
"learning_rate": 0.00010537659599390888,
"loss": 0.6027,
"step": 20200
},
{
"epoch": 1.4265385358654978,
"grad_norm": 0.7854331135749817,
"learning_rate": 0.00010490804732341573,
"loss": 0.5637,
"step": 20300
},
{
"epoch": 1.4335658192934067,
"grad_norm": 0.4610428214073181,
"learning_rate": 0.00010443949865292258,
"loss": 0.5961,
"step": 20400
},
{
"epoch": 1.4405931027213155,
"grad_norm": 0.651196300983429,
"learning_rate": 0.00010397094998242943,
"loss": 0.5829,
"step": 20500
},
{
"epoch": 1.4405931027213155,
"eval_loss": 0.6108871698379517,
"eval_runtime": 119.1208,
"eval_samples_per_second": 106.186,
"eval_steps_per_second": 13.281,
"step": 20500
},
{
"epoch": 1.4476203861492243,
"grad_norm": 0.7416488528251648,
"learning_rate": 0.00010350240131193629,
"loss": 0.5869,
"step": 20600
},
{
"epoch": 1.4546476695771333,
"grad_norm": 0.6142196655273438,
"learning_rate": 0.00010303385264144314,
"loss": 0.5858,
"step": 20700
},
{
"epoch": 1.4616749530050421,
"grad_norm": 0.644241213798523,
"learning_rate": 0.00010256530397094999,
"loss": 0.5861,
"step": 20800
},
{
"epoch": 1.468702236432951,
"grad_norm": 0.8656560182571411,
"learning_rate": 0.00010209675530045684,
"loss": 0.6099,
"step": 20900
},
{
"epoch": 1.4757295198608598,
"grad_norm": 0.5667104721069336,
"learning_rate": 0.00010162820662996368,
"loss": 0.601,
"step": 21000
},
{
"epoch": 1.4757295198608598,
"eval_loss": 0.6112544536590576,
"eval_runtime": 119.0171,
"eval_samples_per_second": 106.279,
"eval_steps_per_second": 13.292,
"step": 21000
},
{
"epoch": 1.4827568032887686,
"grad_norm": 0.6884378790855408,
"learning_rate": 0.00010115965795947056,
"loss": 0.5882,
"step": 21100
},
{
"epoch": 1.4897840867166776,
"grad_norm": 0.7136459946632385,
"learning_rate": 0.0001006911092889774,
"loss": 0.5905,
"step": 21200
},
{
"epoch": 1.4968113701445864,
"grad_norm": 0.8048639297485352,
"learning_rate": 0.00010022256061848426,
"loss": 0.5875,
"step": 21300
},
{
"epoch": 1.5038386535724952,
"grad_norm": 0.7300230860710144,
"learning_rate": 9.975401194799109e-05,
"loss": 0.5916,
"step": 21400
},
{
"epoch": 1.510865937000404,
"grad_norm": 0.7058496475219727,
"learning_rate": 9.928546327749796e-05,
"loss": 0.5768,
"step": 21500
},
{
"epoch": 1.510865937000404,
"eval_loss": 0.6108001470565796,
"eval_runtime": 118.8504,
"eval_samples_per_second": 106.428,
"eval_steps_per_second": 13.311,
"step": 21500
},
{
"epoch": 1.5178932204283129,
"grad_norm": 0.8157733678817749,
"learning_rate": 9.881691460700481e-05,
"loss": 0.5872,
"step": 21600
},
{
"epoch": 1.524920503856222,
"grad_norm": 0.8618035316467285,
"learning_rate": 9.834836593651166e-05,
"loss": 0.5861,
"step": 21700
},
{
"epoch": 1.5319477872841305,
"grad_norm": 0.7457069158554077,
"learning_rate": 9.787981726601851e-05,
"loss": 0.5948,
"step": 21800
},
{
"epoch": 1.5389750707120395,
"grad_norm": 0.7307142615318298,
"learning_rate": 9.741126859552536e-05,
"loss": 0.6068,
"step": 21900
},
{
"epoch": 1.5460023541399484,
"grad_norm": 0.7692698836326599,
"learning_rate": 9.694271992503222e-05,
"loss": 0.5732,
"step": 22000
},
{
"epoch": 1.5460023541399484,
"eval_loss": 0.6103039979934692,
"eval_runtime": 118.5221,
"eval_samples_per_second": 106.723,
"eval_steps_per_second": 13.348,
"step": 22000
},
{
"epoch": 1.5530296375678572,
"grad_norm": 0.7781071662902832,
"learning_rate": 9.647417125453907e-05,
"loss": 0.5915,
"step": 22100
},
{
"epoch": 1.5600569209957662,
"grad_norm": 0.7720737457275391,
"learning_rate": 9.600562258404592e-05,
"loss": 0.6007,
"step": 22200
},
{
"epoch": 1.5670842044236748,
"grad_norm": 0.632757306098938,
"learning_rate": 9.553707391355277e-05,
"loss": 0.5838,
"step": 22300
},
{
"epoch": 1.5741114878515838,
"grad_norm": 0.7514855265617371,
"learning_rate": 9.506852524305962e-05,
"loss": 0.5894,
"step": 22400
},
{
"epoch": 1.5811387712794926,
"grad_norm": 0.5634511113166809,
"learning_rate": 9.459997657256649e-05,
"loss": 0.5774,
"step": 22500
},
{
"epoch": 1.5811387712794926,
"eval_loss": 0.6085862517356873,
"eval_runtime": 119.1435,
"eval_samples_per_second": 106.166,
"eval_steps_per_second": 13.278,
"step": 22500
},
{
"epoch": 1.5881660547074015,
"grad_norm": 0.8155964612960815,
"learning_rate": 9.413142790207334e-05,
"loss": 0.582,
"step": 22600
},
{
"epoch": 1.5951933381353105,
"grad_norm": 0.7442721128463745,
"learning_rate": 9.366287923158019e-05,
"loss": 0.595,
"step": 22700
},
{
"epoch": 1.602220621563219,
"grad_norm": 0.6977400183677673,
"learning_rate": 9.319433056108704e-05,
"loss": 0.5906,
"step": 22800
},
{
"epoch": 1.6092479049911281,
"grad_norm": 0.7066090703010559,
"learning_rate": 9.272578189059389e-05,
"loss": 0.5917,
"step": 22900
},
{
"epoch": 1.616275188419037,
"grad_norm": 0.9004433751106262,
"learning_rate": 9.225723322010075e-05,
"loss": 0.5821,
"step": 23000
},
{
"epoch": 1.616275188419037,
"eval_loss": 0.608232319355011,
"eval_runtime": 118.5994,
"eval_samples_per_second": 106.653,
"eval_steps_per_second": 13.339,
"step": 23000
},
{
"epoch": 1.6233024718469458,
"grad_norm": 0.6980244517326355,
"learning_rate": 9.17886845496076e-05,
"loss": 0.5986,
"step": 23100
},
{
"epoch": 1.6303297552748546,
"grad_norm": 0.6959982514381409,
"learning_rate": 9.132013587911444e-05,
"loss": 0.5751,
"step": 23200
},
{
"epoch": 1.6373570387027634,
"grad_norm": 0.6452066898345947,
"learning_rate": 9.08515872086213e-05,
"loss": 0.5834,
"step": 23300
},
{
"epoch": 1.6443843221306724,
"grad_norm": 0.563113272190094,
"learning_rate": 9.038303853812815e-05,
"loss": 0.5943,
"step": 23400
},
{
"epoch": 1.6514116055585812,
"grad_norm": 0.6849614977836609,
"learning_rate": 8.991448986763502e-05,
"loss": 0.581,
"step": 23500
},
{
"epoch": 1.6514116055585812,
"eval_loss": 0.607568621635437,
"eval_runtime": 119.0065,
"eval_samples_per_second": 106.288,
"eval_steps_per_second": 13.293,
"step": 23500
},
{
"epoch": 1.65843888898649,
"grad_norm": 0.8570700287818909,
"learning_rate": 8.944594119714185e-05,
"loss": 0.592,
"step": 23600
},
{
"epoch": 1.6654661724143989,
"grad_norm": 0.4864564538002014,
"learning_rate": 8.89773925266487e-05,
"loss": 0.5722,
"step": 23700
},
{
"epoch": 1.6724934558423077,
"grad_norm": 0.6979348063468933,
"learning_rate": 8.850884385615557e-05,
"loss": 0.578,
"step": 23800
},
{
"epoch": 1.6795207392702167,
"grad_norm": 0.7699964046478271,
"learning_rate": 8.804029518566242e-05,
"loss": 0.5897,
"step": 23900
},
{
"epoch": 1.6865480226981253,
"grad_norm": 0.8256754875183105,
"learning_rate": 8.757174651516927e-05,
"loss": 0.5845,
"step": 24000
},
{
"epoch": 1.6865480226981253,
"eval_loss": 0.6071833372116089,
"eval_runtime": 119.1197,
"eval_samples_per_second": 106.187,
"eval_steps_per_second": 13.281,
"step": 24000
},
{
"epoch": 1.6935753061260344,
"grad_norm": 0.6591055393218994,
"learning_rate": 8.710319784467612e-05,
"loss": 0.5867,
"step": 24100
},
{
"epoch": 1.7006025895539432,
"grad_norm": 0.7720032334327698,
"learning_rate": 8.663464917418297e-05,
"loss": 0.5905,
"step": 24200
},
{
"epoch": 1.707629872981852,
"grad_norm": 0.5807234048843384,
"learning_rate": 8.616610050368983e-05,
"loss": 0.5849,
"step": 24300
},
{
"epoch": 1.714657156409761,
"grad_norm": 0.6583465337753296,
"learning_rate": 8.569755183319668e-05,
"loss": 0.5706,
"step": 24400
},
{
"epoch": 1.7216844398376696,
"grad_norm": 0.7280032634735107,
"learning_rate": 8.522900316270352e-05,
"loss": 0.5843,
"step": 24500
},
{
"epoch": 1.7216844398376696,
"eval_loss": 0.6051200032234192,
"eval_runtime": 119.1844,
"eval_samples_per_second": 106.13,
"eval_steps_per_second": 13.274,
"step": 24500
},
{
"epoch": 1.7287117232655786,
"grad_norm": 0.7264565825462341,
"learning_rate": 8.476045449221038e-05,
"loss": 0.5866,
"step": 24600
},
{
"epoch": 1.7357390066934875,
"grad_norm": 0.7795102596282959,
"learning_rate": 8.429190582171723e-05,
"loss": 0.5807,
"step": 24700
},
{
"epoch": 1.7427662901213963,
"grad_norm": 0.702314019203186,
"learning_rate": 8.38233571512241e-05,
"loss": 0.5856,
"step": 24800
},
{
"epoch": 1.7497935735493053,
"grad_norm": 0.7014954090118408,
"learning_rate": 8.335480848073095e-05,
"loss": 0.5812,
"step": 24900
},
{
"epoch": 1.756820856977214,
"grad_norm": 0.6724287867546082,
"learning_rate": 8.288625981023778e-05,
"loss": 0.5803,
"step": 25000
},
{
"epoch": 1.756820856977214,
"eval_loss": 0.6055319309234619,
"eval_runtime": 119.2213,
"eval_samples_per_second": 106.097,
"eval_steps_per_second": 13.269,
"step": 25000
},
{
"epoch": 1.763848140405123,
"grad_norm": 0.9690128564834595,
"learning_rate": 8.241771113974465e-05,
"loss": 0.5674,
"step": 25100
},
{
"epoch": 1.7708754238330318,
"grad_norm": 1.0121440887451172,
"learning_rate": 8.19491624692515e-05,
"loss": 0.5913,
"step": 25200
},
{
"epoch": 1.7779027072609406,
"grad_norm": 0.7207921743392944,
"learning_rate": 8.148061379875836e-05,
"loss": 0.592,
"step": 25300
},
{
"epoch": 1.7849299906888496,
"grad_norm": 0.6326346397399902,
"learning_rate": 8.10120651282652e-05,
"loss": 0.592,
"step": 25400
},
{
"epoch": 1.7919572741167582,
"grad_norm": 0.7215606570243835,
"learning_rate": 8.054351645777205e-05,
"loss": 0.578,
"step": 25500
},
{
"epoch": 1.7919572741167582,
"eval_loss": 0.6045902371406555,
"eval_runtime": 118.5573,
"eval_samples_per_second": 106.691,
"eval_steps_per_second": 13.344,
"step": 25500
},
{
"epoch": 1.7989845575446672,
"grad_norm": 0.6932191848754883,
"learning_rate": 8.007496778727891e-05,
"loss": 0.5881,
"step": 25600
},
{
"epoch": 1.806011840972576,
"grad_norm": 0.7887512445449829,
"learning_rate": 7.960641911678576e-05,
"loss": 0.5875,
"step": 25700
},
{
"epoch": 1.8130391244004849,
"grad_norm": 0.5214329957962036,
"learning_rate": 7.913787044629261e-05,
"loss": 0.5757,
"step": 25800
},
{
"epoch": 1.8200664078283937,
"grad_norm": 0.6298120021820068,
"learning_rate": 7.866932177579946e-05,
"loss": 0.5634,
"step": 25900
},
{
"epoch": 1.8270936912563025,
"grad_norm": 0.6642977595329285,
"learning_rate": 7.820077310530631e-05,
"loss": 0.5708,
"step": 26000
},
{
"epoch": 1.8270936912563025,
"eval_loss": 0.6030368804931641,
"eval_runtime": 118.8611,
"eval_samples_per_second": 106.418,
"eval_steps_per_second": 13.31,
"step": 26000
},
{
"epoch": 1.8341209746842115,
"grad_norm": 0.5501639246940613,
"learning_rate": 7.773222443481318e-05,
"loss": 0.5692,
"step": 26100
},
{
"epoch": 1.8411482581121204,
"grad_norm": 0.6441388726234436,
"learning_rate": 7.726367576432003e-05,
"loss": 0.5881,
"step": 26200
},
{
"epoch": 1.8481755415400292,
"grad_norm": 0.6169604063034058,
"learning_rate": 7.679512709382688e-05,
"loss": 0.5762,
"step": 26300
},
{
"epoch": 1.855202824967938,
"grad_norm": 0.6980007290840149,
"learning_rate": 7.632657842333373e-05,
"loss": 0.5739,
"step": 26400
},
{
"epoch": 1.8622301083958468,
"grad_norm": 0.7939792275428772,
"learning_rate": 7.585802975284058e-05,
"loss": 0.5746,
"step": 26500
},
{
"epoch": 1.8622301083958468,
"eval_loss": 0.6025614142417908,
"eval_runtime": 118.9608,
"eval_samples_per_second": 106.329,
"eval_steps_per_second": 13.298,
"step": 26500
},
{
"epoch": 1.8692573918237558,
"grad_norm": 0.6231071352958679,
"learning_rate": 7.538948108234744e-05,
"loss": 0.5634,
"step": 26600
},
{
"epoch": 1.8762846752516644,
"grad_norm": 0.6939712762832642,
"learning_rate": 7.492093241185428e-05,
"loss": 0.5792,
"step": 26700
},
{
"epoch": 1.8833119586795735,
"grad_norm": 0.6055401563644409,
"learning_rate": 7.445238374136114e-05,
"loss": 0.5929,
"step": 26800
},
{
"epoch": 1.8903392421074823,
"grad_norm": 0.7465933561325073,
"learning_rate": 7.398383507086799e-05,
"loss": 0.5817,
"step": 26900
},
{
"epoch": 1.897366525535391,
"grad_norm": 0.6460291743278503,
"learning_rate": 7.351528640037484e-05,
"loss": 0.5831,
"step": 27000
},
{
"epoch": 1.897366525535391,
"eval_loss": 0.6021212935447693,
"eval_runtime": 118.8793,
"eval_samples_per_second": 106.402,
"eval_steps_per_second": 13.308,
"step": 27000
},
{
"epoch": 1.9043938089633001,
"grad_norm": 0.5837533473968506,
"learning_rate": 7.304673772988169e-05,
"loss": 0.5807,
"step": 27100
},
{
"epoch": 1.9114210923912087,
"grad_norm": 0.7371869087219238,
"learning_rate": 7.257818905938854e-05,
"loss": 0.5967,
"step": 27200
},
{
"epoch": 1.9184483758191178,
"grad_norm": 0.8853654861450195,
"learning_rate": 7.21096403888954e-05,
"loss": 0.5967,
"step": 27300
},
{
"epoch": 1.9254756592470266,
"grad_norm": 0.7515887022018433,
"learning_rate": 7.164109171840226e-05,
"loss": 0.5708,
"step": 27400
},
{
"epoch": 1.9325029426749354,
"grad_norm": 0.6723042130470276,
"learning_rate": 7.11725430479091e-05,
"loss": 0.5687,
"step": 27500
},
{
"epoch": 1.9325029426749354,
"eval_loss": 0.6017782092094421,
"eval_runtime": 118.9577,
"eval_samples_per_second": 106.332,
"eval_steps_per_second": 13.299,
"step": 27500
},
{
"epoch": 1.9395302261028444,
"grad_norm": 0.5766080617904663,
"learning_rate": 7.070399437741596e-05,
"loss": 0.5876,
"step": 27600
},
{
"epoch": 1.946557509530753,
"grad_norm": 0.6501230597496033,
"learning_rate": 7.02354457069228e-05,
"loss": 0.5726,
"step": 27700
},
{
"epoch": 1.953584792958662,
"grad_norm": 0.7194878458976746,
"learning_rate": 6.976689703642966e-05,
"loss": 0.5741,
"step": 27800
},
{
"epoch": 1.9606120763865709,
"grad_norm": 0.8090994954109192,
"learning_rate": 6.929834836593652e-05,
"loss": 0.5764,
"step": 27900
},
{
"epoch": 1.9676393598144797,
"grad_norm": 0.6500638127326965,
"learning_rate": 6.882979969544337e-05,
"loss": 0.5752,
"step": 28000
},
{
"epoch": 1.9676393598144797,
"eval_loss": 0.6010117530822754,
"eval_runtime": 119.0388,
"eval_samples_per_second": 106.259,
"eval_steps_per_second": 13.29,
"step": 28000
},
{
"epoch": 1.9746666432423887,
"grad_norm": 0.7954403162002563,
"learning_rate": 6.836125102495022e-05,
"loss": 0.5698,
"step": 28100
},
{
"epoch": 1.9816939266702973,
"grad_norm": 0.4845888316631317,
"learning_rate": 6.789270235445707e-05,
"loss": 0.5677,
"step": 28200
},
{
"epoch": 1.9887212100982063,
"grad_norm": 0.7075939774513245,
"learning_rate": 6.742415368396392e-05,
"loss": 0.5736,
"step": 28300
},
{
"epoch": 1.9957484935261152,
"grad_norm": 0.6423342823982239,
"learning_rate": 6.695560501347079e-05,
"loss": 0.5824,
"step": 28400
},
{
"epoch": 2.002775776954024,
"grad_norm": 0.49882400035858154,
"learning_rate": 6.648705634297762e-05,
"loss": 0.5598,
"step": 28500
},
{
"epoch": 2.002775776954024,
"eval_loss": 0.6018995642662048,
"eval_runtime": 118.7564,
"eval_samples_per_second": 106.512,
"eval_steps_per_second": 13.321,
"step": 28500
},
{
"epoch": 2.009803060381933,
"grad_norm": 0.6685127019882202,
"learning_rate": 6.601850767248449e-05,
"loss": 0.5498,
"step": 28600
},
{
"epoch": 2.0168303438098416,
"grad_norm": 0.7776573896408081,
"learning_rate": 6.554995900199134e-05,
"loss": 0.541,
"step": 28700
},
{
"epoch": 2.0238576272377506,
"grad_norm": 0.9224827885627747,
"learning_rate": 6.508141033149819e-05,
"loss": 0.5404,
"step": 28800
},
{
"epoch": 2.0308849106656592,
"grad_norm": 0.6114927530288696,
"learning_rate": 6.461286166100504e-05,
"loss": 0.541,
"step": 28900
},
{
"epoch": 2.0379121940935683,
"grad_norm": 0.6814767718315125,
"learning_rate": 6.414431299051189e-05,
"loss": 0.5397,
"step": 29000
},
{
"epoch": 2.0379121940935683,
"eval_loss": 0.6046204566955566,
"eval_runtime": 118.9324,
"eval_samples_per_second": 106.355,
"eval_steps_per_second": 13.302,
"step": 29000
},
{
"epoch": 2.0449394775214773,
"grad_norm": 0.8734195232391357,
"learning_rate": 6.367576432001875e-05,
"loss": 0.5347,
"step": 29100
},
{
"epoch": 2.051966760949386,
"grad_norm": 0.7100592255592346,
"learning_rate": 6.32072156495256e-05,
"loss": 0.5349,
"step": 29200
},
{
"epoch": 2.058994044377295,
"grad_norm": 0.5094404816627502,
"learning_rate": 6.273866697903245e-05,
"loss": 0.5438,
"step": 29300
},
{
"epoch": 2.0660213278052035,
"grad_norm": 0.5938568711280823,
"learning_rate": 6.22701183085393e-05,
"loss": 0.5367,
"step": 29400
},
{
"epoch": 2.0730486112331126,
"grad_norm": 0.6052954196929932,
"learning_rate": 6.180156963804615e-05,
"loss": 0.533,
"step": 29500
},
{
"epoch": 2.0730486112331126,
"eval_loss": 0.6042247414588928,
"eval_runtime": 119.3016,
"eval_samples_per_second": 106.025,
"eval_steps_per_second": 13.261,
"step": 29500
},
{
"epoch": 2.0800758946610216,
"grad_norm": 0.8204342126846313,
"learning_rate": 6.133302096755302e-05,
"loss": 0.5525,
"step": 29600
},
{
"epoch": 2.08710317808893,
"grad_norm": 0.8169859647750854,
"learning_rate": 6.086447229705986e-05,
"loss": 0.5412,
"step": 29700
},
{
"epoch": 2.0941304615168392,
"grad_norm": 0.6919510960578918,
"learning_rate": 6.039592362656671e-05,
"loss": 0.5395,
"step": 29800
},
{
"epoch": 2.101157744944748,
"grad_norm": 0.6376796364784241,
"learning_rate": 5.9927374956073566e-05,
"loss": 0.5296,
"step": 29900
},
{
"epoch": 2.108185028372657,
"grad_norm": 0.6056246161460876,
"learning_rate": 5.9458826285580416e-05,
"loss": 0.5257,
"step": 30000
},
{
"epoch": 2.108185028372657,
"eval_loss": 0.6040454506874084,
"eval_runtime": 119.4721,
"eval_samples_per_second": 105.874,
"eval_steps_per_second": 13.242,
"step": 30000
},
{
"epoch": 2.115212311800566,
"grad_norm": 0.7951282858848572,
"learning_rate": 5.899027761508727e-05,
"loss": 0.5231,
"step": 30100
},
{
"epoch": 2.1222395952284745,
"grad_norm": 0.7288519144058228,
"learning_rate": 5.852172894459412e-05,
"loss": 0.533,
"step": 30200
},
{
"epoch": 2.1292668786563835,
"grad_norm": 0.62901371717453,
"learning_rate": 5.8053180274100973e-05,
"loss": 0.5475,
"step": 30300
},
{
"epoch": 2.136294162084292,
"grad_norm": 0.7269171476364136,
"learning_rate": 5.758463160360783e-05,
"loss": 0.5469,
"step": 30400
},
{
"epoch": 2.143321445512201,
"grad_norm": 0.8678474426269531,
"learning_rate": 5.711608293311468e-05,
"loss": 0.5396,
"step": 30500
},
{
"epoch": 2.143321445512201,
"eval_loss": 0.6033341884613037,
"eval_runtime": 119.4525,
"eval_samples_per_second": 105.891,
"eval_steps_per_second": 13.244,
"step": 30500
},
{
"epoch": 2.15034872894011,
"grad_norm": 0.5461506843566895,
"learning_rate": 5.664753426262154e-05,
"loss": 0.5543,
"step": 30600
},
{
"epoch": 2.157376012368019,
"grad_norm": 0.6282551884651184,
"learning_rate": 5.617898559212839e-05,
"loss": 0.519,
"step": 30700
},
{
"epoch": 2.164403295795928,
"grad_norm": 0.5805559158325195,
"learning_rate": 5.571043692163523e-05,
"loss": 0.5359,
"step": 30800
},
{
"epoch": 2.1714305792238364,
"grad_norm": 0.7047603726387024,
"learning_rate": 5.5241888251142095e-05,
"loss": 0.5244,
"step": 30900
},
{
"epoch": 2.1784578626517455,
"grad_norm": 1.0538957118988037,
"learning_rate": 5.477333958064894e-05,
"loss": 0.5284,
"step": 31000
},
{
"epoch": 2.1784578626517455,
"eval_loss": 0.6041498780250549,
"eval_runtime": 119.3834,
"eval_samples_per_second": 105.953,
"eval_steps_per_second": 13.251,
"step": 31000
},
{
"epoch": 2.185485146079654,
"grad_norm": 0.7913850545883179,
"learning_rate": 5.43047909101558e-05,
"loss": 0.5267,
"step": 31100
},
{
"epoch": 2.192512429507563,
"grad_norm": 0.5944955348968506,
"learning_rate": 5.3836242239662646e-05,
"loss": 0.5359,
"step": 31200
},
{
"epoch": 2.199539712935472,
"grad_norm": 0.8068099617958069,
"learning_rate": 5.3367693569169496e-05,
"loss": 0.5449,
"step": 31300
},
{
"epoch": 2.2065669963633807,
"grad_norm": 0.5993140935897827,
"learning_rate": 5.289914489867635e-05,
"loss": 0.5384,
"step": 31400
},
{
"epoch": 2.2135942797912898,
"grad_norm": 0.6852918267250061,
"learning_rate": 5.24305962281832e-05,
"loss": 0.5448,
"step": 31500
},
{
"epoch": 2.2135942797912898,
"eval_loss": 0.6034653782844543,
"eval_runtime": 119.4509,
"eval_samples_per_second": 105.893,
"eval_steps_per_second": 13.244,
"step": 31500
},
{
"epoch": 2.2206215632191983,
"grad_norm": 0.9072486758232117,
"learning_rate": 5.1962047557690054e-05,
"loss": 0.5392,
"step": 31600
},
{
"epoch": 2.2276488466471074,
"grad_norm": 0.5671890377998352,
"learning_rate": 5.149349888719691e-05,
"loss": 0.5366,
"step": 31700
},
{
"epoch": 2.2346761300750164,
"grad_norm": 0.9552319049835205,
"learning_rate": 5.102495021670376e-05,
"loss": 0.555,
"step": 31800
},
{
"epoch": 2.241703413502925,
"grad_norm": 0.9220768809318542,
"learning_rate": 5.055640154621062e-05,
"loss": 0.5323,
"step": 31900
},
{
"epoch": 2.248730696930834,
"grad_norm": 0.7823670506477356,
"learning_rate": 5.008785287571747e-05,
"loss": 0.5484,
"step": 32000
},
{
"epoch": 2.248730696930834,
"eval_loss": 0.602741539478302,
"eval_runtime": 119.5139,
"eval_samples_per_second": 105.837,
"eval_steps_per_second": 13.237,
"step": 32000
},
{
"epoch": 2.2557579803587426,
"grad_norm": 0.8530369400978088,
"learning_rate": 4.961930420522432e-05,
"loss": 0.5443,
"step": 32100
},
{
"epoch": 2.2627852637866517,
"grad_norm": 0.8256222605705261,
"learning_rate": 4.9150755534731175e-05,
"loss": 0.5371,
"step": 32200
},
{
"epoch": 2.2698125472145607,
"grad_norm": 0.8003319501876831,
"learning_rate": 4.8682206864238025e-05,
"loss": 0.5472,
"step": 32300
},
{
"epoch": 2.2768398306424693,
"grad_norm": 0.6615211367607117,
"learning_rate": 4.8213658193744876e-05,
"loss": 0.5349,
"step": 32400
},
{
"epoch": 2.2838671140703783,
"grad_norm": 0.7718948125839233,
"learning_rate": 4.774510952325173e-05,
"loss": 0.5253,
"step": 32500
},
{
"epoch": 2.2838671140703783,
"eval_loss": 0.6034336090087891,
"eval_runtime": 119.3326,
"eval_samples_per_second": 105.998,
"eval_steps_per_second": 13.257,
"step": 32500
},
{
"epoch": 2.290894397498287,
"grad_norm": 0.6703356504440308,
"learning_rate": 4.727656085275858e-05,
"loss": 0.5356,
"step": 32600
},
{
"epoch": 2.297921680926196,
"grad_norm": 0.6417832970619202,
"learning_rate": 4.680801218226544e-05,
"loss": 0.5456,
"step": 32700
},
{
"epoch": 2.3049489643541046,
"grad_norm": 0.6749237775802612,
"learning_rate": 4.633946351177228e-05,
"loss": 0.5374,
"step": 32800
},
{
"epoch": 2.3119762477820136,
"grad_norm": 0.6223445534706116,
"learning_rate": 4.587091484127914e-05,
"loss": 0.5254,
"step": 32900
},
{
"epoch": 2.3190035312099226,
"grad_norm": 0.6455600261688232,
"learning_rate": 4.540236617078599e-05,
"loss": 0.5322,
"step": 33000
},
{
"epoch": 2.3190035312099226,
"eval_loss": 0.602424144744873,
"eval_runtime": 119.6802,
"eval_samples_per_second": 105.69,
"eval_steps_per_second": 13.219,
"step": 33000
},
{
"epoch": 2.3260308146378312,
"grad_norm": 0.6882891058921814,
"learning_rate": 4.493381750029285e-05,
"loss": 0.5406,
"step": 33100
},
{
"epoch": 2.3330580980657403,
"grad_norm": 0.7169196605682373,
"learning_rate": 4.44652688297997e-05,
"loss": 0.5367,
"step": 33200
},
{
"epoch": 2.3400853814936493,
"grad_norm": 0.6603942513465881,
"learning_rate": 4.399672015930655e-05,
"loss": 0.5315,
"step": 33300
},
{
"epoch": 2.347112664921558,
"grad_norm": 0.7974775433540344,
"learning_rate": 4.3528171488813405e-05,
"loss": 0.5438,
"step": 33400
},
{
"epoch": 2.354139948349467,
"grad_norm": 0.7672884464263916,
"learning_rate": 4.3059622818320255e-05,
"loss": 0.5583,
"step": 33500
},
{
"epoch": 2.354139948349467,
"eval_loss": 0.6008437871932983,
"eval_runtime": 119.0367,
"eval_samples_per_second": 106.261,
"eval_steps_per_second": 13.29,
"step": 33500
},
{
"epoch": 2.3611672317773755,
"grad_norm": 0.7062329649925232,
"learning_rate": 4.259107414782711e-05,
"loss": 0.5548,
"step": 33600
},
{
"epoch": 2.3681945152052846,
"grad_norm": 0.754173219203949,
"learning_rate": 4.212252547733396e-05,
"loss": 0.5276,
"step": 33700
},
{
"epoch": 2.375221798633193,
"grad_norm": 0.7143212556838989,
"learning_rate": 4.165397680684081e-05,
"loss": 0.5413,
"step": 33800
},
{
"epoch": 2.382249082061102,
"grad_norm": 0.8082584142684937,
"learning_rate": 4.118542813634766e-05,
"loss": 0.534,
"step": 33900
},
{
"epoch": 2.3892763654890112,
"grad_norm": 0.7493578791618347,
"learning_rate": 4.071687946585452e-05,
"loss": 0.5373,
"step": 34000
},
{
"epoch": 2.3892763654890112,
"eval_loss": 0.6015102863311768,
"eval_runtime": 119.2005,
"eval_samples_per_second": 106.115,
"eval_steps_per_second": 13.272,
"step": 34000
},
{
"epoch": 2.39630364891692,
"grad_norm": 0.9309408068656921,
"learning_rate": 4.024833079536137e-05,
"loss": 0.5464,
"step": 34100
},
{
"epoch": 2.403330932344829,
"grad_norm": 0.6309605240821838,
"learning_rate": 3.977978212486822e-05,
"loss": 0.5439,
"step": 34200
},
{
"epoch": 2.410358215772738,
"grad_norm": 0.6428382992744446,
"learning_rate": 3.931123345437507e-05,
"loss": 0.5412,
"step": 34300
},
{
"epoch": 2.4173854992006465,
"grad_norm": 0.9063606262207031,
"learning_rate": 3.884268478388193e-05,
"loss": 0.5477,
"step": 34400
},
{
"epoch": 2.4244127826285555,
"grad_norm": 0.7051374316215515,
"learning_rate": 3.837413611338878e-05,
"loss": 0.5351,
"step": 34500
},
{
"epoch": 2.4244127826285555,
"eval_loss": 0.6011614203453064,
"eval_runtime": 119.6428,
"eval_samples_per_second": 105.723,
"eval_steps_per_second": 13.223,
"step": 34500
},
{
"epoch": 2.431440066056464,
"grad_norm": 0.7466573119163513,
"learning_rate": 3.7905587442895635e-05,
"loss": 0.5525,
"step": 34600
},
{
"epoch": 2.438467349484373,
"grad_norm": 0.8113718628883362,
"learning_rate": 3.7437038772402485e-05,
"loss": 0.553,
"step": 34700
},
{
"epoch": 2.4454946329122818,
"grad_norm": 0.5469939112663269,
"learning_rate": 3.6968490101909335e-05,
"loss": 0.5438,
"step": 34800
},
{
"epoch": 2.452521916340191,
"grad_norm": 0.6911020278930664,
"learning_rate": 3.649994143141619e-05,
"loss": 0.5349,
"step": 34900
},
{
"epoch": 2.4595491997681,
"grad_norm": 0.7164533734321594,
"learning_rate": 3.603139276092304e-05,
"loss": 0.5237,
"step": 35000
},
{
"epoch": 2.4595491997681,
"eval_loss": 0.6016719937324524,
"eval_runtime": 119.4046,
"eval_samples_per_second": 105.934,
"eval_steps_per_second": 13.249,
"step": 35000
},
{
"epoch": 2.4665764831960084,
"grad_norm": 0.8674111366271973,
"learning_rate": 3.55628440904299e-05,
"loss": 0.5469,
"step": 35100
},
{
"epoch": 2.4736037666239175,
"grad_norm": 0.9044885039329529,
"learning_rate": 3.509429541993674e-05,
"loss": 0.5445,
"step": 35200
},
{
"epoch": 2.480631050051826,
"grad_norm": 0.7213521599769592,
"learning_rate": 3.46257467494436e-05,
"loss": 0.5136,
"step": 35300
},
{
"epoch": 2.487658333479735,
"grad_norm": 0.6873759627342224,
"learning_rate": 3.415719807895045e-05,
"loss": 0.5362,
"step": 35400
},
{
"epoch": 2.494685616907644,
"grad_norm": 0.6950516104698181,
"learning_rate": 3.368864940845731e-05,
"loss": 0.535,
"step": 35500
},
{
"epoch": 2.494685616907644,
"eval_loss": 0.6001349091529846,
"eval_runtime": 119.0417,
"eval_samples_per_second": 106.257,
"eval_steps_per_second": 13.289,
"step": 35500
},
{
"epoch": 2.5017129003355527,
"grad_norm": 0.6840397715568542,
"learning_rate": 3.322010073796416e-05,
"loss": 0.5385,
"step": 35600
},
{
"epoch": 2.5087401837634618,
"grad_norm": 0.5907210111618042,
"learning_rate": 3.275155206747101e-05,
"loss": 0.5429,
"step": 35700
},
{
"epoch": 2.5157674671913703,
"grad_norm": 0.801002025604248,
"learning_rate": 3.2283003396977865e-05,
"loss": 0.5224,
"step": 35800
},
{
"epoch": 2.5227947506192794,
"grad_norm": 0.7259572148323059,
"learning_rate": 3.1814454726484715e-05,
"loss": 0.5297,
"step": 35900
},
{
"epoch": 2.5298220340471884,
"grad_norm": 0.6602583527565002,
"learning_rate": 3.134590605599157e-05,
"loss": 0.5496,
"step": 36000
},
{
"epoch": 2.5298220340471884,
"eval_loss": 0.5990512371063232,
"eval_runtime": 118.9771,
"eval_samples_per_second": 106.315,
"eval_steps_per_second": 13.297,
"step": 36000
},
{
"epoch": 2.536849317475097,
"grad_norm": 0.6192248463630676,
"learning_rate": 3.0877357385498415e-05,
"loss": 0.5432,
"step": 36100
},
{
"epoch": 2.543876600903006,
"grad_norm": 1.0253371000289917,
"learning_rate": 3.0408808715005272e-05,
"loss": 0.5254,
"step": 36200
},
{
"epoch": 2.5509038843309146,
"grad_norm": 0.8612440228462219,
"learning_rate": 2.9940260044512126e-05,
"loss": 0.56,
"step": 36300
},
{
"epoch": 2.5579311677588237,
"grad_norm": 0.5663143992424011,
"learning_rate": 2.947171137401898e-05,
"loss": 0.5329,
"step": 36400
},
{
"epoch": 2.5649584511867323,
"grad_norm": 0.9142153263092041,
"learning_rate": 2.9003162703525833e-05,
"loss": 0.5384,
"step": 36500
},
{
"epoch": 2.5649584511867323,
"eval_loss": 0.5994681715965271,
"eval_runtime": 119.2506,
"eval_samples_per_second": 106.071,
"eval_steps_per_second": 13.266,
"step": 36500
},
{
"epoch": 2.5719857346146413,
"grad_norm": 0.6240784525871277,
"learning_rate": 2.853461403303268e-05,
"loss": 0.5332,
"step": 36600
},
{
"epoch": 2.5790130180425503,
"grad_norm": 0.7204896211624146,
"learning_rate": 2.8066065362539534e-05,
"loss": 0.534,
"step": 36700
},
{
"epoch": 2.586040301470459,
"grad_norm": 0.8071198463439941,
"learning_rate": 2.7597516692046387e-05,
"loss": 0.533,
"step": 36800
},
{
"epoch": 2.593067584898368,
"grad_norm": 0.5369657278060913,
"learning_rate": 2.712896802155324e-05,
"loss": 0.5325,
"step": 36900
},
{
"epoch": 2.600094868326277,
"grad_norm": 0.8969751000404358,
"learning_rate": 2.6660419351060095e-05,
"loss": 0.5349,
"step": 37000
},
{
"epoch": 2.600094868326277,
"eval_loss": 0.5992428064346313,
"eval_runtime": 119.3558,
"eval_samples_per_second": 105.977,
"eval_steps_per_second": 13.254,
"step": 37000
},
{
"epoch": 2.6071221517541856,
"grad_norm": 0.8350916504859924,
"learning_rate": 2.6191870680566945e-05,
"loss": 0.5388,
"step": 37100
},
{
"epoch": 2.6141494351820946,
"grad_norm": 0.5793244242668152,
"learning_rate": 2.57233220100738e-05,
"loss": 0.5211,
"step": 37200
},
{
"epoch": 2.6211767186100032,
"grad_norm": 0.6331949830055237,
"learning_rate": 2.5254773339580652e-05,
"loss": 0.5335,
"step": 37300
},
{
"epoch": 2.6282040020379123,
"grad_norm": 0.799247145652771,
"learning_rate": 2.4786224669087502e-05,
"loss": 0.5474,
"step": 37400
},
{
"epoch": 2.635231285465821,
"grad_norm": 0.7149389386177063,
"learning_rate": 2.4317675998594356e-05,
"loss": 0.5316,
"step": 37500
},
{
"epoch": 2.635231285465821,
"eval_loss": 0.5990239977836609,
"eval_runtime": 119.2093,
"eval_samples_per_second": 106.107,
"eval_steps_per_second": 13.271,
"step": 37500
},
{
"epoch": 2.64225856889373,
"grad_norm": 0.6996687650680542,
"learning_rate": 2.384912732810121e-05,
"loss": 0.5244,
"step": 37600
},
{
"epoch": 2.649285852321639,
"grad_norm": 0.6374346017837524,
"learning_rate": 2.338057865760806e-05,
"loss": 0.5221,
"step": 37700
},
{
"epoch": 2.6563131357495475,
"grad_norm": 0.7678332328796387,
"learning_rate": 2.2912029987114913e-05,
"loss": 0.5251,
"step": 37800
},
{
"epoch": 2.6633404191774566,
"grad_norm": 0.6463155150413513,
"learning_rate": 2.2443481316621764e-05,
"loss": 0.5343,
"step": 37900
},
{
"epoch": 2.6703677026053656,
"grad_norm": 0.6788434386253357,
"learning_rate": 2.1974932646128617e-05,
"loss": 0.5276,
"step": 38000
},
{
"epoch": 2.6703677026053656,
"eval_loss": 0.5986095070838928,
"eval_runtime": 119.1797,
"eval_samples_per_second": 106.134,
"eval_steps_per_second": 13.274,
"step": 38000
},
{
"epoch": 2.677394986033274,
"grad_norm": 0.7115055322647095,
"learning_rate": 2.1506383975635467e-05,
"loss": 0.5583,
"step": 38100
},
{
"epoch": 2.684422269461183,
"grad_norm": 0.7007845044136047,
"learning_rate": 2.103783530514232e-05,
"loss": 0.5294,
"step": 38200
},
{
"epoch": 2.691449552889092,
"grad_norm": 1.0714610815048218,
"learning_rate": 2.0569286634649178e-05,
"loss": 0.54,
"step": 38300
},
{
"epoch": 2.698476836317001,
"grad_norm": 0.6736337542533875,
"learning_rate": 2.0100737964156028e-05,
"loss": 0.5343,
"step": 38400
},
{
"epoch": 2.7055041197449095,
"grad_norm": 0.6769545078277588,
"learning_rate": 1.9632189293662882e-05,
"loss": 0.5406,
"step": 38500
},
{
"epoch": 2.7055041197449095,
"eval_loss": 0.5980576276779175,
"eval_runtime": 119.2056,
"eval_samples_per_second": 106.111,
"eval_steps_per_second": 13.271,
"step": 38500
},
{
"epoch": 2.7125314031728185,
"grad_norm": 0.714820384979248,
"learning_rate": 1.9163640623169732e-05,
"loss": 0.5408,
"step": 38600
},
{
"epoch": 2.7195586866007275,
"grad_norm": 0.5211949944496155,
"learning_rate": 1.8695091952676586e-05,
"loss": 0.5257,
"step": 38700
},
{
"epoch": 2.726585970028636,
"grad_norm": 0.7966856360435486,
"learning_rate": 1.8226543282183436e-05,
"loss": 0.5381,
"step": 38800
},
{
"epoch": 2.733613253456545,
"grad_norm": 0.7282027006149292,
"learning_rate": 1.775799461169029e-05,
"loss": 0.5415,
"step": 38900
},
{
"epoch": 2.7406405368844537,
"grad_norm": 0.7190561890602112,
"learning_rate": 1.7289445941197143e-05,
"loss": 0.5436,
"step": 39000
},
{
"epoch": 2.7406405368844537,
"eval_loss": 0.5981852412223816,
"eval_runtime": 119.2717,
"eval_samples_per_second": 106.052,
"eval_steps_per_second": 13.264,
"step": 39000
},
{
"epoch": 2.747667820312363,
"grad_norm": 0.6859644651412964,
"learning_rate": 1.6820897270703993e-05,
"loss": 0.5371,
"step": 39100
},
{
"epoch": 2.7546951037402714,
"grad_norm": 0.6917553544044495,
"learning_rate": 1.6352348600210847e-05,
"loss": 0.5411,
"step": 39200
},
{
"epoch": 2.7617223871681804,
"grad_norm": 0.5737515687942505,
"learning_rate": 1.58837999297177e-05,
"loss": 0.5244,
"step": 39300
},
{
"epoch": 2.7687496705960895,
"grad_norm": 0.7747429013252258,
"learning_rate": 1.5415251259224554e-05,
"loss": 0.5452,
"step": 39400
},
{
"epoch": 2.775776954023998,
"grad_norm": 0.6782782673835754,
"learning_rate": 1.4946702588731405e-05,
"loss": 0.5392,
"step": 39500
},
{
"epoch": 2.775776954023998,
"eval_loss": 0.597685694694519,
"eval_runtime": 119.3501,
"eval_samples_per_second": 105.982,
"eval_steps_per_second": 13.255,
"step": 39500
},
{
"epoch": 2.782804237451907,
"grad_norm": 0.6675652265548706,
"learning_rate": 1.4478153918238258e-05,
"loss": 0.534,
"step": 39600
},
{
"epoch": 2.789831520879816,
"grad_norm": 0.822902262210846,
"learning_rate": 1.4009605247745112e-05,
"loss": 0.5582,
"step": 39700
},
{
"epoch": 2.7968588043077247,
"grad_norm": 0.6393033266067505,
"learning_rate": 1.3541056577251962e-05,
"loss": 0.5442,
"step": 39800
},
{
"epoch": 2.8038860877356337,
"grad_norm": 0.744646430015564,
"learning_rate": 1.3072507906758816e-05,
"loss": 0.537,
"step": 39900
},
{
"epoch": 2.8109133711635423,
"grad_norm": 0.5728178024291992,
"learning_rate": 1.2603959236265666e-05,
"loss": 0.5316,
"step": 40000
},
{
"epoch": 2.8109133711635423,
"eval_loss": 0.5975730419158936,
"eval_runtime": 119.1706,
"eval_samples_per_second": 106.142,
"eval_steps_per_second": 13.275,
"step": 40000
},
{
"epoch": 2.8179406545914514,
"grad_norm": 0.6350817680358887,
"learning_rate": 1.2135410565772521e-05,
"loss": 0.5345,
"step": 40100
},
{
"epoch": 2.82496793801936,
"grad_norm": 0.5875131487846375,
"learning_rate": 1.1666861895279373e-05,
"loss": 0.5336,
"step": 40200
},
{
"epoch": 2.831995221447269,
"grad_norm": 0.7108073234558105,
"learning_rate": 1.1198313224786225e-05,
"loss": 0.5364,
"step": 40300
},
{
"epoch": 2.839022504875178,
"grad_norm": 0.5973334312438965,
"learning_rate": 1.0729764554293077e-05,
"loss": 0.5468,
"step": 40400
},
{
"epoch": 2.8460497883030866,
"grad_norm": 0.7775806784629822,
"learning_rate": 1.026121588379993e-05,
"loss": 0.5349,
"step": 40500
},
{
"epoch": 2.8460497883030866,
"eval_loss": 0.5973463654518127,
"eval_runtime": 119.0857,
"eval_samples_per_second": 106.218,
"eval_steps_per_second": 13.285,
"step": 40500
},
{
"epoch": 2.8530770717309957,
"grad_norm": 0.6160515546798706,
"learning_rate": 9.792667213306782e-06,
"loss": 0.5317,
"step": 40600
},
{
"epoch": 2.8601043551589047,
"grad_norm": 0.715397834777832,
"learning_rate": 9.324118542813636e-06,
"loss": 0.5119,
"step": 40700
},
{
"epoch": 2.8671316385868133,
"grad_norm": 0.7450791001319885,
"learning_rate": 8.855569872320488e-06,
"loss": 0.5302,
"step": 40800
},
{
"epoch": 2.874158922014722,
"grad_norm": 0.5536558628082275,
"learning_rate": 8.38702120182734e-06,
"loss": 0.5239,
"step": 40900
},
{
"epoch": 2.881186205442631,
"grad_norm": 0.5844016075134277,
"learning_rate": 7.918472531334192e-06,
"loss": 0.5341,
"step": 41000
},
{
"epoch": 2.881186205442631,
"eval_loss": 0.5972412824630737,
"eval_runtime": 119.0402,
"eval_samples_per_second": 106.258,
"eval_steps_per_second": 13.29,
"step": 41000
},
{
"epoch": 2.88821348887054,
"grad_norm": 0.566956639289856,
"learning_rate": 7.449923860841045e-06,
"loss": 0.5387,
"step": 41100
},
{
"epoch": 2.8952407722984486,
"grad_norm": 0.6934293508529663,
"learning_rate": 6.981375190347898e-06,
"loss": 0.5185,
"step": 41200
},
{
"epoch": 2.9022680557263576,
"grad_norm": 0.6890417337417603,
"learning_rate": 6.51282651985475e-06,
"loss": 0.5419,
"step": 41300
},
{
"epoch": 2.9092953391542666,
"grad_norm": 0.8735133409500122,
"learning_rate": 6.044277849361603e-06,
"loss": 0.5314,
"step": 41400
},
{
"epoch": 2.9163226225821752,
"grad_norm": 0.746161162853241,
"learning_rate": 5.575729178868455e-06,
"loss": 0.5208,
"step": 41500
},
{
"epoch": 2.9163226225821752,
"eval_loss": 0.5972864031791687,
"eval_runtime": 118.9512,
"eval_samples_per_second": 106.338,
"eval_steps_per_second": 13.3,
"step": 41500
},
{
"epoch": 2.9233499060100843,
"grad_norm": 0.9560967683792114,
"learning_rate": 5.107180508375308e-06,
"loss": 0.5505,
"step": 41600
},
{
"epoch": 2.930377189437993,
"grad_norm": 0.7499198317527771,
"learning_rate": 4.63863183788216e-06,
"loss": 0.5378,
"step": 41700
},
{
"epoch": 2.937404472865902,
"grad_norm": 0.6939593553543091,
"learning_rate": 4.170083167389012e-06,
"loss": 0.5351,
"step": 41800
},
{
"epoch": 2.9444317562938105,
"grad_norm": 0.6184145212173462,
"learning_rate": 3.701534496895865e-06,
"loss": 0.5171,
"step": 41900
},
{
"epoch": 2.9514590397217195,
"grad_norm": 0.6636520624160767,
"learning_rate": 3.232985826402718e-06,
"loss": 0.53,
"step": 42000
},
{
"epoch": 2.9514590397217195,
"eval_loss": 0.5971269607543945,
"eval_runtime": 118.9184,
"eval_samples_per_second": 106.367,
"eval_steps_per_second": 13.303,
"step": 42000
},
{
"epoch": 2.9584863231496286,
"grad_norm": 0.8458754420280457,
"learning_rate": 2.76443715590957e-06,
"loss": 0.5447,
"step": 42100
},
{
"epoch": 2.965513606577537,
"grad_norm": 0.6426025032997131,
"learning_rate": 2.295888485416423e-06,
"loss": 0.5287,
"step": 42200
},
{
"epoch": 2.972540890005446,
"grad_norm": 0.7159422039985657,
"learning_rate": 1.827339814923275e-06,
"loss": 0.5288,
"step": 42300
},
{
"epoch": 2.9795681734333552,
"grad_norm": 0.6702597141265869,
"learning_rate": 1.3587911444301279e-06,
"loss": 0.5419,
"step": 42400
},
{
"epoch": 2.986595456861264,
"grad_norm": 0.7953273057937622,
"learning_rate": 8.902424739369803e-07,
"loss": 0.5445,
"step": 42500
},
{
"epoch": 2.986595456861264,
"eval_loss": 0.5969375371932983,
"eval_runtime": 119.2187,
"eval_samples_per_second": 106.099,
"eval_steps_per_second": 13.27,
"step": 42500
},
{
"epoch": 2.993622740289173,
"grad_norm": 0.5247675180435181,
"learning_rate": 4.216938034438327e-07,
"loss": 0.5485,
"step": 42600
}
],
"logging_steps": 100,
"max_steps": 42690,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4552903446892544e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}