{
"best_metric": 0.39342138171195984,
"best_model_checkpoint": "miner_id_24/checkpoint-250",
"epoch": 0.5941014215998303,
"eval_steps": 50,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001697432633142372,
"grad_norm": 0.22029195725917816,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.2479,
"step": 1
},
{
"epoch": 0.001697432633142372,
"eval_loss": 0.6322442293167114,
"eval_runtime": 65.6226,
"eval_samples_per_second": 2.941,
"eval_steps_per_second": 2.941,
"step": 1
},
{
"epoch": 0.003394865266284744,
"grad_norm": 0.24590638279914856,
"learning_rate": 5.9999999999999995e-05,
"loss": 1.2438,
"step": 2
},
{
"epoch": 0.005092297899427116,
"grad_norm": 0.23055589199066162,
"learning_rate": 8.999999999999999e-05,
"loss": 1.0916,
"step": 3
},
{
"epoch": 0.006789730532569488,
"grad_norm": 0.21309782564640045,
"learning_rate": 0.00011999999999999999,
"loss": 1.0918,
"step": 4
},
{
"epoch": 0.008487163165711862,
"grad_norm": 0.21661481261253357,
"learning_rate": 0.00015,
"loss": 0.8365,
"step": 5
},
{
"epoch": 0.010184595798854232,
"grad_norm": 0.2395806461572647,
"learning_rate": 0.00017999999999999998,
"loss": 1.0087,
"step": 6
},
{
"epoch": 0.011882028431996604,
"grad_norm": 0.25481849908828735,
"learning_rate": 0.00020999999999999998,
"loss": 0.9591,
"step": 7
},
{
"epoch": 0.013579461065138977,
"grad_norm": 0.22890186309814453,
"learning_rate": 0.00023999999999999998,
"loss": 0.6067,
"step": 8
},
{
"epoch": 0.015276893698281349,
"grad_norm": 0.2807515263557434,
"learning_rate": 0.00027,
"loss": 0.6593,
"step": 9
},
{
"epoch": 0.016974326331423723,
"grad_norm": 0.3522188067436218,
"learning_rate": 0.0003,
"loss": 0.847,
"step": 10
},
{
"epoch": 0.018671758964566094,
"grad_norm": 0.31385794281959534,
"learning_rate": 0.0002999992447508326,
"loss": 0.7329,
"step": 11
},
{
"epoch": 0.020369191597708464,
"grad_norm": 0.5269914269447327,
"learning_rate": 0.0002999969790109359,
"loss": 0.8751,
"step": 12
},
{
"epoch": 0.02206662423085084,
"grad_norm": 0.2571907341480255,
"learning_rate": 0.0002999932028031259,
"loss": 0.6175,
"step": 13
},
{
"epoch": 0.02376405686399321,
"grad_norm": 0.2777341306209564,
"learning_rate": 0.0002999879161654289,
"loss": 0.7442,
"step": 14
},
{
"epoch": 0.025461489497135583,
"grad_norm": 0.19830763339996338,
"learning_rate": 0.00029998111915108125,
"loss": 0.4802,
"step": 15
},
{
"epoch": 0.027158922130277954,
"grad_norm": 0.21772292256355286,
"learning_rate": 0.00029997281182852885,
"loss": 0.527,
"step": 16
},
{
"epoch": 0.028856354763420328,
"grad_norm": 0.18149510025978088,
"learning_rate": 0.00029996299428142637,
"loss": 0.4808,
"step": 17
},
{
"epoch": 0.030553787396562698,
"grad_norm": 0.22295671701431274,
"learning_rate": 0.00029995166660863636,
"loss": 0.5326,
"step": 18
},
{
"epoch": 0.03225122002970507,
"grad_norm": 0.24240700900554657,
"learning_rate": 0.0002999388289242284,
"loss": 0.5986,
"step": 19
},
{
"epoch": 0.033948652662847446,
"grad_norm": 0.21365882456302643,
"learning_rate": 0.00029992448135747777,
"loss": 0.4533,
"step": 20
},
{
"epoch": 0.03564608529598982,
"grad_norm": 0.13466337323188782,
"learning_rate": 0.00029990862405286433,
"loss": 0.2177,
"step": 21
},
{
"epoch": 0.03734351792913219,
"grad_norm": 0.2027832567691803,
"learning_rate": 0.00029989125717007104,
"loss": 0.4166,
"step": 22
},
{
"epoch": 0.03904095056227456,
"grad_norm": 0.14215879142284393,
"learning_rate": 0.0002998723808839821,
"loss": 0.2648,
"step": 23
},
{
"epoch": 0.04073838319541693,
"grad_norm": 0.10996250063180923,
"learning_rate": 0.00029985199538468154,
"loss": 0.1554,
"step": 24
},
{
"epoch": 0.042435815828559306,
"grad_norm": 0.10510318726301193,
"learning_rate": 0.0002998301008774512,
"loss": 0.0231,
"step": 25
},
{
"epoch": 0.04413324846170168,
"grad_norm": 0.10925798863172531,
"learning_rate": 0.00029980669758276836,
"loss": 0.1159,
"step": 26
},
{
"epoch": 0.04583068109484405,
"grad_norm": 0.06849532574415207,
"learning_rate": 0.0002997817857363041,
"loss": 0.039,
"step": 27
},
{
"epoch": 0.04752811372798642,
"grad_norm": 0.0729198008775711,
"learning_rate": 0.0002997553655889203,
"loss": 0.0084,
"step": 28
},
{
"epoch": 0.049225546361128796,
"grad_norm": 0.10013315826654434,
"learning_rate": 0.00029972743740666765,
"loss": 0.0985,
"step": 29
},
{
"epoch": 0.050922978994271166,
"grad_norm": 0.047209128737449646,
"learning_rate": 0.00029969800147078263,
"loss": 0.0234,
"step": 30
},
{
"epoch": 0.05262041162741354,
"grad_norm": 0.015719635412096977,
"learning_rate": 0.00029966705807768474,
"loss": 0.0014,
"step": 31
},
{
"epoch": 0.05431784426055591,
"grad_norm": 0.032827939838171005,
"learning_rate": 0.0002996346075389736,
"loss": 0.005,
"step": 32
},
{
"epoch": 0.056015276893698285,
"grad_norm": 0.021124735474586487,
"learning_rate": 0.0002996006501814258,
"loss": 0.0012,
"step": 33
},
{
"epoch": 0.057712709526840655,
"grad_norm": 0.014757171273231506,
"learning_rate": 0.0002995651863469916,
"loss": 0.0019,
"step": 34
},
{
"epoch": 0.059410142159983026,
"grad_norm": 0.013099136762320995,
"learning_rate": 0.00029952821639279135,
"loss": 0.0012,
"step": 35
},
{
"epoch": 0.061107574793125397,
"grad_norm": 0.02350614406168461,
"learning_rate": 0.0002994897406911121,
"loss": 0.0032,
"step": 36
},
{
"epoch": 0.06280500742626777,
"grad_norm": 0.01101439818739891,
"learning_rate": 0.0002994497596294037,
"loss": 0.0006,
"step": 37
},
{
"epoch": 0.06450244005941014,
"grad_norm": 0.08192350715398788,
"learning_rate": 0.00029940827361027504,
"loss": 0.0046,
"step": 38
},
{
"epoch": 0.06619987269255251,
"grad_norm": 0.03598169609904289,
"learning_rate": 0.0002993652830514899,
"loss": 0.0027,
"step": 39
},
{
"epoch": 0.06789730532569489,
"grad_norm": 0.045441944152116776,
"learning_rate": 0.0002993207883859627,
"loss": 0.0076,
"step": 40
},
{
"epoch": 0.06959473795883726,
"grad_norm": 0.048115745186805725,
"learning_rate": 0.00029927479006175417,
"loss": 0.0028,
"step": 41
},
{
"epoch": 0.07129217059197963,
"grad_norm": 0.027597462758421898,
"learning_rate": 0.00029922728854206703,
"loss": 0.002,
"step": 42
},
{
"epoch": 0.072989603225122,
"grad_norm": 0.0355045348405838,
"learning_rate": 0.00029917828430524096,
"loss": 0.0013,
"step": 43
},
{
"epoch": 0.07468703585826438,
"grad_norm": 0.0412307046353817,
"learning_rate": 0.00029912777784474814,
"loss": 0.003,
"step": 44
},
{
"epoch": 0.07638446849140675,
"grad_norm": 0.051867593079805374,
"learning_rate": 0.0002990757696691881,
"loss": 0.0039,
"step": 45
},
{
"epoch": 0.07808190112454912,
"grad_norm": 0.06054271385073662,
"learning_rate": 0.00029902226030228247,
"loss": 0.0019,
"step": 46
},
{
"epoch": 0.07977933375769149,
"grad_norm": 0.05313190817832947,
"learning_rate": 0.00029896725028287014,
"loss": 0.0041,
"step": 47
},
{
"epoch": 0.08147676639083386,
"grad_norm": 0.18755203485488892,
"learning_rate": 0.00029891074016490126,
"loss": 0.0036,
"step": 48
},
{
"epoch": 0.08317419902397624,
"grad_norm": 0.05305561423301697,
"learning_rate": 0.00029885273051743214,
"loss": 0.0051,
"step": 49
},
{
"epoch": 0.08487163165711861,
"grad_norm": 0.08674199134111404,
"learning_rate": 0.00029879322192461925,
"loss": 0.012,
"step": 50
},
{
"epoch": 0.08487163165711861,
"eval_loss": 0.4556533694267273,
"eval_runtime": 65.6522,
"eval_samples_per_second": 2.94,
"eval_steps_per_second": 2.94,
"step": 50
},
{
"epoch": 0.08656906429026098,
"grad_norm": 0.5487300753593445,
"learning_rate": 0.00029873221498571354,
"loss": 1.0487,
"step": 51
},
{
"epoch": 0.08826649692340335,
"grad_norm": 0.27697888016700745,
"learning_rate": 0.00029866971031505417,
"loss": 0.7504,
"step": 52
},
{
"epoch": 0.08996392955654572,
"grad_norm": 0.2856276035308838,
"learning_rate": 0.00029860570854206244,
"loss": 1.0179,
"step": 53
},
{
"epoch": 0.0916613621896881,
"grad_norm": 0.43166583776474,
"learning_rate": 0.0002985402103112355,
"loss": 0.7947,
"step": 54
},
{
"epoch": 0.09335879482283047,
"grad_norm": 0.26107504963874817,
"learning_rate": 0.0002984732162821399,
"loss": 0.9617,
"step": 55
},
{
"epoch": 0.09505622745597284,
"grad_norm": 0.20953023433685303,
"learning_rate": 0.00029840472712940465,
"loss": 0.7355,
"step": 56
},
{
"epoch": 0.0967536600891152,
"grad_norm": 0.22740969061851501,
"learning_rate": 0.0002983347435427148,
"loss": 0.839,
"step": 57
},
{
"epoch": 0.09845109272225759,
"grad_norm": 0.24932198226451874,
"learning_rate": 0.00029826326622680433,
"loss": 0.775,
"step": 58
},
{
"epoch": 0.10014852535539996,
"grad_norm": 0.260080486536026,
"learning_rate": 0.000298190295901449,
"loss": 0.9275,
"step": 59
},
{
"epoch": 0.10184595798854233,
"grad_norm": 0.22443322837352753,
"learning_rate": 0.00029811583330145914,
"loss": 0.796,
"step": 60
},
{
"epoch": 0.1035433906216847,
"grad_norm": 0.2286413460969925,
"learning_rate": 0.0002980398791766723,
"loss": 0.6944,
"step": 61
},
{
"epoch": 0.10524082325482707,
"grad_norm": 0.18511152267456055,
"learning_rate": 0.00029796243429194575,
"loss": 0.4977,
"step": 62
},
{
"epoch": 0.10693825588796944,
"grad_norm": 0.745172917842865,
"learning_rate": 0.00029788349942714854,
"loss": 0.4391,
"step": 63
},
{
"epoch": 0.10863568852111181,
"grad_norm": 0.27929672598838806,
"learning_rate": 0.0002978030753771539,
"loss": 0.8052,
"step": 64
},
{
"epoch": 0.11033312115425419,
"grad_norm": 0.3147849440574646,
"learning_rate": 0.0002977211629518312,
"loss": 1.0817,
"step": 65
},
{
"epoch": 0.11203055378739657,
"grad_norm": 0.22278442978858948,
"learning_rate": 0.00029763776297603756,
"loss": 0.5924,
"step": 66
},
{
"epoch": 0.11372798642053894,
"grad_norm": 0.1911861002445221,
"learning_rate": 0.0002975528762896098,
"loss": 0.3729,
"step": 67
},
{
"epoch": 0.11542541905368131,
"grad_norm": 0.21224386990070343,
"learning_rate": 0.000297466503747356,
"loss": 0.569,
"step": 68
},
{
"epoch": 0.11712285168682368,
"grad_norm": 0.25374075770378113,
"learning_rate": 0.0002973786462190466,
"loss": 0.8077,
"step": 69
},
{
"epoch": 0.11882028431996605,
"grad_norm": 0.16218560934066772,
"learning_rate": 0.00029728930458940595,
"loss": 0.3303,
"step": 70
},
{
"epoch": 0.12051771695310842,
"grad_norm": 0.18142607808113098,
"learning_rate": 0.0002971984797581034,
"loss": 0.4428,
"step": 71
},
{
"epoch": 0.12221514958625079,
"grad_norm": 0.22838933765888214,
"learning_rate": 0.0002971061726397438,
"loss": 0.6141,
"step": 72
},
{
"epoch": 0.12391258221939316,
"grad_norm": 0.16390271484851837,
"learning_rate": 0.00029701238416385896,
"loss": 0.3882,
"step": 73
},
{
"epoch": 0.12561001485253553,
"grad_norm": 0.07758953422307968,
"learning_rate": 0.00029691711527489776,
"loss": 0.0949,
"step": 74
},
{
"epoch": 0.1273074474856779,
"grad_norm": 0.10925949364900589,
"learning_rate": 0.0002968203669322168,
"loss": 0.1791,
"step": 75
},
{
"epoch": 0.12900488011882028,
"grad_norm": 0.021430950611829758,
"learning_rate": 0.0002967221401100708,
"loss": 0.003,
"step": 76
},
{
"epoch": 0.13070231275196265,
"grad_norm": 0.02433953993022442,
"learning_rate": 0.0002966224357976029,
"loss": 0.0021,
"step": 77
},
{
"epoch": 0.13239974538510502,
"grad_norm": 0.11294607818126678,
"learning_rate": 0.0002965212549988342,
"loss": 0.0765,
"step": 78
},
{
"epoch": 0.1340971780182474,
"grad_norm": 0.026264643296599388,
"learning_rate": 0.0002964185987326545,
"loss": 0.0026,
"step": 79
},
{
"epoch": 0.13579461065138979,
"grad_norm": 0.0762176588177681,
"learning_rate": 0.00029631446803281107,
"loss": 0.0394,
"step": 80
},
{
"epoch": 0.13749204328453216,
"grad_norm": 0.03476507589221001,
"learning_rate": 0.00029620886394789885,
"loss": 0.005,
"step": 81
},
{
"epoch": 0.13918947591767453,
"grad_norm": 0.03176519274711609,
"learning_rate": 0.00029610178754135,
"loss": 0.0038,
"step": 82
},
{
"epoch": 0.1408869085508169,
"grad_norm": 0.006855768151581287,
"learning_rate": 0.00029599323989142263,
"loss": 0.0004,
"step": 83
},
{
"epoch": 0.14258434118395927,
"grad_norm": 0.013548546470701694,
"learning_rate": 0.00029588322209119037,
"loss": 0.0007,
"step": 84
},
{
"epoch": 0.14428177381710164,
"grad_norm": 0.032073307782411575,
"learning_rate": 0.00029577173524853123,
"loss": 0.0023,
"step": 85
},
{
"epoch": 0.145979206450244,
"grad_norm": 0.06568052619695663,
"learning_rate": 0.0002956587804861165,
"loss": 0.003,
"step": 86
},
{
"epoch": 0.14767663908338638,
"grad_norm": 0.017053432762622833,
"learning_rate": 0.0002955443589413994,
"loss": 0.0011,
"step": 87
},
{
"epoch": 0.14937407171652875,
"grad_norm": 0.027703823521733284,
"learning_rate": 0.0002954284717666036,
"loss": 0.0011,
"step": 88
},
{
"epoch": 0.15107150434967112,
"grad_norm": 0.028447195887565613,
"learning_rate": 0.00029531112012871175,
"loss": 0.002,
"step": 89
},
{
"epoch": 0.1527689369828135,
"grad_norm": 0.051351480185985565,
"learning_rate": 0.0002951923052094534,
"loss": 0.0023,
"step": 90
},
{
"epoch": 0.15446636961595586,
"grad_norm": 0.04188869521021843,
"learning_rate": 0.0002950720282052936,
"loss": 0.0022,
"step": 91
},
{
"epoch": 0.15616380224909823,
"grad_norm": 0.03857725113630295,
"learning_rate": 0.00029495029032742024,
"loss": 0.0013,
"step": 92
},
{
"epoch": 0.1578612348822406,
"grad_norm": 0.030291402712464333,
"learning_rate": 0.0002948270928017326,
"loss": 0.0015,
"step": 93
},
{
"epoch": 0.15955866751538297,
"grad_norm": 0.05515044555068016,
"learning_rate": 0.00029470243686882837,
"loss": 0.0008,
"step": 94
},
{
"epoch": 0.16125610014852534,
"grad_norm": 0.0681704506278038,
"learning_rate": 0.00029457632378399127,
"loss": 0.0016,
"step": 95
},
{
"epoch": 0.16295353278166771,
"grad_norm": 0.17659763991832733,
"learning_rate": 0.0002944487548171788,
"loss": 0.0015,
"step": 96
},
{
"epoch": 0.1646509654148101,
"grad_norm": 0.01778286136686802,
"learning_rate": 0.00029431973125300907,
"loss": 0.0003,
"step": 97
},
{
"epoch": 0.16634839804795248,
"grad_norm": 0.052063290029764175,
"learning_rate": 0.0002941892543907478,
"loss": 0.0042,
"step": 98
},
{
"epoch": 0.16804583068109485,
"grad_norm": 0.08560975641012192,
"learning_rate": 0.00029405732554429564,
"loss": 0.003,
"step": 99
},
{
"epoch": 0.16974326331423722,
"grad_norm": 0.07182486355304718,
"learning_rate": 0.0002939239460421746,
"loss": 0.0091,
"step": 100
},
{
"epoch": 0.16974326331423722,
"eval_loss": 0.4275517165660858,
"eval_runtime": 65.6527,
"eval_samples_per_second": 2.94,
"eval_steps_per_second": 2.94,
"step": 100
},
{
"epoch": 0.1714406959473796,
"grad_norm": 0.33816632628440857,
"learning_rate": 0.0002937891172275147,
"loss": 0.6861,
"step": 101
},
{
"epoch": 0.17313812858052197,
"grad_norm": 0.24974839389324188,
"learning_rate": 0.00029365284045804077,
"loss": 0.7581,
"step": 102
},
{
"epoch": 0.17483556121366434,
"grad_norm": 0.389892041683197,
"learning_rate": 0.00029351511710605825,
"loss": 0.8517,
"step": 103
},
{
"epoch": 0.1765329938468067,
"grad_norm": 0.21865390241146088,
"learning_rate": 0.0002933759485584397,
"loss": 0.7927,
"step": 104
},
{
"epoch": 0.17823042647994908,
"grad_norm": 0.2867435812950134,
"learning_rate": 0.00029323533621661106,
"loss": 0.8931,
"step": 105
},
{
"epoch": 0.17992785911309145,
"grad_norm": 0.19504129886627197,
"learning_rate": 0.0002930932814965369,
"loss": 0.5056,
"step": 106
},
{
"epoch": 0.18162529174623382,
"grad_norm": 0.2704707980155945,
"learning_rate": 0.00029294978582870666,
"loss": 0.7927,
"step": 107
},
{
"epoch": 0.1833227243793762,
"grad_norm": 0.20052890479564667,
"learning_rate": 0.0002928048506581202,
"loss": 0.6507,
"step": 108
},
{
"epoch": 0.18502015701251856,
"grad_norm": 0.20652854442596436,
"learning_rate": 0.00029265847744427303,
"loss": 0.6928,
"step": 109
},
{
"epoch": 0.18671758964566093,
"grad_norm": 0.2864634096622467,
"learning_rate": 0.00029251066766114176,
"loss": 1.0405,
"step": 110
},
{
"epoch": 0.1884150222788033,
"grad_norm": 0.2000068873167038,
"learning_rate": 0.0002923614227971694,
"loss": 0.5787,
"step": 111
},
{
"epoch": 0.19011245491194567,
"grad_norm": 0.16141685843467712,
"learning_rate": 0.0002922107443552499,
"loss": 0.4154,
"step": 112
},
{
"epoch": 0.19180988754508804,
"grad_norm": 0.2669724225997925,
"learning_rate": 0.00029205863385271363,
"loss": 0.9767,
"step": 113
},
{
"epoch": 0.1935073201782304,
"grad_norm": 0.7105275988578796,
"learning_rate": 0.00029190509282131153,
"loss": 0.6763,
"step": 114
},
{
"epoch": 0.1952047528113728,
"grad_norm": 0.23595799505710602,
"learning_rate": 0.00029175012280720024,
"loss": 0.6856,
"step": 115
},
{
"epoch": 0.19690218544451518,
"grad_norm": 0.23921440541744232,
"learning_rate": 0.00029159372537092596,
"loss": 0.7075,
"step": 116
},
{
"epoch": 0.19859961807765755,
"grad_norm": 0.21551960706710815,
"learning_rate": 0.0002914359020874092,
"loss": 0.6004,
"step": 117
},
{
"epoch": 0.20029705071079992,
"grad_norm": 0.20390333235263824,
"learning_rate": 0.0002912766545459287,
"loss": 0.57,
"step": 118
},
{
"epoch": 0.2019944833439423,
"grad_norm": 0.15612931549549103,
"learning_rate": 0.0002911159843501053,
"loss": 0.2055,
"step": 119
},
{
"epoch": 0.20369191597708466,
"grad_norm": 0.17460142076015472,
"learning_rate": 0.0002909538931178862,
"loss": 0.371,
"step": 120
},
{
"epoch": 0.20538934861022703,
"grad_norm": 0.18853937089443207,
"learning_rate": 0.00029079038248152835,
"loss": 0.4252,
"step": 121
},
{
"epoch": 0.2070867812433694,
"grad_norm": 0.14426594972610474,
"learning_rate": 0.0002906254540875819,
"loss": 0.2658,
"step": 122
},
{
"epoch": 0.20878421387651178,
"grad_norm": 0.16134832799434662,
"learning_rate": 0.0002904591095968741,
"loss": 0.297,
"step": 123
},
{
"epoch": 0.21048164650965415,
"grad_norm": 0.3797933757305145,
"learning_rate": 0.00029029135068449195,
"loss": 0.2837,
"step": 124
},
{
"epoch": 0.21217907914279652,
"grad_norm": 0.038109440356492996,
"learning_rate": 0.000290122179039766,
"loss": 0.0149,
"step": 125
},
{
"epoch": 0.2138765117759389,
"grad_norm": 0.11762002855539322,
"learning_rate": 0.00028995159636625276,
"loss": 0.0684,
"step": 126
},
{
"epoch": 0.21557394440908126,
"grad_norm": 0.02665620669722557,
"learning_rate": 0.00028977960438171784,
"loss": 0.0044,
"step": 127
},
{
"epoch": 0.21727137704222363,
"grad_norm": 0.03071051463484764,
"learning_rate": 0.0002896062048181186,
"loss": 0.0018,
"step": 128
},
{
"epoch": 0.218968809675366,
"grad_norm": 0.009578248485922813,
"learning_rate": 0.00028943139942158683,
"loss": 0.0004,
"step": 129
},
{
"epoch": 0.22066624230850837,
"grad_norm": 0.0018459237180650234,
"learning_rate": 0.0002892551899524109,
"loss": 0.0001,
"step": 130
},
{
"epoch": 0.22236367494165074,
"grad_norm": 0.09641125798225403,
"learning_rate": 0.0002890775781850181,
"loss": 0.0588,
"step": 131
},
{
"epoch": 0.22406110757479314,
"grad_norm": 0.008955306373536587,
"learning_rate": 0.000288898565907957,
"loss": 0.0002,
"step": 132
},
{
"epoch": 0.2257585402079355,
"grad_norm": 0.4112979471683502,
"learning_rate": 0.0002887181549238793,
"loss": 0.0037,
"step": 133
},
{
"epoch": 0.22745597284107788,
"grad_norm": 0.0008860170491971076,
"learning_rate": 0.00028853634704952165,
"loss": 0.0001,
"step": 134
},
{
"epoch": 0.22915340547422025,
"grad_norm": 0.03983623534440994,
"learning_rate": 0.0002883531441156872,
"loss": 0.0014,
"step": 135
},
{
"epoch": 0.23085083810736262,
"grad_norm": 0.13147315382957458,
"learning_rate": 0.00028816854796722747,
"loss": 0.0039,
"step": 136
},
{
"epoch": 0.232548270740505,
"grad_norm": 0.027041589841246605,
"learning_rate": 0.00028798256046302375,
"loss": 0.003,
"step": 137
},
{
"epoch": 0.23424570337364736,
"grad_norm": 0.012492002919316292,
"learning_rate": 0.000287795183475968,
"loss": 0.0006,
"step": 138
},
{
"epoch": 0.23594313600678973,
"grad_norm": 0.004202733281999826,
"learning_rate": 0.00028760641889294446,
"loss": 0.0002,
"step": 139
},
{
"epoch": 0.2376405686399321,
"grad_norm": 0.004685190040618181,
"learning_rate": 0.0002874162686148104,
"loss": 0.0003,
"step": 140
},
{
"epoch": 0.23933800127307447,
"grad_norm": 0.009362437762320042,
"learning_rate": 0.000287224734556377,
"loss": 0.0007,
"step": 141
},
{
"epoch": 0.24103543390621684,
"grad_norm": 0.0981183648109436,
"learning_rate": 0.0002870318186463901,
"loss": 0.0042,
"step": 142
},
{
"epoch": 0.24273286653935922,
"grad_norm": 0.015274147503077984,
"learning_rate": 0.00028683752282751074,
"loss": 0.0007,
"step": 143
},
{
"epoch": 0.24443029917250159,
"grad_norm": 0.00900458823889494,
"learning_rate": 0.0002866418490562957,
"loss": 0.0007,
"step": 144
},
{
"epoch": 0.24612773180564396,
"grad_norm": 0.01067203190177679,
"learning_rate": 0.00028644479930317775,
"loss": 0.0006,
"step": 145
},
{
"epoch": 0.24782516443878633,
"grad_norm": 0.022083261981606483,
"learning_rate": 0.0002862463755524455,
"loss": 0.0006,
"step": 146
},
{
"epoch": 0.2495225970719287,
"grad_norm": 0.003914229571819305,
"learning_rate": 0.00028604657980222417,
"loss": 0.0003,
"step": 147
},
{
"epoch": 0.25122002970507107,
"grad_norm": 0.005544353276491165,
"learning_rate": 0.0002858454140644546,
"loss": 0.0004,
"step": 148
},
{
"epoch": 0.25291746233821344,
"grad_norm": 0.006152280140668154,
"learning_rate": 0.00028564288036487357,
"loss": 0.0005,
"step": 149
},
{
"epoch": 0.2546148949713558,
"grad_norm": 0.04355669021606445,
"learning_rate": 0.00028543898074299317,
"loss": 0.0053,
"step": 150
},
{
"epoch": 0.2546148949713558,
"eval_loss": 0.42699772119522095,
"eval_runtime": 65.6214,
"eval_samples_per_second": 2.941,
"eval_steps_per_second": 2.941,
"step": 150
},
{
"epoch": 0.2563123276044982,
"grad_norm": 0.33021700382232666,
"learning_rate": 0.00028523371725208035,
"loss": 0.7046,
"step": 151
},
{
"epoch": 0.25800976023764055,
"grad_norm": 0.29715627431869507,
"learning_rate": 0.00028502709195913614,
"loss": 0.8173,
"step": 152
},
{
"epoch": 0.2597071928707829,
"grad_norm": 0.23798537254333496,
"learning_rate": 0.000284819106944875,
"loss": 0.8376,
"step": 153
},
{
"epoch": 0.2614046255039253,
"grad_norm": 0.24740736186504364,
"learning_rate": 0.0002846097643037037,
"loss": 0.8829,
"step": 154
},
{
"epoch": 0.26310205813706766,
"grad_norm": 0.8396446704864502,
"learning_rate": 0.00028439906614370034,
"loss": 0.8286,
"step": 155
},
{
"epoch": 0.26479949077021003,
"grad_norm": 0.271604061126709,
"learning_rate": 0.00028418701458659304,
"loss": 0.8996,
"step": 156
},
{
"epoch": 0.2664969234033524,
"grad_norm": 0.2267700582742691,
"learning_rate": 0.00028397361176773855,
"loss": 0.6639,
"step": 157
},
{
"epoch": 0.2681943560364948,
"grad_norm": 0.19427096843719482,
"learning_rate": 0.000283758859836101,
"loss": 0.4771,
"step": 158
},
{
"epoch": 0.2698917886696372,
"grad_norm": 0.1682572066783905,
"learning_rate": 0.0002835427609542298,
"loss": 0.407,
"step": 159
},
{
"epoch": 0.27158922130277957,
"grad_norm": 0.22453951835632324,
"learning_rate": 0.0002833253172982385,
"loss": 0.6087,
"step": 160
},
{
"epoch": 0.27328665393592194,
"grad_norm": 0.2826938331127167,
"learning_rate": 0.00028310653105778215,
"loss": 0.8161,
"step": 161
},
{
"epoch": 0.2749840865690643,
"grad_norm": 0.26969850063323975,
"learning_rate": 0.0002828864044360358,
"loss": 0.9043,
"step": 162
},
{
"epoch": 0.2766815192022067,
"grad_norm": 0.2145734280347824,
"learning_rate": 0.0002826649396496721,
"loss": 0.5051,
"step": 163
},
{
"epoch": 0.27837895183534905,
"grad_norm": 0.22222040593624115,
"learning_rate": 0.000282442138928839,
"loss": 0.6356,
"step": 164
},
{
"epoch": 0.2800763844684914,
"grad_norm": 0.1878889799118042,
"learning_rate": 0.0002822180045171373,
"loss": 0.4778,
"step": 165
},
{
"epoch": 0.2817738171016338,
"grad_norm": 0.1969398856163025,
"learning_rate": 0.00028199253867159795,
"loss": 0.4855,
"step": 166
},
{
"epoch": 0.28347124973477617,
"grad_norm": 0.11540260910987854,
"learning_rate": 0.0002817657436626596,
"loss": 0.1777,
"step": 167
},
{
"epoch": 0.28516868236791854,
"grad_norm": 0.14375105500221252,
"learning_rate": 0.0002815376217741454,
"loss": 0.3379,
"step": 168
},
{
"epoch": 0.2868661150010609,
"grad_norm": 0.4472449719905853,
"learning_rate": 0.0002813081753032403,
"loss": 0.5727,
"step": 169
},
{
"epoch": 0.2885635476342033,
"grad_norm": 0.24242308735847473,
"learning_rate": 0.0002810774065604677,
"loss": 0.3351,
"step": 170
},
{
"epoch": 0.29026098026734565,
"grad_norm": 0.13647812604904175,
"learning_rate": 0.0002808453178696663,
"loss": 0.2388,
"step": 171
},
{
"epoch": 0.291958412900488,
"grad_norm": 0.13304531574249268,
"learning_rate": 0.00028061191156796656,
"loss": 0.2208,
"step": 172
},
{
"epoch": 0.2936558455336304,
"grad_norm": 0.13337798416614532,
"learning_rate": 0.0002803771900057674,
"loss": 0.1881,
"step": 173
},
{
"epoch": 0.29535327816677276,
"grad_norm": 0.12482510507106781,
"learning_rate": 0.00028014115554671236,
"loss": 0.1659,
"step": 174
},
{
"epoch": 0.29705071079991513,
"grad_norm": 0.05092178285121918,
"learning_rate": 0.0002799038105676658,
"loss": 0.0022,
"step": 175
},
{
"epoch": 0.2987481434330575,
"grad_norm": 0.03115926869213581,
"learning_rate": 0.000279665157458689,
"loss": 0.007,
"step": 176
},
{
"epoch": 0.30044557606619987,
"grad_norm": 0.048752959817647934,
"learning_rate": 0.0002794251986230161,
"loss": 0.0231,
"step": 177
},
{
"epoch": 0.30214300869934224,
"grad_norm": 0.049951329827308655,
"learning_rate": 0.0002791839364770301,
"loss": 0.024,
"step": 178
},
{
"epoch": 0.3038404413324846,
"grad_norm": 0.019635546952486038,
"learning_rate": 0.00027894137345023785,
"loss": 0.0009,
"step": 179
},
{
"epoch": 0.305537873965627,
"grad_norm": 0.008390502072870731,
"learning_rate": 0.0002786975119852465,
"loss": 0.0002,
"step": 180
},
{
"epoch": 0.30723530659876935,
"grad_norm": 0.030752401798963547,
"learning_rate": 0.00027845235453773836,
"loss": 0.0012,
"step": 181
},
{
"epoch": 0.3089327392319117,
"grad_norm": 0.004207144025713205,
"learning_rate": 0.000278205903576446,
"loss": 0.0001,
"step": 182
},
{
"epoch": 0.3106301718650541,
"grad_norm": 0.012170841917395592,
"learning_rate": 0.00027795816158312803,
"loss": 0.0004,
"step": 183
},
{
"epoch": 0.31232760449819646,
"grad_norm": 0.004860470537096262,
"learning_rate": 0.0002777091310525435,
"loss": 0.0002,
"step": 184
},
{
"epoch": 0.31402503713133884,
"grad_norm": 0.004869968164712191,
"learning_rate": 0.00027745881449242713,
"loss": 0.0002,
"step": 185
},
{
"epoch": 0.3157224697644812,
"grad_norm": 0.011595932766795158,
"learning_rate": 0.00027720721442346387,
"loss": 0.0003,
"step": 186
},
{
"epoch": 0.3174199023976236,
"grad_norm": 0.02118592895567417,
"learning_rate": 0.0002769543333792636,
"loss": 0.0012,
"step": 187
},
{
"epoch": 0.31911733503076595,
"grad_norm": 0.03935430571436882,
"learning_rate": 0.00027670017390633573,
"loss": 0.0003,
"step": 188
},
{
"epoch": 0.3208147676639083,
"grad_norm": 0.0356944277882576,
"learning_rate": 0.0002764447385640632,
"loss": 0.0008,
"step": 189
},
{
"epoch": 0.3225122002970507,
"grad_norm": 0.05277875065803528,
"learning_rate": 0.0002761880299246772,
"loss": 0.0007,
"step": 190
},
{
"epoch": 0.32420963293019306,
"grad_norm": 0.0025087774265557528,
"learning_rate": 0.0002759300505732307,
"loss": 0.0001,
"step": 191
},
{
"epoch": 0.32590706556333543,
"grad_norm": 0.002132557798177004,
"learning_rate": 0.00027567080310757306,
"loss": 0.0001,
"step": 192
},
{
"epoch": 0.3276044981964778,
"grad_norm": 0.0010882457718253136,
"learning_rate": 0.0002754102901383233,
"loss": 0.0001,
"step": 193
},
{
"epoch": 0.3293019308296202,
"grad_norm": 0.02085525542497635,
"learning_rate": 0.0002751485142888443,
"loss": 0.0004,
"step": 194
},
{
"epoch": 0.3309993634627626,
"grad_norm": 0.003921423573046923,
"learning_rate": 0.0002748854781952157,
"loss": 0.0001,
"step": 195
},
{
"epoch": 0.33269679609590497,
"grad_norm": 0.05576641112565994,
"learning_rate": 0.0002746211845062082,
"loss": 0.0027,
"step": 196
},
{
"epoch": 0.33439422872904734,
"grad_norm": 0.025059282779693604,
"learning_rate": 0.0002743556358832562,
"loss": 0.0009,
"step": 197
},
{
"epoch": 0.3360916613621897,
"grad_norm": 0.14736546576023102,
"learning_rate": 0.00027408883500043156,
"loss": 0.0117,
"step": 198
},
{
"epoch": 0.3377890939953321,
"grad_norm": 0.022192303091287613,
"learning_rate": 0.00027382078454441606,
"loss": 0.0005,
"step": 199
},
{
"epoch": 0.33948652662847445,
"grad_norm": 0.005171961151063442,
"learning_rate": 0.0002735514872144749,
"loss": 0.0002,
"step": 200
},
{
"epoch": 0.33948652662847445,
"eval_loss": 0.42559075355529785,
"eval_runtime": 65.6151,
"eval_samples_per_second": 2.941,
"eval_steps_per_second": 2.941,
"step": 200
},
{
"epoch": 0.3411839592616168,
"grad_norm": 0.5887985825538635,
"learning_rate": 0.0002732809457224292,
"loss": 0.9434,
"step": 201
},
{
"epoch": 0.3428813918947592,
"grad_norm": 0.38302966952323914,
"learning_rate": 0.00027300916279262866,
"loss": 1.0988,
"step": 202
},
{
"epoch": 0.34457882452790156,
"grad_norm": 0.2614414095878601,
"learning_rate": 0.0002727361411619245,
"loss": 0.6772,
"step": 203
},
{
"epoch": 0.34627625716104393,
"grad_norm": 0.3063081204891205,
"learning_rate": 0.0002724618835796414,
"loss": 0.7314,
"step": 204
},
{
"epoch": 0.3479736897941863,
"grad_norm": 0.18658895790576935,
"learning_rate": 0.0002721863928075503,
"loss": 0.514,
"step": 205
},
{
"epoch": 0.3496711224273287,
"grad_norm": 0.282010018825531,
"learning_rate": 0.0002719096716198402,
"loss": 0.892,
"step": 206
},
{
"epoch": 0.35136855506047104,
"grad_norm": 0.17541489005088806,
"learning_rate": 0.00027163172280309026,
"loss": 0.5047,
"step": 207
},
{
"epoch": 0.3530659876936134,
"grad_norm": 0.21916832029819489,
"learning_rate": 0.0002713525491562421,
"loss": 0.7146,
"step": 208
},
{
"epoch": 0.3547634203267558,
"grad_norm": 0.20561501383781433,
"learning_rate": 0.0002710721534905712,
"loss": 0.6104,
"step": 209
},
{
"epoch": 0.35646085295989816,
"grad_norm": 0.1947142332792282,
"learning_rate": 0.00027079053862965875,
"loss": 0.4924,
"step": 210
},
{
"epoch": 0.3581582855930405,
"grad_norm": 0.23798146843910217,
"learning_rate": 0.00027050770740936336,
"loss": 0.6153,
"step": 211
},
{
"epoch": 0.3598557182261829,
"grad_norm": 0.21775560081005096,
"learning_rate": 0.00027022366267779224,
"loss": 0.4658,
"step": 212
},
{
"epoch": 0.36155315085932527,
"grad_norm": 0.24994409084320068,
"learning_rate": 0.0002699384072952727,
"loss": 0.5979,
"step": 213
},
{
"epoch": 0.36325058349246764,
"grad_norm": 0.28469640016555786,
"learning_rate": 0.0002696519441343233,
"loss": 0.8796,
"step": 214
},
{
"epoch": 0.36494801612561,
"grad_norm": 0.2747570276260376,
"learning_rate": 0.0002693642760796248,
"loss": 0.8625,
"step": 215
},
{
"epoch": 0.3666454487587524,
"grad_norm": 0.2469591647386551,
"learning_rate": 0.00026907540602799136,
"loss": 0.6991,
"step": 216
},
{
"epoch": 0.36834288139189475,
"grad_norm": 0.20425763726234436,
"learning_rate": 0.00026878533688834123,
"loss": 0.5774,
"step": 217
},
{
"epoch": 0.3700403140250371,
"grad_norm": 0.2737872898578644,
"learning_rate": 0.0002684940715816674,
"loss": 0.9062,
"step": 218
},
{
"epoch": 0.3717377466581795,
"grad_norm": 0.2064121514558792,
"learning_rate": 0.00026820161304100823,
"loss": 0.5054,
"step": 219
},
{
"epoch": 0.37343517929132186,
"grad_norm": 0.14863868057727814,
"learning_rate": 0.00026790796421141813,
"loss": 0.285,
"step": 220
},
{
"epoch": 0.37513261192446423,
"grad_norm": 0.12498918920755386,
"learning_rate": 0.00026761312804993734,
"loss": 0.1999,
"step": 221
},
{
"epoch": 0.3768300445576066,
"grad_norm": 0.1726280152797699,
"learning_rate": 0.0002673171075255629,
"loss": 0.2852,
"step": 222
},
{
"epoch": 0.378527477190749,
"grad_norm": 0.1533537358045578,
"learning_rate": 0.0002670199056192181,
"loss": 0.3106,
"step": 223
},
{
"epoch": 0.38022490982389134,
"grad_norm": 0.125217467546463,
"learning_rate": 0.00026672152532372287,
"loss": 0.1804,
"step": 224
},
{
"epoch": 0.3819223424570337,
"grad_norm": 0.05522383376955986,
"learning_rate": 0.0002664219696437635,
"loss": 0.0442,
"step": 225
},
{
"epoch": 0.3836197750901761,
"grad_norm": 0.04138198867440224,
"learning_rate": 0.00026612124159586237,
"loss": 0.023,
"step": 226
},
{
"epoch": 0.38531720772331846,
"grad_norm": 0.05575822666287422,
"learning_rate": 0.0002658193442083475,
"loss": 0.0024,
"step": 227
},
{
"epoch": 0.3870146403564608,
"grad_norm": 0.12629126012325287,
"learning_rate": 0.0002655162805213223,
"loss": 0.1524,
"step": 228
},
{
"epoch": 0.38871207298960325,
"grad_norm": 0.02942221239209175,
"learning_rate": 0.00026521205358663477,
"loss": 0.0096,
"step": 229
},
{
"epoch": 0.3904095056227456,
"grad_norm": 0.0953650251030922,
"learning_rate": 0.00026490666646784665,
"loss": 0.0043,
"step": 230
},
{
"epoch": 0.392106938255888,
"grad_norm": 0.005734459031373262,
"learning_rate": 0.00026460012224020297,
"loss": 0.0003,
"step": 231
},
{
"epoch": 0.39380437088903036,
"grad_norm": 0.010758363641798496,
"learning_rate": 0.0002642924239906006,
"loss": 0.0003,
"step": 232
},
{
"epoch": 0.39550180352217273,
"grad_norm": 0.01772010512650013,
"learning_rate": 0.0002639835748175575,
"loss": 0.0007,
"step": 233
},
{
"epoch": 0.3971992361553151,
"grad_norm": 0.005056055262684822,
"learning_rate": 0.0002636735778311815,
"loss": 0.0002,
"step": 234
},
{
"epoch": 0.3988966687884575,
"grad_norm": 0.24263891577720642,
"learning_rate": 0.00026336243615313873,
"loss": 0.0008,
"step": 235
},
{
"epoch": 0.40059410142159985,
"grad_norm": 0.0014849180588498712,
"learning_rate": 0.0002630501529166224,
"loss": 0.0001,
"step": 236
},
{
"epoch": 0.4022915340547422,
"grad_norm": 0.0037826071493327618,
"learning_rate": 0.00026273673126632133,
"loss": 0.0002,
"step": 237
},
{
"epoch": 0.4039889666878846,
"grad_norm": 0.08331254124641418,
"learning_rate": 0.0002624221743583881,
"loss": 0.0016,
"step": 238
},
{
"epoch": 0.40568639932102696,
"grad_norm": 0.002364553976804018,
"learning_rate": 0.0002621064853604071,
"loss": 0.0001,
"step": 239
},
{
"epoch": 0.40738383195416933,
"grad_norm": 0.014542756602168083,
"learning_rate": 0.0002617896674513632,
"loss": 0.0002,
"step": 240
},
{
"epoch": 0.4090812645873117,
"grad_norm": 0.0031418628059327602,
"learning_rate": 0.00026147172382160914,
"loss": 0.0001,
"step": 241
},
{
"epoch": 0.41077869722045407,
"grad_norm": 0.11094752699136734,
"learning_rate": 0.00026115265767283374,
"loss": 0.0031,
"step": 242
},
{
"epoch": 0.41247612985359644,
"grad_norm": 0.012769564054906368,
"learning_rate": 0.0002608324722180296,
"loss": 0.0005,
"step": 243
},
{
"epoch": 0.4141735624867388,
"grad_norm": 0.055052801966667175,
"learning_rate": 0.0002605111706814607,
"loss": 0.0023,
"step": 244
},
{
"epoch": 0.4158709951198812,
"grad_norm": 0.003668338293209672,
"learning_rate": 0.00026018875629862996,
"loss": 0.0002,
"step": 245
},
{
"epoch": 0.41756842775302355,
"grad_norm": 0.009973675012588501,
"learning_rate": 0.0002598652323162466,
"loss": 0.0003,
"step": 246
},
{
"epoch": 0.4192658603861659,
"grad_norm": 0.02005830593407154,
"learning_rate": 0.0002595406019921936,
"loss": 0.0008,
"step": 247
},
{
"epoch": 0.4209632930193083,
"grad_norm": 0.02860446274280548,
"learning_rate": 0.0002592148685954946,
"loss": 0.0024,
"step": 248
},
{
"epoch": 0.42266072565245066,
"grad_norm": 0.03582284599542618,
"learning_rate": 0.0002588880354062814,
"loss": 0.0014,
"step": 249
},
{
"epoch": 0.42435815828559303,
"grad_norm": 0.03657930716872215,
"learning_rate": 0.0002585601057157605,
"loss": 0.0023,
"step": 250
},
{
"epoch": 0.42435815828559303,
"eval_loss": 0.39342138171195984,
"eval_runtime": 65.6277,
"eval_samples_per_second": 2.941,
"eval_steps_per_second": 2.941,
"step": 250
},
{
"epoch": 0.4260555909187354,
"grad_norm": 0.3402771055698395,
"learning_rate": 0.0002582310828261803,
"loss": 0.955,
"step": 251
},
{
"epoch": 0.4277530235518778,
"grad_norm": 0.2694092392921448,
"learning_rate": 0.00025790097005079764,
"loss": 0.7843,
"step": 252
},
{
"epoch": 0.42945045618502015,
"grad_norm": 0.22484031319618225,
"learning_rate": 0.00025756977071384455,
"loss": 0.6626,
"step": 253
},
{
"epoch": 0.4311478888181625,
"grad_norm": 0.25034627318382263,
"learning_rate": 0.0002572374881504945,
"loss": 0.8865,
"step": 254
},
{
"epoch": 0.4328453214513049,
"grad_norm": 0.25369909405708313,
"learning_rate": 0.00025690412570682946,
"loss": 0.7099,
"step": 255
},
{
"epoch": 0.43454275408444726,
"grad_norm": 0.22795934975147247,
"learning_rate": 0.0002565696867398053,
"loss": 0.7818,
"step": 256
},
{
"epoch": 0.43624018671758963,
"grad_norm": 0.2158069759607315,
"learning_rate": 0.00025623417461721884,
"loss": 0.6434,
"step": 257
},
{
"epoch": 0.437937619350732,
"grad_norm": 0.2332068681716919,
"learning_rate": 0.00025589759271767344,
"loss": 0.8126,
"step": 258
},
{
"epoch": 0.43963505198387437,
"grad_norm": 0.21993213891983032,
"learning_rate": 0.00025555994443054504,
"loss": 0.6689,
"step": 259
},
{
"epoch": 0.44133248461701674,
"grad_norm": 0.26037323474884033,
"learning_rate": 0.0002552212331559482,
"loss": 0.992,
"step": 260
},
{
"epoch": 0.4430299172501591,
"grad_norm": 0.2357717603445053,
"learning_rate": 0.00025488146230470156,
"loss": 0.7212,
"step": 261
},
{
"epoch": 0.4447273498833015,
"grad_norm": 0.22752051055431366,
"learning_rate": 0.00025454063529829405,
"loss": 0.7759,
"step": 262
},
{
"epoch": 0.44642478251644385,
"grad_norm": 0.20641978085041046,
"learning_rate": 0.0002541987555688496,
"loss": 0.6029,
"step": 263
},
{
"epoch": 0.4481222151495863,
"grad_norm": 1.728589415550232,
"learning_rate": 0.0002538558265590934,
"loss": 0.8527,
"step": 264
},
{
"epoch": 0.44981964778272865,
"grad_norm": 0.3176920711994171,
"learning_rate": 0.0002535118517223168,
"loss": 1.0045,
"step": 265
},
{
"epoch": 0.451517080415871,
"grad_norm": 0.22813205420970917,
"learning_rate": 0.00025316683452234254,
"loss": 0.5755,
"step": 266
},
{
"epoch": 0.4532145130490134,
"grad_norm": 0.27417638897895813,
"learning_rate": 0.00025282077843349,
"loss": 0.6442,
"step": 267
},
{
"epoch": 0.45491194568215576,
"grad_norm": 0.23180553317070007,
"learning_rate": 0.00025247368694054017,
"loss": 0.3961,
"step": 268
},
{
"epoch": 0.45660937831529813,
"grad_norm": 0.21716707944869995,
"learning_rate": 0.0002521255635387005,
"loss": 0.5498,
"step": 269
},
{
"epoch": 0.4583068109484405,
"grad_norm": 0.1608789563179016,
"learning_rate": 0.0002517764117335698,
"loss": 0.3229,
"step": 270
},
{
"epoch": 0.4600042435815829,
"grad_norm": 0.06968680769205093,
"learning_rate": 0.00025142623504110286,
"loss": 0.0545,
"step": 271
},
{
"epoch": 0.46170167621472524,
"grad_norm": 0.17753787338733673,
"learning_rate": 0.0002510750369875752,
"loss": 0.3944,
"step": 272
},
{
"epoch": 0.4633991088478676,
"grad_norm": 0.1846492886543274,
"learning_rate": 0.0002507228211095471,
"loss": 0.4219,
"step": 273
},
{
"epoch": 0.46509654148101,
"grad_norm": 0.12200163304805756,
"learning_rate": 0.0002503695909538287,
"loss": 0.1832,
"step": 274
},
{
"epoch": 0.46679397411415235,
"grad_norm": 0.08617426455020905,
"learning_rate": 0.00025001535007744373,
"loss": 0.0833,
"step": 275
},
{
"epoch": 0.4684914067472947,
"grad_norm": 0.10826346278190613,
"learning_rate": 0.0002496601020475938,
"loss": 0.1379,
"step": 276
},
{
"epoch": 0.4701888393804371,
"grad_norm": 0.09130895137786865,
"learning_rate": 0.00024930385044162276,
"loss": 0.0909,
"step": 277
},
{
"epoch": 0.47188627201357947,
"grad_norm": 0.01284122746437788,
"learning_rate": 0.0002489465988469802,
"loss": 0.0011,
"step": 278
},
{
"epoch": 0.47358370464672184,
"grad_norm": 0.03364328294992447,
"learning_rate": 0.0002485883508611858,
"loss": 0.0151,
"step": 279
},
{
"epoch": 0.4752811372798642,
"grad_norm": 0.005700011737644672,
"learning_rate": 0.00024822911009179276,
"loss": 0.0004,
"step": 280
},
{
"epoch": 0.4769785699130066,
"grad_norm": 0.026785628870129585,
"learning_rate": 0.0002478688801563516,
"loss": 0.0022,
"step": 281
},
{
"epoch": 0.47867600254614895,
"grad_norm": 0.014533424749970436,
"learning_rate": 0.00024750766468237387,
"loss": 0.0009,
"step": 282
},
{
"epoch": 0.4803734351792913,
"grad_norm": 0.02565724588930607,
"learning_rate": 0.0002471454673072953,
"loss": 0.0015,
"step": 283
},
{
"epoch": 0.4820708678124337,
"grad_norm": 0.01476586889475584,
"learning_rate": 0.0002467822916784394,
"loss": 0.0007,
"step": 284
},
{
"epoch": 0.48376830044557606,
"grad_norm": 0.0030139784794300795,
"learning_rate": 0.0002464181414529809,
"loss": 0.0001,
"step": 285
},
{
"epoch": 0.48546573307871843,
"grad_norm": 0.013711950741708279,
"learning_rate": 0.00024605302029790836,
"loss": 0.0002,
"step": 286
},
{
"epoch": 0.4871631657118608,
"grad_norm": 0.0045742918737232685,
"learning_rate": 0.00024568693188998776,
"loss": 0.0002,
"step": 287
},
{
"epoch": 0.48886059834500317,
"grad_norm": 0.01617550477385521,
"learning_rate": 0.00024531987991572543,
"loss": 0.0008,
"step": 288
},
{
"epoch": 0.49055803097814554,
"grad_norm": 0.0015017741825431585,
"learning_rate": 0.00024495186807133056,
"loss": 0.0001,
"step": 289
},
{
"epoch": 0.4922554636112879,
"grad_norm": 0.04141293093562126,
"learning_rate": 0.00024458290006267833,
"loss": 0.0021,
"step": 290
},
{
"epoch": 0.4939528962444303,
"grad_norm": 0.03005625680088997,
"learning_rate": 0.0002442129796052726,
"loss": 0.0015,
"step": 291
},
{
"epoch": 0.49565032887757265,
"grad_norm": 0.0004941977094858885,
"learning_rate": 0.00024384211042420822,
"loss": 0.0,
"step": 292
},
{
"epoch": 0.497347761510715,
"grad_norm": 0.0018400073749944568,
"learning_rate": 0.00024347029625413364,
"loss": 0.0001,
"step": 293
},
{
"epoch": 0.4990451941438574,
"grad_norm": 0.023964567109942436,
"learning_rate": 0.00024309754083921354,
"loss": 0.0008,
"step": 294
},
{
"epoch": 0.5007426267769998,
"grad_norm": 0.00808011181652546,
"learning_rate": 0.00024272384793309077,
"loss": 0.0003,
"step": 295
},
{
"epoch": 0.5024400594101421,
"grad_norm": 0.026506319642066956,
"learning_rate": 0.0002423492212988487,
"loss": 0.0011,
"step": 296
},
{
"epoch": 0.5041374920432845,
"grad_norm": 0.0033183887135237455,
"learning_rate": 0.0002419736647089735,
"loss": 0.0001,
"step": 297
},
{
"epoch": 0.5058349246764269,
"grad_norm": 0.0012288711732253432,
"learning_rate": 0.00024159718194531572,
"loss": 0.0001,
"step": 298
},
{
"epoch": 0.5075323573095692,
"grad_norm": 0.001825229381211102,
"learning_rate": 0.00024121977679905266,
"loss": 0.0001,
"step": 299
},
{
"epoch": 0.5092297899427116,
"grad_norm": 0.0013848728267475963,
"learning_rate": 0.00024084145307064997,
"loss": 0.0001,
"step": 300
},
{
"epoch": 0.5092297899427116,
"eval_loss": 0.408105731010437,
"eval_runtime": 65.5854,
"eval_samples_per_second": 2.943,
"eval_steps_per_second": 2.943,
"step": 300
},
{
"epoch": 0.510927222575854,
"grad_norm": 0.6500065922737122,
"learning_rate": 0.0002404622145698234,
"loss": 1.2109,
"step": 301
},
{
"epoch": 0.5126246552089964,
"grad_norm": 0.2953287661075592,
"learning_rate": 0.00024008206511550044,
"loss": 0.8961,
"step": 302
},
{
"epoch": 0.5143220878421387,
"grad_norm": 0.2599642276763916,
"learning_rate": 0.00023970100853578185,
"loss": 0.822,
"step": 303
},
{
"epoch": 0.5160195204752811,
"grad_norm": 0.22075462341308594,
"learning_rate": 0.00023931904866790317,
"loss": 0.7042,
"step": 304
},
{
"epoch": 0.5177169531084235,
"grad_norm": 0.1947249174118042,
"learning_rate": 0.00023893618935819607,
"loss": 0.6038,
"step": 305
},
{
"epoch": 0.5194143857415658,
"grad_norm": 0.1981605589389801,
"learning_rate": 0.00023855243446204946,
"loss": 0.5842,
"step": 306
},
{
"epoch": 0.5211118183747082,
"grad_norm": 0.25752243399620056,
"learning_rate": 0.00023816778784387094,
"loss": 0.7636,
"step": 307
},
{
"epoch": 0.5228092510078506,
"grad_norm": 0.20701764523983002,
"learning_rate": 0.00023778225337704772,
"loss": 0.5711,
"step": 308
},
{
"epoch": 0.524506683640993,
"grad_norm": 0.1890690177679062,
"learning_rate": 0.00023739583494390752,
"loss": 0.5323,
"step": 309
},
{
"epoch": 0.5262041162741353,
"grad_norm": 0.2634682059288025,
"learning_rate": 0.0002370085364356797,
"loss": 0.8929,
"step": 310
},
{
"epoch": 0.5279015489072777,
"grad_norm": 0.21140585839748383,
"learning_rate": 0.00023662036175245595,
"loss": 0.6722,
"step": 311
},
{
"epoch": 0.5295989815404201,
"grad_norm": 0.1461462825536728,
"learning_rate": 0.00023623131480315107,
"loss": 0.3355,
"step": 312
},
{
"epoch": 0.5312964141735624,
"grad_norm": 0.25913017988204956,
"learning_rate": 0.00023584139950546344,
"loss": 0.809,
"step": 313
},
{
"epoch": 0.5329938468067048,
"grad_norm": 0.20216749608516693,
"learning_rate": 0.0002354506197858358,
"loss": 0.522,
"step": 314
},
{
"epoch": 0.5346912794398472,
"grad_norm": 0.19447529315948486,
"learning_rate": 0.00023505897957941556,
"loss": 0.4756,
"step": 315
},
{
"epoch": 0.5363887120729895,
"grad_norm": 0.20887967944145203,
"learning_rate": 0.00023466648283001538,
"loss": 0.5348,
"step": 316
},
{
"epoch": 0.538086144706132,
"grad_norm": 0.2380082756280899,
"learning_rate": 0.000234273133490073,
"loss": 0.6786,
"step": 317
},
{
"epoch": 0.5397835773392744,
"grad_norm": 0.13057811558246613,
"learning_rate": 0.00023387893552061199,
"loss": 0.2262,
"step": 318
},
{
"epoch": 0.5414810099724168,
"grad_norm": 0.19371569156646729,
"learning_rate": 0.00023348389289120158,
"loss": 0.4282,
"step": 319
},
{
"epoch": 0.5431784426055591,
"grad_norm": 0.20126739144325256,
"learning_rate": 0.00023308800957991653,
"loss": 0.4524,
"step": 320
},
{
"epoch": 0.5448758752387015,
"grad_norm": 0.1577330082654953,
"learning_rate": 0.00023269128957329748,
"loss": 0.2979,
"step": 321
},
{
"epoch": 0.5465733078718439,
"grad_norm": 0.1581580936908722,
"learning_rate": 0.0002322937368663105,
"loss": 0.2393,
"step": 322
},
{
"epoch": 0.5482707405049863,
"grad_norm": 0.0836775079369545,
"learning_rate": 0.00023189535546230683,
"loss": 0.0756,
"step": 323
},
{
"epoch": 0.5499681731381286,
"grad_norm": 0.13577239215373993,
"learning_rate": 0.00023149614937298296,
"loss": 0.204,
"step": 324
},
{
"epoch": 0.551665605771271,
"grad_norm": 0.003654525149613619,
"learning_rate": 0.00023109612261833963,
"loss": 0.0001,
"step": 325
},
{
"epoch": 0.5533630384044134,
"grad_norm": 0.0016976917395368218,
"learning_rate": 0.00023069527922664186,
"loss": 0.0001,
"step": 326
},
{
"epoch": 0.5550604710375557,
"grad_norm": 0.006387701723724604,
"learning_rate": 0.00023029362323437818,
"loss": 0.0002,
"step": 327
},
{
"epoch": 0.5567579036706981,
"grad_norm": 0.0010255499510094523,
"learning_rate": 0.00022989115868621995,
"loss": 0.0001,
"step": 328
},
{
"epoch": 0.5584553363038405,
"grad_norm": 0.002511462429538369,
"learning_rate": 0.0002294878896349807,
"loss": 0.0001,
"step": 329
},
{
"epoch": 0.5601527689369828,
"grad_norm": 0.002243687631562352,
"learning_rate": 0.00022908382014157533,
"loss": 0.0001,
"step": 330
},
{
"epoch": 0.5618502015701252,
"grad_norm": 0.0010394121054559946,
"learning_rate": 0.00022867895427497914,
"loss": 0.0,
"step": 331
},
{
"epoch": 0.5635476342032676,
"grad_norm": 0.004921845626085997,
"learning_rate": 0.00022827329611218688,
"loss": 0.0002,
"step": 332
},
{
"epoch": 0.56524506683641,
"grad_norm": 0.006571977864950895,
"learning_rate": 0.0002278668497381718,
"loss": 0.0003,
"step": 333
},
{
"epoch": 0.5669424994695523,
"grad_norm": 0.06410548835992813,
"learning_rate": 0.00022745961924584428,
"loss": 0.0393,
"step": 334
},
{
"epoch": 0.5686399321026947,
"grad_norm": 0.0016587848076596856,
"learning_rate": 0.00022705160873601096,
"loss": 0.0001,
"step": 335
},
{
"epoch": 0.5703373647358371,
"grad_norm": 0.002642609179019928,
"learning_rate": 0.00022664282231733309,
"loss": 0.0001,
"step": 336
},
{
"epoch": 0.5720347973689794,
"grad_norm": 0.09959813952445984,
"learning_rate": 0.00022623326410628534,
"loss": 0.0009,
"step": 337
},
{
"epoch": 0.5737322300021218,
"grad_norm": 0.0017017674399539828,
"learning_rate": 0.00022582293822711444,
"loss": 0.0001,
"step": 338
},
{
"epoch": 0.5754296626352642,
"grad_norm": 0.0015052666421979666,
"learning_rate": 0.00022541184881179737,
"loss": 0.0001,
"step": 339
},
{
"epoch": 0.5771270952684066,
"grad_norm": 0.0034250568132847548,
"learning_rate": 0.000225,
"loss": 0.0001,
"step": 340
},
{
"epoch": 0.5788245279015489,
"grad_norm": 0.004533613566309214,
"learning_rate": 0.0002245873959390353,
"loss": 0.0001,
"step": 341
},
{
"epoch": 0.5805219605346913,
"grad_norm": 0.0014232480898499489,
"learning_rate": 0.00022417404078382152,
"loss": 0.0001,
"step": 342
},
{
"epoch": 0.5822193931678337,
"grad_norm": 0.0018840961856767535,
"learning_rate": 0.00022375993869684058,
"loss": 0.0,
"step": 343
},
{
"epoch": 0.583916825800976,
"grad_norm": 0.0005610976368188858,
"learning_rate": 0.00022334509384809584,
"loss": 0.0,
"step": 344
},
{
"epoch": 0.5856142584341184,
"grad_norm": 0.011308716610074043,
"learning_rate": 0.00022292951041507028,
"loss": 0.0003,
"step": 345
},
{
"epoch": 0.5873116910672608,
"grad_norm": 0.008396640419960022,
"learning_rate": 0.00022251319258268453,
"loss": 0.0001,
"step": 346
},
{
"epoch": 0.5890091237004031,
"grad_norm": 0.016598394140601158,
"learning_rate": 0.00022209614454325459,
"loss": 0.0002,
"step": 347
},
{
"epoch": 0.5907065563335455,
"grad_norm": 0.0011634851107373834,
"learning_rate": 0.00022167837049644947,
"loss": 0.0,
"step": 348
},
{
"epoch": 0.5924039889666879,
"grad_norm": 0.05063653737306595,
"learning_rate": 0.00022125987464924926,
"loss": 0.0006,
"step": 349
},
{
"epoch": 0.5941014215998303,
"grad_norm": 0.030548613518476486,
"learning_rate": 0.0002208406612159024,
"loss": 0.0005,
"step": 350
},
{
"epoch": 0.5941014215998303,
"eval_loss": 0.403373122215271,
"eval_runtime": 65.5747,
"eval_samples_per_second": 2.943,
"eval_steps_per_second": 2.943,
"step": 350
}
],
"logging_steps": 1,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 2
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.2671983590244352e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}