Training in progress, step 200, checkpoint
6df9989 verified
{
"best_metric": 1.9610345363616943,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.013199142055766375,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.599571027883188e-05,
"grad_norm": 0.2628660798072815,
"learning_rate": 1e-05,
"loss": 2.1505,
"step": 1
},
{
"epoch": 6.599571027883188e-05,
"eval_loss": 2.6243069171905518,
"eval_runtime": 456.5465,
"eval_samples_per_second": 55.898,
"eval_steps_per_second": 13.974,
"step": 1
},
{
"epoch": 0.00013199142055766376,
"grad_norm": 0.23186242580413818,
"learning_rate": 2e-05,
"loss": 2.081,
"step": 2
},
{
"epoch": 0.00019798713083649564,
"grad_norm": 0.2530168890953064,
"learning_rate": 3e-05,
"loss": 2.1339,
"step": 3
},
{
"epoch": 0.0002639828411153275,
"grad_norm": 0.23823846876621246,
"learning_rate": 4e-05,
"loss": 2.0768,
"step": 4
},
{
"epoch": 0.00032997855139415937,
"grad_norm": 0.220711350440979,
"learning_rate": 5e-05,
"loss": 2.0971,
"step": 5
},
{
"epoch": 0.00039597426167299127,
"grad_norm": 0.2290567308664322,
"learning_rate": 6e-05,
"loss": 2.1363,
"step": 6
},
{
"epoch": 0.0004619699719518231,
"grad_norm": 0.20521821081638336,
"learning_rate": 7e-05,
"loss": 2.1224,
"step": 7
},
{
"epoch": 0.000527965682230655,
"grad_norm": 0.17055945098400116,
"learning_rate": 8e-05,
"loss": 2.162,
"step": 8
},
{
"epoch": 0.0005939613925094869,
"grad_norm": 0.15676742792129517,
"learning_rate": 9e-05,
"loss": 2.1547,
"step": 9
},
{
"epoch": 0.0006599571027883187,
"grad_norm": 0.14696714282035828,
"learning_rate": 0.0001,
"loss": 2.1577,
"step": 10
},
{
"epoch": 0.0007259528130671506,
"grad_norm": 0.19276079535484314,
"learning_rate": 9.999316524962345e-05,
"loss": 2.1347,
"step": 11
},
{
"epoch": 0.0007919485233459825,
"grad_norm": 0.20708081126213074,
"learning_rate": 9.997266286704631e-05,
"loss": 2.1066,
"step": 12
},
{
"epoch": 0.0008579442336248144,
"grad_norm": 0.2281312346458435,
"learning_rate": 9.993849845741524e-05,
"loss": 2.1568,
"step": 13
},
{
"epoch": 0.0009239399439036462,
"grad_norm": 0.22357775270938873,
"learning_rate": 9.989068136093873e-05,
"loss": 2.1843,
"step": 14
},
{
"epoch": 0.000989935654182478,
"grad_norm": 0.20933884382247925,
"learning_rate": 9.98292246503335e-05,
"loss": 2.1477,
"step": 15
},
{
"epoch": 0.00105593136446131,
"grad_norm": 0.19181935489177704,
"learning_rate": 9.975414512725057e-05,
"loss": 2.1686,
"step": 16
},
{
"epoch": 0.0011219270747401418,
"grad_norm": 0.18410974740982056,
"learning_rate": 9.966546331768191e-05,
"loss": 2.1737,
"step": 17
},
{
"epoch": 0.0011879227850189738,
"grad_norm": 0.17988362908363342,
"learning_rate": 9.956320346634876e-05,
"loss": 2.1099,
"step": 18
},
{
"epoch": 0.0012539184952978057,
"grad_norm": 0.17499294877052307,
"learning_rate": 9.944739353007344e-05,
"loss": 2.1147,
"step": 19
},
{
"epoch": 0.0013199142055766375,
"grad_norm": 0.1744966208934784,
"learning_rate": 9.931806517013612e-05,
"loss": 2.1165,
"step": 20
},
{
"epoch": 0.0013859099158554694,
"grad_norm": 0.19104036688804626,
"learning_rate": 9.917525374361912e-05,
"loss": 2.1989,
"step": 21
},
{
"epoch": 0.0014519056261343012,
"grad_norm": 0.182933509349823,
"learning_rate": 9.901899829374047e-05,
"loss": 2.151,
"step": 22
},
{
"epoch": 0.0015179013364131331,
"grad_norm": 0.21169869601726532,
"learning_rate": 9.884934153917997e-05,
"loss": 2.2015,
"step": 23
},
{
"epoch": 0.001583897046691965,
"grad_norm": 0.2129669189453125,
"learning_rate": 9.86663298624003e-05,
"loss": 2.175,
"step": 24
},
{
"epoch": 0.0016498927569707968,
"grad_norm": 0.24831457436084747,
"learning_rate": 9.847001329696653e-05,
"loss": 2.1981,
"step": 25
},
{
"epoch": 0.0017158884672496288,
"grad_norm": 0.2276773303747177,
"learning_rate": 9.826044551386744e-05,
"loss": 2.1127,
"step": 26
},
{
"epoch": 0.0017818841775284608,
"grad_norm": 0.2360907346010208,
"learning_rate": 9.803768380684242e-05,
"loss": 2.1786,
"step": 27
},
{
"epoch": 0.0018478798878072925,
"grad_norm": 0.23979932069778442,
"learning_rate": 9.780178907671789e-05,
"loss": 2.1466,
"step": 28
},
{
"epoch": 0.0019138755980861245,
"grad_norm": 0.2607581615447998,
"learning_rate": 9.755282581475769e-05,
"loss": 2.2664,
"step": 29
},
{
"epoch": 0.001979871308364956,
"grad_norm": 0.24170605838298798,
"learning_rate": 9.729086208503174e-05,
"loss": 2.0581,
"step": 30
},
{
"epoch": 0.002045867018643788,
"grad_norm": 0.2630557119846344,
"learning_rate": 9.701596950580806e-05,
"loss": 2.2212,
"step": 31
},
{
"epoch": 0.00211186272892262,
"grad_norm": 0.26846829056739807,
"learning_rate": 9.672822322997305e-05,
"loss": 2.1043,
"step": 32
},
{
"epoch": 0.002177858439201452,
"grad_norm": 0.3070265054702759,
"learning_rate": 9.642770192448536e-05,
"loss": 2.1358,
"step": 33
},
{
"epoch": 0.0022438541494802836,
"grad_norm": 0.2925759553909302,
"learning_rate": 9.611448774886924e-05,
"loss": 2.1418,
"step": 34
},
{
"epoch": 0.0023098498597591156,
"grad_norm": 0.3119109570980072,
"learning_rate": 9.578866633275288e-05,
"loss": 2.0999,
"step": 35
},
{
"epoch": 0.0023758455700379475,
"grad_norm": 0.3403557538986206,
"learning_rate": 9.545032675245813e-05,
"loss": 2.2176,
"step": 36
},
{
"epoch": 0.0024418412803167795,
"grad_norm": 0.35925009846687317,
"learning_rate": 9.509956150664796e-05,
"loss": 2.1107,
"step": 37
},
{
"epoch": 0.0025078369905956114,
"grad_norm": 0.3792420029640198,
"learning_rate": 9.473646649103818e-05,
"loss": 2.1179,
"step": 38
},
{
"epoch": 0.002573832700874443,
"grad_norm": 0.3527684807777405,
"learning_rate": 9.43611409721806e-05,
"loss": 2.1567,
"step": 39
},
{
"epoch": 0.002639828411153275,
"grad_norm": 0.41421976685523987,
"learning_rate": 9.397368756032445e-05,
"loss": 2.078,
"step": 40
},
{
"epoch": 0.002705824121432107,
"grad_norm": 0.4361130893230438,
"learning_rate": 9.357421218136386e-05,
"loss": 2.1204,
"step": 41
},
{
"epoch": 0.002771819831710939,
"grad_norm": 0.4689408242702484,
"learning_rate": 9.316282404787871e-05,
"loss": 2.1709,
"step": 42
},
{
"epoch": 0.002837815541989771,
"grad_norm": 0.45768994092941284,
"learning_rate": 9.273963562927695e-05,
"loss": 2.0309,
"step": 43
},
{
"epoch": 0.0029038112522686023,
"grad_norm": 0.4785042405128479,
"learning_rate": 9.230476262104677e-05,
"loss": 2.0683,
"step": 44
},
{
"epoch": 0.0029698069625474343,
"grad_norm": 0.5563807487487793,
"learning_rate": 9.185832391312644e-05,
"loss": 2.1026,
"step": 45
},
{
"epoch": 0.0030358026728262663,
"grad_norm": 0.608992338180542,
"learning_rate": 9.140044155740101e-05,
"loss": 2.1863,
"step": 46
},
{
"epoch": 0.0031017983831050982,
"grad_norm": 0.7556280493736267,
"learning_rate": 9.093124073433463e-05,
"loss": 2.2082,
"step": 47
},
{
"epoch": 0.00316779409338393,
"grad_norm": 0.7814421653747559,
"learning_rate": 9.045084971874738e-05,
"loss": 2.1169,
"step": 48
},
{
"epoch": 0.0032337898036627617,
"grad_norm": 0.9336566925048828,
"learning_rate": 8.995939984474624e-05,
"loss": 2.1759,
"step": 49
},
{
"epoch": 0.0032997855139415937,
"grad_norm": 1.9087984561920166,
"learning_rate": 8.945702546981969e-05,
"loss": 2.5827,
"step": 50
},
{
"epoch": 0.0032997855139415937,
"eval_loss": 2.83476185798645,
"eval_runtime": 456.6197,
"eval_samples_per_second": 55.889,
"eval_steps_per_second": 13.972,
"step": 50
},
{
"epoch": 0.0033657812242204256,
"grad_norm": 4.907052993774414,
"learning_rate": 8.894386393810563e-05,
"loss": 2.8831,
"step": 51
},
{
"epoch": 0.0034317769344992576,
"grad_norm": 4.347491264343262,
"learning_rate": 8.842005554284296e-05,
"loss": 2.7933,
"step": 52
},
{
"epoch": 0.0034977726447780895,
"grad_norm": 2.914515972137451,
"learning_rate": 8.788574348801675e-05,
"loss": 2.4865,
"step": 53
},
{
"epoch": 0.0035637683550569215,
"grad_norm": 1.6698929071426392,
"learning_rate": 8.73410738492077e-05,
"loss": 2.2574,
"step": 54
},
{
"epoch": 0.003629764065335753,
"grad_norm": 0.9714609384536743,
"learning_rate": 8.678619553365659e-05,
"loss": 2.1036,
"step": 55
},
{
"epoch": 0.003695759775614585,
"grad_norm": 0.6560160517692566,
"learning_rate": 8.622126023955446e-05,
"loss": 2.0417,
"step": 56
},
{
"epoch": 0.003761755485893417,
"grad_norm": 0.39631447196006775,
"learning_rate": 8.564642241456986e-05,
"loss": 2.0384,
"step": 57
},
{
"epoch": 0.003827751196172249,
"grad_norm": 0.30650341510772705,
"learning_rate": 8.506183921362443e-05,
"loss": 2.0339,
"step": 58
},
{
"epoch": 0.003893746906451081,
"grad_norm": 0.3017640709877014,
"learning_rate": 8.44676704559283e-05,
"loss": 2.0568,
"step": 59
},
{
"epoch": 0.003959742616729912,
"grad_norm": 0.30954188108444214,
"learning_rate": 8.386407858128706e-05,
"loss": 2.0166,
"step": 60
},
{
"epoch": 0.004025738327008745,
"grad_norm": 0.3095334768295288,
"learning_rate": 8.32512286056924e-05,
"loss": 2.0237,
"step": 61
},
{
"epoch": 0.004091734037287576,
"grad_norm": 0.3569750487804413,
"learning_rate": 8.262928807620843e-05,
"loss": 2.0509,
"step": 62
},
{
"epoch": 0.004157729747566408,
"grad_norm": 0.31211531162261963,
"learning_rate": 8.199842702516583e-05,
"loss": 1.9961,
"step": 63
},
{
"epoch": 0.00422372545784524,
"grad_norm": 0.3285423219203949,
"learning_rate": 8.135881792367686e-05,
"loss": 2.0391,
"step": 64
},
{
"epoch": 0.004289721168124072,
"grad_norm": 0.26709312200546265,
"learning_rate": 8.07106356344834e-05,
"loss": 2.0335,
"step": 65
},
{
"epoch": 0.004355716878402904,
"grad_norm": 0.2849353849887848,
"learning_rate": 8.005405736415126e-05,
"loss": 2.0163,
"step": 66
},
{
"epoch": 0.004421712588681736,
"grad_norm": 0.2928564250469208,
"learning_rate": 7.938926261462366e-05,
"loss": 2.054,
"step": 67
},
{
"epoch": 0.004487708298960567,
"grad_norm": 0.22843998670578003,
"learning_rate": 7.871643313414718e-05,
"loss": 1.9836,
"step": 68
},
{
"epoch": 0.0045537040092394,
"grad_norm": 0.21079106628894806,
"learning_rate": 7.803575286758364e-05,
"loss": 1.9894,
"step": 69
},
{
"epoch": 0.004619699719518231,
"grad_norm": 0.22297044098377228,
"learning_rate": 7.734740790612136e-05,
"loss": 2.0042,
"step": 70
},
{
"epoch": 0.0046856954297970635,
"grad_norm": 0.2284521460533142,
"learning_rate": 7.66515864363997e-05,
"loss": 2.0532,
"step": 71
},
{
"epoch": 0.004751691140075895,
"grad_norm": 0.23351223766803741,
"learning_rate": 7.594847868906076e-05,
"loss": 2.0598,
"step": 72
},
{
"epoch": 0.004817686850354727,
"grad_norm": 0.24678663909435272,
"learning_rate": 7.52382768867422e-05,
"loss": 2.0789,
"step": 73
},
{
"epoch": 0.004883682560633559,
"grad_norm": 0.2197660356760025,
"learning_rate": 7.452117519152542e-05,
"loss": 2.0176,
"step": 74
},
{
"epoch": 0.0049496782709123905,
"grad_norm": 0.22512337565422058,
"learning_rate": 7.379736965185368e-05,
"loss": 1.9777,
"step": 75
},
{
"epoch": 0.005015673981191223,
"grad_norm": 0.24273715913295746,
"learning_rate": 7.30670581489344e-05,
"loss": 2.0729,
"step": 76
},
{
"epoch": 0.005081669691470054,
"grad_norm": 0.2609364688396454,
"learning_rate": 7.233044034264034e-05,
"loss": 2.0301,
"step": 77
},
{
"epoch": 0.005147665401748886,
"grad_norm": 0.257575124502182,
"learning_rate": 7.158771761692464e-05,
"loss": 1.9888,
"step": 78
},
{
"epoch": 0.005213661112027718,
"grad_norm": 0.2791874408721924,
"learning_rate": 7.083909302476453e-05,
"loss": 2.0367,
"step": 79
},
{
"epoch": 0.00527965682230655,
"grad_norm": 0.2933448553085327,
"learning_rate": 7.008477123264848e-05,
"loss": 2.0366,
"step": 80
},
{
"epoch": 0.005345652532585382,
"grad_norm": 0.2926257252693176,
"learning_rate": 6.932495846462261e-05,
"loss": 2.0418,
"step": 81
},
{
"epoch": 0.005411648242864214,
"grad_norm": 0.2932702600955963,
"learning_rate": 6.855986244591104e-05,
"loss": 1.9664,
"step": 82
},
{
"epoch": 0.005477643953143045,
"grad_norm": 0.3120938241481781,
"learning_rate": 6.778969234612584e-05,
"loss": 2.0604,
"step": 83
},
{
"epoch": 0.005543639663421878,
"grad_norm": 0.3065866231918335,
"learning_rate": 6.701465872208216e-05,
"loss": 2.0618,
"step": 84
},
{
"epoch": 0.005609635373700709,
"grad_norm": 0.3389799892902374,
"learning_rate": 6.623497346023418e-05,
"loss": 2.0082,
"step": 85
},
{
"epoch": 0.005675631083979542,
"grad_norm": 0.3524992763996124,
"learning_rate": 6.545084971874738e-05,
"loss": 2.067,
"step": 86
},
{
"epoch": 0.005741626794258373,
"grad_norm": 0.36559221148490906,
"learning_rate": 6.466250186922325e-05,
"loss": 1.9979,
"step": 87
},
{
"epoch": 0.005807622504537205,
"grad_norm": 0.3870045840740204,
"learning_rate": 6.387014543809223e-05,
"loss": 2.0948,
"step": 88
},
{
"epoch": 0.005873618214816037,
"grad_norm": 0.4148994982242584,
"learning_rate": 6.307399704769099e-05,
"loss": 2.1834,
"step": 89
},
{
"epoch": 0.005939613925094869,
"grad_norm": 0.4298236072063446,
"learning_rate": 6.227427435703997e-05,
"loss": 2.0583,
"step": 90
},
{
"epoch": 0.006005609635373701,
"grad_norm": 0.46022361516952515,
"learning_rate": 6.147119600233758e-05,
"loss": 2.0721,
"step": 91
},
{
"epoch": 0.0060716053456525325,
"grad_norm": 0.5010573863983154,
"learning_rate": 6.066498153718735e-05,
"loss": 2.0968,
"step": 92
},
{
"epoch": 0.006137601055931364,
"grad_norm": 0.563363790512085,
"learning_rate": 5.985585137257401e-05,
"loss": 2.1121,
"step": 93
},
{
"epoch": 0.0062035967662101964,
"grad_norm": 0.5774351358413696,
"learning_rate": 5.90440267166055e-05,
"loss": 2.0028,
"step": 94
},
{
"epoch": 0.006269592476489028,
"grad_norm": 0.6517067551612854,
"learning_rate": 5.8229729514036705e-05,
"loss": 2.2008,
"step": 95
},
{
"epoch": 0.00633558818676786,
"grad_norm": 0.7009293437004089,
"learning_rate": 5.74131823855921e-05,
"loss": 2.1267,
"step": 96
},
{
"epoch": 0.006401583897046692,
"grad_norm": 0.7464205026626587,
"learning_rate": 5.6594608567103456e-05,
"loss": 1.9291,
"step": 97
},
{
"epoch": 0.006467579607325523,
"grad_norm": 1.0588631629943848,
"learning_rate": 5.577423184847932e-05,
"loss": 2.1716,
"step": 98
},
{
"epoch": 0.006533575317604356,
"grad_norm": 1.4275753498077393,
"learning_rate": 5.495227651252315e-05,
"loss": 2.1652,
"step": 99
},
{
"epoch": 0.006599571027883187,
"grad_norm": 2.1202917098999023,
"learning_rate": 5.4128967273616625e-05,
"loss": 2.0571,
"step": 100
},
{
"epoch": 0.006599571027883187,
"eval_loss": 2.0340936183929443,
"eval_runtime": 456.7891,
"eval_samples_per_second": 55.868,
"eval_steps_per_second": 13.967,
"step": 100
},
{
"epoch": 0.00666556673816202,
"grad_norm": 0.39126256108283997,
"learning_rate": 5.330452921628497e-05,
"loss": 2.0195,
"step": 101
},
{
"epoch": 0.006731562448440851,
"grad_norm": 0.4759884774684906,
"learning_rate": 5.247918773366112e-05,
"loss": 2.022,
"step": 102
},
{
"epoch": 0.006797558158719684,
"grad_norm": 0.5554403066635132,
"learning_rate": 5.165316846586541e-05,
"loss": 2.0095,
"step": 103
},
{
"epoch": 0.006863553868998515,
"grad_norm": 0.5975823998451233,
"learning_rate": 5.0826697238317935e-05,
"loss": 1.9859,
"step": 104
},
{
"epoch": 0.006929549579277347,
"grad_norm": 0.6069150567054749,
"learning_rate": 5e-05,
"loss": 2.0182,
"step": 105
},
{
"epoch": 0.006995545289556179,
"grad_norm": 0.6009635329246521,
"learning_rate": 4.917330276168208e-05,
"loss": 2.0361,
"step": 106
},
{
"epoch": 0.007061540999835011,
"grad_norm": 0.6053978204727173,
"learning_rate": 4.834683153413459e-05,
"loss": 2.0505,
"step": 107
},
{
"epoch": 0.007127536710113843,
"grad_norm": 0.5334131717681885,
"learning_rate": 4.7520812266338885e-05,
"loss": 2.009,
"step": 108
},
{
"epoch": 0.0071935324203926745,
"grad_norm": 0.45399123430252075,
"learning_rate": 4.669547078371504e-05,
"loss": 1.9958,
"step": 109
},
{
"epoch": 0.007259528130671506,
"grad_norm": 0.408033549785614,
"learning_rate": 4.5871032726383386e-05,
"loss": 1.946,
"step": 110
},
{
"epoch": 0.0073255238409503385,
"grad_norm": 0.3544064164161682,
"learning_rate": 4.504772348747687e-05,
"loss": 1.9544,
"step": 111
},
{
"epoch": 0.00739151955122917,
"grad_norm": 0.32116737961769104,
"learning_rate": 4.4225768151520694e-05,
"loss": 1.9743,
"step": 112
},
{
"epoch": 0.007457515261508002,
"grad_norm": 0.26409533619880676,
"learning_rate": 4.3405391432896555e-05,
"loss": 1.9423,
"step": 113
},
{
"epoch": 0.007523510971786834,
"grad_norm": 0.23238809406757355,
"learning_rate": 4.2586817614407895e-05,
"loss": 2.0055,
"step": 114
},
{
"epoch": 0.0075895066820656654,
"grad_norm": 0.2379342019557953,
"learning_rate": 4.17702704859633e-05,
"loss": 1.9945,
"step": 115
},
{
"epoch": 0.007655502392344498,
"grad_norm": 0.28395143151283264,
"learning_rate": 4.095597328339452e-05,
"loss": 1.976,
"step": 116
},
{
"epoch": 0.007721498102623329,
"grad_norm": 0.23083075881004333,
"learning_rate": 4.0144148627425993e-05,
"loss": 1.9563,
"step": 117
},
{
"epoch": 0.007787493812902162,
"grad_norm": 0.24074456095695496,
"learning_rate": 3.933501846281267e-05,
"loss": 1.9375,
"step": 118
},
{
"epoch": 0.007853489523180993,
"grad_norm": 0.26923075318336487,
"learning_rate": 3.852880399766243e-05,
"loss": 1.9549,
"step": 119
},
{
"epoch": 0.007919485233459825,
"grad_norm": 0.2566552758216858,
"learning_rate": 3.772572564296005e-05,
"loss": 1.997,
"step": 120
},
{
"epoch": 0.007985480943738656,
"grad_norm": 0.22760069370269775,
"learning_rate": 3.6926002952309016e-05,
"loss": 2.0493,
"step": 121
},
{
"epoch": 0.00805147665401749,
"grad_norm": 0.22382082045078278,
"learning_rate": 3.612985456190778e-05,
"loss": 1.9467,
"step": 122
},
{
"epoch": 0.008117472364296321,
"grad_norm": 0.24285265803337097,
"learning_rate": 3.533749813077677e-05,
"loss": 1.9834,
"step": 123
},
{
"epoch": 0.008183468074575153,
"grad_norm": 0.24671024084091187,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.9716,
"step": 124
},
{
"epoch": 0.008249463784853984,
"grad_norm": 0.24303311109542847,
"learning_rate": 3.3765026539765834e-05,
"loss": 1.9536,
"step": 125
},
{
"epoch": 0.008315459495132816,
"grad_norm": 0.25841909646987915,
"learning_rate": 3.298534127791785e-05,
"loss": 1.9818,
"step": 126
},
{
"epoch": 0.008381455205411649,
"grad_norm": 0.24851860105991364,
"learning_rate": 3.221030765387417e-05,
"loss": 1.9619,
"step": 127
},
{
"epoch": 0.00844745091569048,
"grad_norm": 0.25422680377960205,
"learning_rate": 3.144013755408895e-05,
"loss": 2.0336,
"step": 128
},
{
"epoch": 0.008513446625969312,
"grad_norm": 0.2750159800052643,
"learning_rate": 3.0675041535377405e-05,
"loss": 1.9945,
"step": 129
},
{
"epoch": 0.008579442336248144,
"grad_norm": 0.2722633481025696,
"learning_rate": 2.991522876735154e-05,
"loss": 2.0789,
"step": 130
},
{
"epoch": 0.008645438046526975,
"grad_norm": 0.28856736421585083,
"learning_rate": 2.916090697523549e-05,
"loss": 1.9935,
"step": 131
},
{
"epoch": 0.008711433756805808,
"grad_norm": 0.3116317689418793,
"learning_rate": 2.8412282383075363e-05,
"loss": 1.9696,
"step": 132
},
{
"epoch": 0.00877742946708464,
"grad_norm": 0.2954542636871338,
"learning_rate": 2.766955965735968e-05,
"loss": 1.9318,
"step": 133
},
{
"epoch": 0.008843425177363471,
"grad_norm": 0.29408156871795654,
"learning_rate": 2.693294185106562e-05,
"loss": 2.0578,
"step": 134
},
{
"epoch": 0.008909420887642303,
"grad_norm": 0.34558582305908203,
"learning_rate": 2.6202630348146324e-05,
"loss": 1.9598,
"step": 135
},
{
"epoch": 0.008975416597921134,
"grad_norm": 0.3509218394756317,
"learning_rate": 2.547882480847461e-05,
"loss": 1.9991,
"step": 136
},
{
"epoch": 0.009041412308199968,
"grad_norm": 0.3553852438926697,
"learning_rate": 2.476172311325783e-05,
"loss": 1.9513,
"step": 137
},
{
"epoch": 0.0091074080184788,
"grad_norm": 0.36894935369491577,
"learning_rate": 2.405152131093926e-05,
"loss": 1.988,
"step": 138
},
{
"epoch": 0.00917340372875763,
"grad_norm": 0.39475688338279724,
"learning_rate": 2.3348413563600325e-05,
"loss": 1.9913,
"step": 139
},
{
"epoch": 0.009239399439036462,
"grad_norm": 0.45494693517684937,
"learning_rate": 2.2652592093878666e-05,
"loss": 2.0226,
"step": 140
},
{
"epoch": 0.009305395149315294,
"grad_norm": 0.4353376030921936,
"learning_rate": 2.196424713241637e-05,
"loss": 2.0534,
"step": 141
},
{
"epoch": 0.009371390859594127,
"grad_norm": 0.4937349855899811,
"learning_rate": 2.128356686585282e-05,
"loss": 1.9736,
"step": 142
},
{
"epoch": 0.009437386569872959,
"grad_norm": 0.5059090852737427,
"learning_rate": 2.061073738537635e-05,
"loss": 2.0222,
"step": 143
},
{
"epoch": 0.00950338228015179,
"grad_norm": 0.5827047228813171,
"learning_rate": 1.9945942635848748e-05,
"loss": 1.9906,
"step": 144
},
{
"epoch": 0.009569377990430622,
"grad_norm": 0.5805647969245911,
"learning_rate": 1.928936436551661e-05,
"loss": 1.9321,
"step": 145
},
{
"epoch": 0.009635373700709453,
"grad_norm": 0.6282228827476501,
"learning_rate": 1.8641182076323148e-05,
"loss": 2.1064,
"step": 146
},
{
"epoch": 0.009701369410988286,
"grad_norm": 0.7631902694702148,
"learning_rate": 1.800157297483417e-05,
"loss": 2.0535,
"step": 147
},
{
"epoch": 0.009767365121267118,
"grad_norm": 0.8592758178710938,
"learning_rate": 1.7370711923791567e-05,
"loss": 2.0957,
"step": 148
},
{
"epoch": 0.00983336083154595,
"grad_norm": 1.1513166427612305,
"learning_rate": 1.6748771394307585e-05,
"loss": 2.0666,
"step": 149
},
{
"epoch": 0.009899356541824781,
"grad_norm": 1.6363484859466553,
"learning_rate": 1.6135921418712956e-05,
"loss": 2.2042,
"step": 150
},
{
"epoch": 0.009899356541824781,
"eval_loss": 1.96856689453125,
"eval_runtime": 457.1206,
"eval_samples_per_second": 55.828,
"eval_steps_per_second": 13.957,
"step": 150
},
{
"epoch": 0.009965352252103613,
"grad_norm": 0.2069941759109497,
"learning_rate": 1.553232954407171e-05,
"loss": 1.9224,
"step": 151
},
{
"epoch": 0.010031347962382446,
"grad_norm": 0.20309697091579437,
"learning_rate": 1.4938160786375572e-05,
"loss": 1.9403,
"step": 152
},
{
"epoch": 0.010097343672661277,
"grad_norm": 0.211232990026474,
"learning_rate": 1.435357758543015e-05,
"loss": 1.9537,
"step": 153
},
{
"epoch": 0.010163339382940109,
"grad_norm": 0.20085665583610535,
"learning_rate": 1.3778739760445552e-05,
"loss": 1.9565,
"step": 154
},
{
"epoch": 0.01022933509321894,
"grad_norm": 0.19230183959007263,
"learning_rate": 1.3213804466343421e-05,
"loss": 1.9575,
"step": 155
},
{
"epoch": 0.010295330803497772,
"grad_norm": 0.2169540673494339,
"learning_rate": 1.2658926150792322e-05,
"loss": 1.9237,
"step": 156
},
{
"epoch": 0.010361326513776605,
"grad_norm": 0.20325788855552673,
"learning_rate": 1.2114256511983274e-05,
"loss": 1.9356,
"step": 157
},
{
"epoch": 0.010427322224055437,
"grad_norm": 0.20521485805511475,
"learning_rate": 1.157994445715706e-05,
"loss": 1.9332,
"step": 158
},
{
"epoch": 0.010493317934334268,
"grad_norm": 0.209978848695755,
"learning_rate": 1.1056136061894384e-05,
"loss": 1.9729,
"step": 159
},
{
"epoch": 0.0105593136446131,
"grad_norm": 0.2274225503206253,
"learning_rate": 1.0542974530180327e-05,
"loss": 1.9424,
"step": 160
},
{
"epoch": 0.010625309354891931,
"grad_norm": 0.22242596745491028,
"learning_rate": 1.0040600155253765e-05,
"loss": 1.9248,
"step": 161
},
{
"epoch": 0.010691305065170765,
"grad_norm": 0.19525201618671417,
"learning_rate": 9.549150281252633e-06,
"loss": 1.9679,
"step": 162
},
{
"epoch": 0.010757300775449596,
"grad_norm": 0.21084603667259216,
"learning_rate": 9.068759265665384e-06,
"loss": 1.9204,
"step": 163
},
{
"epoch": 0.010823296485728428,
"grad_norm": 0.2283700853586197,
"learning_rate": 8.599558442598998e-06,
"loss": 1.9373,
"step": 164
},
{
"epoch": 0.010889292196007259,
"grad_norm": 0.21563678979873657,
"learning_rate": 8.141676086873572e-06,
"loss": 1.9136,
"step": 165
},
{
"epoch": 0.01095528790628609,
"grad_norm": 0.22769160568714142,
"learning_rate": 7.695237378953223e-06,
"loss": 1.9514,
"step": 166
},
{
"epoch": 0.011021283616564924,
"grad_norm": 0.2206595093011856,
"learning_rate": 7.260364370723044e-06,
"loss": 1.9576,
"step": 167
},
{
"epoch": 0.011087279326843755,
"grad_norm": 0.23338423669338226,
"learning_rate": 6.837175952121306e-06,
"loss": 1.9714,
"step": 168
},
{
"epoch": 0.011153275037122587,
"grad_norm": 0.22528231143951416,
"learning_rate": 6.425787818636131e-06,
"loss": 1.9654,
"step": 169
},
{
"epoch": 0.011219270747401418,
"grad_norm": 0.2464929074048996,
"learning_rate": 6.026312439675552e-06,
"loss": 1.8808,
"step": 170
},
{
"epoch": 0.01128526645768025,
"grad_norm": 0.2448563426733017,
"learning_rate": 5.6388590278194096e-06,
"loss": 1.963,
"step": 171
},
{
"epoch": 0.011351262167959083,
"grad_norm": 0.23899894952774048,
"learning_rate": 5.263533508961827e-06,
"loss": 1.9396,
"step": 172
},
{
"epoch": 0.011417257878237915,
"grad_norm": 0.2452314794063568,
"learning_rate": 4.900438493352055e-06,
"loss": 1.9586,
"step": 173
},
{
"epoch": 0.011483253588516746,
"grad_norm": 0.2673238515853882,
"learning_rate": 4.549673247541875e-06,
"loss": 1.979,
"step": 174
},
{
"epoch": 0.011549249298795578,
"grad_norm": 0.2756348252296448,
"learning_rate": 4.2113336672471245e-06,
"loss": 1.9118,
"step": 175
},
{
"epoch": 0.01161524500907441,
"grad_norm": 0.27247703075408936,
"learning_rate": 3.885512251130763e-06,
"loss": 1.9681,
"step": 176
},
{
"epoch": 0.011681240719353243,
"grad_norm": 0.2777453362941742,
"learning_rate": 3.5722980755146517e-06,
"loss": 1.9151,
"step": 177
},
{
"epoch": 0.011747236429632074,
"grad_norm": 0.28349244594573975,
"learning_rate": 3.271776770026963e-06,
"loss": 1.8951,
"step": 178
},
{
"epoch": 0.011813232139910906,
"grad_norm": 0.28139713406562805,
"learning_rate": 2.9840304941919415e-06,
"loss": 1.9822,
"step": 179
},
{
"epoch": 0.011879227850189737,
"grad_norm": 0.30852484703063965,
"learning_rate": 2.7091379149682685e-06,
"loss": 1.9636,
"step": 180
},
{
"epoch": 0.011945223560468569,
"grad_norm": 0.311739057302475,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.9987,
"step": 181
},
{
"epoch": 0.012011219270747402,
"grad_norm": 0.3112499713897705,
"learning_rate": 2.1982109232821178e-06,
"loss": 1.9355,
"step": 182
},
{
"epoch": 0.012077214981026234,
"grad_norm": 0.3160371482372284,
"learning_rate": 1.962316193157593e-06,
"loss": 2.0306,
"step": 183
},
{
"epoch": 0.012143210691305065,
"grad_norm": 0.3401806354522705,
"learning_rate": 1.7395544861325718e-06,
"loss": 1.9955,
"step": 184
},
{
"epoch": 0.012209206401583897,
"grad_norm": 0.3310815393924713,
"learning_rate": 1.5299867030334814e-06,
"loss": 1.9918,
"step": 185
},
{
"epoch": 0.012275202111862728,
"grad_norm": 0.348056823015213,
"learning_rate": 1.333670137599713e-06,
"loss": 1.951,
"step": 186
},
{
"epoch": 0.012341197822141561,
"grad_norm": 0.35894066095352173,
"learning_rate": 1.1506584608200367e-06,
"loss": 2.0125,
"step": 187
},
{
"epoch": 0.012407193532420393,
"grad_norm": 0.36076369881629944,
"learning_rate": 9.810017062595322e-07,
"loss": 1.8952,
"step": 188
},
{
"epoch": 0.012473189242699224,
"grad_norm": 0.40533554553985596,
"learning_rate": 8.247462563808817e-07,
"loss": 2.0012,
"step": 189
},
{
"epoch": 0.012539184952978056,
"grad_norm": 0.3923289477825165,
"learning_rate": 6.819348298638839e-07,
"loss": 2.022,
"step": 190
},
{
"epoch": 0.012605180663256887,
"grad_norm": 0.43101248145103455,
"learning_rate": 5.526064699265753e-07,
"loss": 1.9242,
"step": 191
},
{
"epoch": 0.01267117637353572,
"grad_norm": 0.4599561393260956,
"learning_rate": 4.367965336512403e-07,
"loss": 1.8971,
"step": 192
},
{
"epoch": 0.012737172083814552,
"grad_norm": 0.45019298791885376,
"learning_rate": 3.3453668231809286e-07,
"loss": 1.9172,
"step": 193
},
{
"epoch": 0.012803167794093384,
"grad_norm": 0.4972461462020874,
"learning_rate": 2.458548727494292e-07,
"loss": 1.9852,
"step": 194
},
{
"epoch": 0.012869163504372215,
"grad_norm": 0.5501299500465393,
"learning_rate": 1.7077534966650766e-07,
"loss": 2.0531,
"step": 195
},
{
"epoch": 0.012935159214651047,
"grad_norm": 0.6353976726531982,
"learning_rate": 1.0931863906127327e-07,
"loss": 2.0832,
"step": 196
},
{
"epoch": 0.01300115492492988,
"grad_norm": 0.6472721099853516,
"learning_rate": 6.150154258476315e-08,
"loss": 2.0793,
"step": 197
},
{
"epoch": 0.013067150635208712,
"grad_norm": 0.7353078722953796,
"learning_rate": 2.7337132953697554e-08,
"loss": 2.073,
"step": 198
},
{
"epoch": 0.013133146345487543,
"grad_norm": 0.9416328072547913,
"learning_rate": 6.834750376549792e-09,
"loss": 2.0497,
"step": 199
},
{
"epoch": 0.013199142055766375,
"grad_norm": 1.7521336078643799,
"learning_rate": 0.0,
"loss": 1.8635,
"step": 200
},
{
"epoch": 0.013199142055766375,
"eval_loss": 1.9610345363616943,
"eval_runtime": 457.5029,
"eval_samples_per_second": 55.781,
"eval_steps_per_second": 13.945,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.483677980688384e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
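
For reference, a minimal sketch (not part of the checkpoint itself) of how a `trainer_state.json` like the one above can be inspected offline with only the Python standard library. The local path is an assumption derived from `best_model_checkpoint`; adjust it to wherever the checkpoint files were downloaded.

```python
# Sketch: summarize training/eval history from a Transformers trainer_state.json.
# STATE_PATH is a hypothetical local path; point it at the real checkpoint folder.
import json

STATE_PATH = "miner_id_24/checkpoint-200/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes per-step training entries ("loss") and eval entries ("eval_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss {e['eval_loss']:.4f}")

last = train_logs[-1]
print(f"final train loss {last['loss']:.4f} at step {last['step']} of {state['max_steps']}")
```

Run against this state, the loop would print the five evaluation points recorded above (steps 1, 50, 100, 150, 200), ending with the best eval_loss of 1.9610 at checkpoint-200.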