zephyr-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 1175,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 3.3722460551119577,
"learning_rate": 1.6949152542372883e-07,
"loss": 0.4424,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 3.3291982249982532,
"learning_rate": 8.474576271186441e-07,
"loss": 0.521,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 3.366718903705411,
"learning_rate": 1.6949152542372882e-06,
"loss": 0.5267,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 3.743534573457503,
"learning_rate": 2.5423728813559323e-06,
"loss": 0.5054,
"step": 15
},
{
"epoch": 0.09,
"grad_norm": 2.6726105105201854,
"learning_rate": 3.3898305084745763e-06,
"loss": 0.5138,
"step": 20
},
{
"epoch": 0.11,
"grad_norm": 2.5592230066929766,
"learning_rate": 4.23728813559322e-06,
"loss": 0.4656,
"step": 25
},
{
"epoch": 0.13,
"grad_norm": 2.5555759369628186,
"learning_rate": 5.084745762711865e-06,
"loss": 0.4712,
"step": 30
},
{
"epoch": 0.15,
"grad_norm": 2.5046298800000617,
"learning_rate": 5.932203389830509e-06,
"loss": 0.4611,
"step": 35
},
{
"epoch": 0.17,
"grad_norm": 2.7136428613817194,
"learning_rate": 6.779661016949153e-06,
"loss": 0.4541,
"step": 40
},
{
"epoch": 0.19,
"grad_norm": 3.010241923008894,
"learning_rate": 7.627118644067797e-06,
"loss": 0.4781,
"step": 45
},
{
"epoch": 0.21,
"grad_norm": 2.475959902357205,
"learning_rate": 8.47457627118644e-06,
"loss": 0.442,
"step": 50
},
{
"epoch": 0.23,
"grad_norm": 3.128773493576812,
"learning_rate": 9.322033898305085e-06,
"loss": 0.426,
"step": 55
},
{
"epoch": 0.26,
"grad_norm": 3.1009164137521252,
"learning_rate": 1.016949152542373e-05,
"loss": 0.4958,
"step": 60
},
{
"epoch": 0.28,
"grad_norm": 2.808266593808473,
"learning_rate": 1.1016949152542374e-05,
"loss": 0.46,
"step": 65
},
{
"epoch": 0.3,
"grad_norm": 2.6760608859412054,
"learning_rate": 1.1864406779661018e-05,
"loss": 0.4929,
"step": 70
},
{
"epoch": 0.32,
"grad_norm": 3.900783318495709,
"learning_rate": 1.2711864406779661e-05,
"loss": 0.4618,
"step": 75
},
{
"epoch": 0.34,
"grad_norm": 2.524216832199052,
"learning_rate": 1.3559322033898305e-05,
"loss": 0.4588,
"step": 80
},
{
"epoch": 0.36,
"grad_norm": 60.979865346510024,
"learning_rate": 1.440677966101695e-05,
"loss": 0.6492,
"step": 85
},
{
"epoch": 0.38,
"grad_norm": 15.672327487548742,
"learning_rate": 1.5254237288135594e-05,
"loss": 0.6082,
"step": 90
},
{
"epoch": 0.4,
"grad_norm": 4.6849329984217345,
"learning_rate": 1.6101694915254237e-05,
"loss": 0.5659,
"step": 95
},
{
"epoch": 0.43,
"grad_norm": 7.354203430908385,
"learning_rate": 1.694915254237288e-05,
"loss": 0.5671,
"step": 100
},
{
"epoch": 0.45,
"grad_norm": 13.233658769725766,
"learning_rate": 1.7796610169491526e-05,
"loss": 0.6546,
"step": 105
},
{
"epoch": 0.47,
"grad_norm": 8.816187325352404,
"learning_rate": 1.864406779661017e-05,
"loss": 0.6247,
"step": 110
},
{
"epoch": 0.49,
"grad_norm": 110.07956321249138,
"learning_rate": 1.9491525423728814e-05,
"loss": 0.6283,
"step": 115
},
{
"epoch": 0.51,
"grad_norm": 9.519856297335076,
"learning_rate": 1.999982332362188e-05,
"loss": 0.5585,
"step": 120
},
{
"epoch": 0.53,
"grad_norm": 7.0894189196386375,
"learning_rate": 1.999783578606323e-05,
"loss": 0.5264,
"step": 125
},
{
"epoch": 0.55,
"grad_norm": 5.230200581327651,
"learning_rate": 1.999364030586835e-05,
"loss": 0.5317,
"step": 130
},
{
"epoch": 0.57,
"grad_norm": 4.219668396076119,
"learning_rate": 1.9987237809575722e-05,
"loss": 0.5372,
"step": 135
},
{
"epoch": 0.6,
"grad_norm": 5.10590490035955,
"learning_rate": 1.997862971112581e-05,
"loss": 0.5236,
"step": 140
},
{
"epoch": 0.62,
"grad_norm": 9.22741963642746,
"learning_rate": 1.9967817911548796e-05,
"loss": 0.5351,
"step": 145
},
{
"epoch": 0.64,
"grad_norm": 3.2116021264594767,
"learning_rate": 1.9954804798544748e-05,
"loss": 0.477,
"step": 150
},
{
"epoch": 0.66,
"grad_norm": 4.7279336245257255,
"learning_rate": 1.993959324595634e-05,
"loss": 0.5063,
"step": 155
},
{
"epoch": 0.68,
"grad_norm": 2.6783717640693663,
"learning_rate": 1.9922186613134152e-05,
"loss": 0.4547,
"step": 160
},
{
"epoch": 0.7,
"grad_norm": 4.6102800388264,
"learning_rate": 1.9902588744194815e-05,
"loss": 0.4618,
"step": 165
},
{
"epoch": 0.72,
"grad_norm": 2.5223162544544246,
"learning_rate": 1.9880803967172048e-05,
"loss": 0.4532,
"step": 170
},
{
"epoch": 0.74,
"grad_norm": 2.2521576224935687,
"learning_rate": 1.985683709306085e-05,
"loss": 0.4627,
"step": 175
},
{
"epoch": 0.77,
"grad_norm": 2.363613216496382,
"learning_rate": 1.983069341475504e-05,
"loss": 0.4266,
"step": 180
},
{
"epoch": 0.79,
"grad_norm": 2.319077028125871,
"learning_rate": 1.9802378705878354e-05,
"loss": 0.4294,
"step": 185
},
{
"epoch": 0.81,
"grad_norm": 2.5983523553598675,
"learning_rate": 1.9771899219509388e-05,
"loss": 0.4463,
"step": 190
},
{
"epoch": 0.83,
"grad_norm": 2.697869053891185,
"learning_rate": 1.9739261686800662e-05,
"loss": 0.3995,
"step": 195
},
{
"epoch": 0.85,
"grad_norm": 2.2299084670054166,
"learning_rate": 1.9704473315492072e-05,
"loss": 0.4145,
"step": 200
},
{
"epoch": 0.87,
"grad_norm": 2.144013684497221,
"learning_rate": 1.966754178831916e-05,
"loss": 0.4047,
"step": 205
},
{
"epoch": 0.89,
"grad_norm": 2.165636444751405,
"learning_rate": 1.962847526131642e-05,
"loss": 0.429,
"step": 210
},
{
"epoch": 0.91,
"grad_norm": 2.5009764776602066,
"learning_rate": 1.9587282362016083e-05,
"loss": 0.3987,
"step": 215
},
{
"epoch": 0.94,
"grad_norm": 3.2454133915018946,
"learning_rate": 1.9543972187542833e-05,
"loss": 0.4196,
"step": 220
},
{
"epoch": 0.96,
"grad_norm": 2.273670978657276,
"learning_rate": 1.9498554302604768e-05,
"loss": 0.4029,
"step": 225
},
{
"epoch": 0.98,
"grad_norm": 3.004323662088254,
"learning_rate": 1.9451038737381078e-05,
"loss": 0.4268,
"step": 230
},
{
"epoch": 1.0,
"grad_norm": 2.3784052825284845,
"learning_rate": 1.940143598530701e-05,
"loss": 0.4066,
"step": 235
},
{
"epoch": 1.0,
"eval_loss": 0.7733179926872253,
"eval_runtime": 347.9521,
"eval_samples_per_second": 21.575,
"eval_steps_per_second": 0.339,
"step": 235
},
{
"epoch": 1.02,
"grad_norm": 2.2553533504002683,
"learning_rate": 1.9349757000756442e-05,
"loss": 0.2356,
"step": 240
},
{
"epoch": 1.04,
"grad_norm": 2.5450743747403717,
"learning_rate": 1.929601319662271e-05,
"loss": 0.2119,
"step": 245
},
{
"epoch": 1.06,
"grad_norm": 3.778262007789548,
"learning_rate": 1.9240216441798145e-05,
"loss": 0.2295,
"step": 250
},
{
"epoch": 1.09,
"grad_norm": 2.2858585923056536,
"learning_rate": 1.918237905855295e-05,
"loss": 0.2251,
"step": 255
},
{
"epoch": 1.11,
"grad_norm": 1.7784488699701833,
"learning_rate": 1.91225138198139e-05,
"loss": 0.2145,
"step": 260
},
{
"epoch": 1.13,
"grad_norm": 1.8254402227157958,
"learning_rate": 1.906063394634356e-05,
"loss": 0.2191,
"step": 265
},
{
"epoch": 1.15,
"grad_norm": 1.9464300006155193,
"learning_rate": 1.899675310382057e-05,
"loss": 0.2273,
"step": 270
},
{
"epoch": 1.17,
"grad_norm": 2.2936148112741757,
"learning_rate": 1.8930885399821693e-05,
"loss": 0.2246,
"step": 275
},
{
"epoch": 1.19,
"grad_norm": 2.3799646925913245,
"learning_rate": 1.8863045380706275e-05,
"loss": 0.2303,
"step": 280
},
{
"epoch": 1.21,
"grad_norm": 2.190554922996115,
"learning_rate": 1.879324802840379e-05,
"loss": 0.2318,
"step": 285
},
{
"epoch": 1.23,
"grad_norm": 2.7452325011160035,
"learning_rate": 1.8721508757105203e-05,
"loss": 0.2276,
"step": 290
},
{
"epoch": 1.26,
"grad_norm": 1.8163744421498897,
"learning_rate": 1.864784340985887e-05,
"loss": 0.24,
"step": 295
},
{
"epoch": 1.28,
"grad_norm": 1.885101115960248,
"learning_rate": 1.8572268255071718e-05,
"loss": 0.2399,
"step": 300
},
{
"epoch": 1.3,
"grad_norm": 1.760073718971802,
"learning_rate": 1.8494799982916512e-05,
"loss": 0.1955,
"step": 305
},
{
"epoch": 1.32,
"grad_norm": 1.8829516569820812,
"learning_rate": 1.8415455701645942e-05,
"loss": 0.2035,
"step": 310
},
{
"epoch": 1.34,
"grad_norm": 2.146880306353409,
"learning_rate": 1.833425293381443e-05,
"loss": 0.2271,
"step": 315
},
{
"epoch": 1.36,
"grad_norm": 2.0534275082799684,
"learning_rate": 1.8251209612408375e-05,
"loss": 0.197,
"step": 320
},
{
"epoch": 1.38,
"grad_norm": 2.0857973185832743,
"learning_rate": 1.816634407688583e-05,
"loss": 0.2337,
"step": 325
},
{
"epoch": 1.4,
"grad_norm": 1.7319986338118718,
"learning_rate": 1.807967506912636e-05,
"loss": 0.1909,
"step": 330
},
{
"epoch": 1.43,
"grad_norm": 1.7148628603629519,
"learning_rate": 1.799122172929206e-05,
"loss": 0.2103,
"step": 335
},
{
"epoch": 1.45,
"grad_norm": 2.0404565885174786,
"learning_rate": 1.7901003591600575e-05,
"loss": 0.2045,
"step": 340
},
{
"epoch": 1.47,
"grad_norm": 2.132532760084515,
"learning_rate": 1.780904058001116e-05,
"loss": 0.2234,
"step": 345
},
{
"epoch": 1.49,
"grad_norm": 2.2388854318437774,
"learning_rate": 1.7715353003824613e-05,
"loss": 0.222,
"step": 350
},
{
"epoch": 1.51,
"grad_norm": 2.1625096071245986,
"learning_rate": 1.761996155319811e-05,
"loss": 0.2278,
"step": 355
},
{
"epoch": 1.53,
"grad_norm": 1.8994643849098976,
"learning_rate": 1.7522887294575978e-05,
"loss": 0.2114,
"step": 360
},
{
"epoch": 1.55,
"grad_norm": 2.0232265086023213,
"learning_rate": 1.742415166603733e-05,
"loss": 0.2276,
"step": 365
},
{
"epoch": 1.57,
"grad_norm": 2.0393231755488737,
"learning_rate": 1.7323776472561625e-05,
"loss": 0.2065,
"step": 370
},
{
"epoch": 1.6,
"grad_norm": 1.7156091041056394,
"learning_rate": 1.7221783881213222e-05,
"loss": 0.2201,
"step": 375
},
{
"epoch": 1.62,
"grad_norm": 2.0249954720249645,
"learning_rate": 1.7118196416245947e-05,
"loss": 0.1986,
"step": 380
},
{
"epoch": 1.64,
"grad_norm": 1.860444641374077,
"learning_rate": 1.701303695412881e-05,
"loss": 0.2224,
"step": 385
},
{
"epoch": 1.66,
"grad_norm": 9.195021611366267,
"learning_rate": 1.6906328718493906e-05,
"loss": 0.2152,
"step": 390
},
{
"epoch": 1.68,
"grad_norm": 2.790540417351318,
"learning_rate": 1.679809527500765e-05,
"loss": 0.2014,
"step": 395
},
{
"epoch": 1.7,
"grad_norm": 1.9401261875054212,
"learning_rate": 1.6688360526166514e-05,
"loss": 0.2305,
"step": 400
},
{
"epoch": 1.72,
"grad_norm": 2.3499169909762903,
"learning_rate": 1.657714870601833e-05,
"loss": 0.2163,
"step": 405
},
{
"epoch": 1.74,
"grad_norm": 1.889841729135842,
"learning_rate": 1.646448437481039e-05,
"loss": 0.2285,
"step": 410
},
{
"epoch": 1.77,
"grad_norm": 1.9100327330280613,
"learning_rate": 1.635039241356553e-05,
"loss": 0.2066,
"step": 415
},
{
"epoch": 1.79,
"grad_norm": 2.0395782940541913,
"learning_rate": 1.6234898018587336e-05,
"loss": 0.2159,
"step": 420
},
{
"epoch": 1.81,
"grad_norm": 2.0672756907905647,
"learning_rate": 1.611802669589575e-05,
"loss": 0.2211,
"step": 425
},
{
"epoch": 1.83,
"grad_norm": 1.8644628412570703,
"learning_rate": 1.5999804255594262e-05,
"loss": 0.2123,
"step": 430
},
{
"epoch": 1.85,
"grad_norm": 2.109246262952444,
"learning_rate": 1.5880256806169954e-05,
"loss": 0.2179,
"step": 435
},
{
"epoch": 1.87,
"grad_norm": 2.0987747021618537,
"learning_rate": 1.5759410748727663e-05,
"loss": 0.2165,
"step": 440
},
{
"epoch": 1.89,
"grad_norm": 1.8615315847750686,
"learning_rate": 1.563729277115947e-05,
"loss": 0.2115,
"step": 445
},
{
"epoch": 1.91,
"grad_norm": 1.91470907138453,
"learning_rate": 1.551392984225094e-05,
"loss": 0.2226,
"step": 450
},
{
"epoch": 1.94,
"grad_norm": 2.1673896851739,
"learning_rate": 1.5389349205725244e-05,
"loss": 0.2034,
"step": 455
},
{
"epoch": 1.96,
"grad_norm": 1.9561220925261051,
"learning_rate": 1.5263578374226607e-05,
"loss": 0.2079,
"step": 460
},
{
"epoch": 1.98,
"grad_norm": 1.8490634437024727,
"learning_rate": 1.5136645123244366e-05,
"loss": 0.2011,
"step": 465
},
{
"epoch": 2.0,
"grad_norm": 1.8370535270484316,
"learning_rate": 1.5008577484978966e-05,
"loss": 0.1901,
"step": 470
},
{
"epoch": 2.0,
"eval_loss": 0.6736623048782349,
"eval_runtime": 345.8336,
"eval_samples_per_second": 21.707,
"eval_steps_per_second": 0.341,
"step": 470
},
{
"epoch": 2.02,
"grad_norm": 3.2591869308084505,
"learning_rate": 1.4879403742151283e-05,
"loss": 0.0766,
"step": 475
},
{
"epoch": 2.04,
"grad_norm": 1.5781740604686478,
"learning_rate": 1.4749152421756596e-05,
"loss": 0.0809,
"step": 480
},
{
"epoch": 2.06,
"grad_norm": 1.6789209629444652,
"learning_rate": 1.4617852288764624e-05,
"loss": 0.0728,
"step": 485
},
{
"epoch": 2.09,
"grad_norm": 1.524275026933269,
"learning_rate": 1.4485532339767036e-05,
"loss": 0.0671,
"step": 490
},
{
"epoch": 2.11,
"grad_norm": 1.911681176460919,
"learning_rate": 1.4352221796573758e-05,
"loss": 0.0829,
"step": 495
},
{
"epoch": 2.13,
"grad_norm": 1.5314000006841326,
"learning_rate": 1.421795009975957e-05,
"loss": 0.0739,
"step": 500
},
{
"epoch": 2.15,
"grad_norm": 1.8497162969927008,
"learning_rate": 1.4082746902162414e-05,
"loss": 0.0782,
"step": 505
},
{
"epoch": 2.17,
"grad_norm": 1.5153675955770316,
"learning_rate": 1.3946642062334765e-05,
"loss": 0.0717,
"step": 510
},
{
"epoch": 2.19,
"grad_norm": 1.4798583086240507,
"learning_rate": 1.3809665637949636e-05,
"loss": 0.0683,
"step": 515
},
{
"epoch": 2.21,
"grad_norm": 1.5185665743912256,
"learning_rate": 1.3671847879162562e-05,
"loss": 0.0647,
"step": 520
},
{
"epoch": 2.23,
"grad_norm": 1.6655269792696328,
"learning_rate": 1.3533219221931102e-05,
"loss": 0.0756,
"step": 525
},
{
"epoch": 2.26,
"grad_norm": 1.4623074396711568,
"learning_rate": 1.3393810281293294e-05,
"loss": 0.079,
"step": 530
},
{
"epoch": 2.28,
"grad_norm": 1.4769143347541074,
"learning_rate": 1.3253651844606571e-05,
"loss": 0.0644,
"step": 535
},
{
"epoch": 2.3,
"grad_norm": 1.5924771297209446,
"learning_rate": 1.311277486474862e-05,
"loss": 0.0676,
"step": 540
},
{
"epoch": 2.32,
"grad_norm": 1.4963368374266843,
"learning_rate": 1.2971210453281675e-05,
"loss": 0.0685,
"step": 545
},
{
"epoch": 2.34,
"grad_norm": 1.3660066623187117,
"learning_rate": 1.2828989873581786e-05,
"loss": 0.0643,
"step": 550
},
{
"epoch": 2.36,
"grad_norm": 1.5417608458745726,
"learning_rate": 1.268614453393454e-05,
"loss": 0.0704,
"step": 555
},
{
"epoch": 2.38,
"grad_norm": 1.3386793668448367,
"learning_rate": 1.2542705980598813e-05,
"loss": 0.0725,
"step": 560
},
{
"epoch": 2.4,
"grad_norm": 4.538383693449207,
"learning_rate": 1.2398705890839988e-05,
"loss": 0.0724,
"step": 565
},
{
"epoch": 2.43,
"grad_norm": 1.5161704188772478,
"learning_rate": 1.2254176065934332e-05,
"loss": 0.069,
"step": 570
},
{
"epoch": 2.45,
"grad_norm": 1.5190424960322728,
"learning_rate": 1.2109148424145897e-05,
"loss": 0.0675,
"step": 575
},
{
"epoch": 2.47,
"grad_norm": 1.6418241298329592,
"learning_rate": 1.1963654993677645e-05,
"loss": 0.0706,
"step": 580
},
{
"epoch": 2.49,
"grad_norm": 2.7746288115630415,
"learning_rate": 1.1817727905598268e-05,
"loss": 0.0661,
"step": 585
},
{
"epoch": 2.51,
"grad_norm": 1.613430655848529,
"learning_rate": 1.1671399386746301e-05,
"loss": 0.0685,
"step": 590
},
{
"epoch": 2.53,
"grad_norm": 1.652377714904139,
"learning_rate": 1.1524701752613074e-05,
"loss": 0.0685,
"step": 595
},
{
"epoch": 2.55,
"grad_norm": 1.6108396859937457,
"learning_rate": 1.13776674002061e-05,
"loss": 0.0736,
"step": 600
},
{
"epoch": 2.57,
"grad_norm": 1.4308175997531103,
"learning_rate": 1.1230328800894437e-05,
"loss": 0.0699,
"step": 605
},
{
"epoch": 2.6,
"grad_norm": 1.529984546400154,
"learning_rate": 1.108271849323767e-05,
"loss": 0.0647,
"step": 610
},
{
"epoch": 2.62,
"grad_norm": 1.7364294117046162,
"learning_rate": 1.09348690758e-05,
"loss": 0.0691,
"step": 615
},
{
"epoch": 2.64,
"grad_norm": 1.4526326385257828,
"learning_rate": 1.0786813199951145e-05,
"loss": 0.0623,
"step": 620
},
{
"epoch": 2.66,
"grad_norm": 1.5442010646463833,
"learning_rate": 1.0638583562655498e-05,
"loss": 0.0673,
"step": 625
},
{
"epoch": 2.68,
"grad_norm": 1.4797041462885991,
"learning_rate": 1.0490212899251308e-05,
"loss": 0.0686,
"step": 630
},
{
"epoch": 2.7,
"grad_norm": 1.650135612201066,
"learning_rate": 1.0341733976221313e-05,
"loss": 0.0715,
"step": 635
},
{
"epoch": 2.72,
"grad_norm": 1.5265081306931187,
"learning_rate": 1.0193179583956523e-05,
"loss": 0.0642,
"step": 640
},
{
"epoch": 2.74,
"grad_norm": 1.4887311962873813,
"learning_rate": 1.0044582529514739e-05,
"loss": 0.075,
"step": 645
},
{
"epoch": 2.77,
"grad_norm": 1.4387050627018103,
"learning_rate": 9.89597562937536e-06,
"loss": 0.06,
"step": 650
},
{
"epoch": 2.79,
"grad_norm": 1.58228556181931,
"learning_rate": 9.747391702192132e-06,
"loss": 0.0613,
"step": 655
},
{
"epoch": 2.81,
"grad_norm": 1.424709805936743,
"learning_rate": 9.598863561545404e-06,
"loss": 0.0728,
"step": 660
},
{
"epoch": 2.83,
"grad_norm": 1.3925788280661726,
"learning_rate": 9.45042400869551e-06,
"loss": 0.0638,
"step": 665
},
{
"epoch": 2.85,
"grad_norm": 1.8579081866696643,
"learning_rate": 9.302105825338876e-06,
"loss": 0.0729,
"step": 670
},
{
"epoch": 2.87,
"grad_norm": 1.4419650630198477,
"learning_rate": 9.153941766368439e-06,
"loss": 0.0605,
"step": 675
},
{
"epoch": 2.89,
"grad_norm": 1.4587070966850302,
"learning_rate": 9.005964552639983e-06,
"loss": 0.0637,
"step": 680
},
{
"epoch": 2.91,
"grad_norm": 1.400937857775751,
"learning_rate": 8.858206863746018e-06,
"loss": 0.0644,
"step": 685
},
{
"epoch": 2.94,
"grad_norm": 1.4194625294878935,
"learning_rate": 8.71070133079872e-06,
"loss": 0.0708,
"step": 690
},
{
"epoch": 2.96,
"grad_norm": 1.511069294541966,
"learning_rate": 8.563480529223638e-06,
"loss": 0.0667,
"step": 695
},
{
"epoch": 2.98,
"grad_norm": 1.3579712613828467,
"learning_rate": 8.41657697156566e-06,
"loss": 0.0527,
"step": 700
},
{
"epoch": 3.0,
"grad_norm": 1.4627979346880684,
"learning_rate": 8.270023100308865e-06,
"loss": 0.068,
"step": 705
},
{
"epoch": 3.0,
"eval_loss": 0.6575748324394226,
"eval_runtime": 346.2698,
"eval_samples_per_second": 21.68,
"eval_steps_per_second": 0.341,
"step": 705
},
{
"epoch": 3.02,
"grad_norm": 0.9187425982230547,
"learning_rate": 8.123851280711877e-06,
"loss": 0.0216,
"step": 710
},
{
"epoch": 3.04,
"grad_norm": 1.0704321956753566,
"learning_rate": 7.978093793660234e-06,
"loss": 0.0187,
"step": 715
},
{
"epoch": 3.06,
"grad_norm": 0.9253831765543917,
"learning_rate": 7.832782828537437e-06,
"loss": 0.0191,
"step": 720
},
{
"epoch": 3.09,
"grad_norm": 0.9546574193581036,
"learning_rate": 7.68795047611615e-06,
"loss": 0.0186,
"step": 725
},
{
"epoch": 3.11,
"grad_norm": 0.9527598588762864,
"learning_rate": 7.543628721471234e-06,
"loss": 0.0191,
"step": 730
},
{
"epoch": 3.13,
"grad_norm": 1.0142369552642194,
"learning_rate": 7.399849436916076e-06,
"loss": 0.017,
"step": 735
},
{
"epoch": 3.15,
"grad_norm": 0.8516277331105468,
"learning_rate": 7.256644374963857e-06,
"loss": 0.0169,
"step": 740
},
{
"epoch": 3.17,
"grad_norm": 0.6998532587968802,
"learning_rate": 7.11404516131526e-06,
"loss": 0.0167,
"step": 745
},
{
"epoch": 3.19,
"grad_norm": 0.9597297439764481,
"learning_rate": 6.9720832878741776e-06,
"loss": 0.0195,
"step": 750
},
{
"epoch": 3.21,
"grad_norm": 0.8162423625488969,
"learning_rate": 6.8307901057929735e-06,
"loss": 0.0196,
"step": 755
},
{
"epoch": 3.23,
"grad_norm": 1.0809022540934834,
"learning_rate": 6.690196818548846e-06,
"loss": 0.0202,
"step": 760
},
{
"epoch": 3.26,
"grad_norm": 0.9451137823905494,
"learning_rate": 6.550334475052767e-06,
"loss": 0.0173,
"step": 765
},
{
"epoch": 3.28,
"grad_norm": 1.0194236278105107,
"learning_rate": 6.411233962792593e-06,
"loss": 0.0217,
"step": 770
},
{
"epoch": 3.3,
"grad_norm": 0.9428518118269854,
"learning_rate": 6.2729260010117995e-06,
"loss": 0.0205,
"step": 775
},
{
"epoch": 3.32,
"grad_norm": 0.8298491758909723,
"learning_rate": 6.135441133925382e-06,
"loss": 0.0156,
"step": 780
},
{
"epoch": 3.34,
"grad_norm": 0.9988001166823972,
"learning_rate": 5.998809723974407e-06,
"loss": 0.018,
"step": 785
},
{
"epoch": 3.36,
"grad_norm": 0.8874287620178792,
"learning_rate": 5.863061945120719e-06,
"loss": 0.0172,
"step": 790
},
{
"epoch": 3.38,
"grad_norm": 0.9021252322116792,
"learning_rate": 5.728227776183244e-06,
"loss": 0.0184,
"step": 795
},
{
"epoch": 3.4,
"grad_norm": 0.899901563579598,
"learning_rate": 5.594336994217416e-06,
"loss": 0.0207,
"step": 800
},
{
"epoch": 3.43,
"grad_norm": 0.7294818546149708,
"learning_rate": 5.461419167939145e-06,
"loss": 0.0154,
"step": 805
},
{
"epoch": 3.45,
"grad_norm": 0.9595599651370788,
"learning_rate": 5.329503651194805e-06,
"loss": 0.0182,
"step": 810
},
{
"epoch": 3.47,
"grad_norm": 0.7263044743513145,
"learning_rate": 5.198619576478678e-06,
"loss": 0.017,
"step": 815
},
{
"epoch": 3.49,
"grad_norm": 0.9300662832706075,
"learning_rate": 5.068795848499257e-06,
"loss": 0.0182,
"step": 820
},
{
"epoch": 3.51,
"grad_norm": 1.044597812236674,
"learning_rate": 4.940061137795876e-06,
"loss": 0.0154,
"step": 825
},
{
"epoch": 3.53,
"grad_norm": 0.9405246884194852,
"learning_rate": 4.812443874407059e-06,
"loss": 0.0187,
"step": 830
},
{
"epoch": 3.55,
"grad_norm": 0.8135204435661318,
"learning_rate": 4.685972241591956e-06,
"loss": 0.0156,
"step": 835
},
{
"epoch": 3.57,
"grad_norm": 0.806855367473174,
"learning_rate": 4.560674169606317e-06,
"loss": 0.0165,
"step": 840
},
{
"epoch": 3.6,
"grad_norm": 1.1283393882651565,
"learning_rate": 4.436577329534291e-06,
"loss": 0.0161,
"step": 845
},
{
"epoch": 3.62,
"grad_norm": 1.0316895831074073,
"learning_rate": 4.3137091271775e-06,
"loss": 0.018,
"step": 850
},
{
"epoch": 3.64,
"grad_norm": 0.83713135691551,
"learning_rate": 4.192096697002686e-06,
"loss": 0.0154,
"step": 855
},
{
"epoch": 3.66,
"grad_norm": 0.8862197834996656,
"learning_rate": 4.0717668961492725e-06,
"loss": 0.014,
"step": 860
},
{
"epoch": 3.68,
"grad_norm": 0.9409064835179552,
"learning_rate": 3.9527462984981954e-06,
"loss": 0.0185,
"step": 865
},
{
"epoch": 3.7,
"grad_norm": 0.8141414364863905,
"learning_rate": 3.8350611888032474e-06,
"loss": 0.0167,
"step": 870
},
{
"epoch": 3.72,
"grad_norm": 0.9491167080432446,
"learning_rate": 3.718737556886316e-06,
"loss": 0.0166,
"step": 875
},
{
"epoch": 3.74,
"grad_norm": 0.8939541066316343,
"learning_rate": 3.6038010918977308e-06,
"loss": 0.0157,
"step": 880
},
{
"epoch": 3.77,
"grad_norm": 1.1762213442708835,
"learning_rate": 3.490277176643033e-06,
"loss": 0.0176,
"step": 885
},
{
"epoch": 3.79,
"grad_norm": 0.8409814896300148,
"learning_rate": 3.378190881977359e-06,
"loss": 0.0152,
"step": 890
},
{
"epoch": 3.81,
"grad_norm": 1.0408829490759663,
"learning_rate": 3.2675669612687565e-06,
"loss": 0.0191,
"step": 895
},
{
"epoch": 3.83,
"grad_norm": 0.9464969671060768,
"learning_rate": 3.158429844931611e-06,
"loss": 0.0156,
"step": 900
},
{
"epoch": 3.85,
"grad_norm": 0.8183518112165244,
"learning_rate": 3.0508036350313553e-06,
"loss": 0.0155,
"step": 905
},
{
"epoch": 3.87,
"grad_norm": 1.031893449336409,
"learning_rate": 2.9447120999617363e-06,
"loss": 0.0151,
"step": 910
},
{
"epoch": 3.89,
"grad_norm": 0.9770328880326357,
"learning_rate": 2.8401786691957632e-06,
"loss": 0.0156,
"step": 915
},
{
"epoch": 3.91,
"grad_norm": 0.9350326565557209,
"learning_rate": 2.737226428111471e-06,
"loss": 0.0176,
"step": 920
},
{
"epoch": 3.94,
"grad_norm": 0.7498411603912395,
"learning_rate": 2.635878112893717e-06,
"loss": 0.0147,
"step": 925
},
{
"epoch": 3.96,
"grad_norm": 0.890762978868864,
"learning_rate": 2.5361561055130625e-06,
"loss": 0.0179,
"step": 930
},
{
"epoch": 3.98,
"grad_norm": 1.153428874102931,
"learning_rate": 2.4380824287829073e-06,
"loss": 0.0159,
"step": 935
},
{
"epoch": 4.0,
"grad_norm": 0.89001029327834,
"learning_rate": 2.3416787414959097e-06,
"loss": 0.0183,
"step": 940
},
{
"epoch": 4.0,
"eval_loss": 0.7071638107299805,
"eval_runtime": 348.8132,
"eval_samples_per_second": 21.522,
"eval_steps_per_second": 0.338,
"step": 940
},
{
"epoch": 4.02,
"grad_norm": 0.3600823717570703,
"learning_rate": 2.246966333640823e-06,
"loss": 0.0048,
"step": 945
},
{
"epoch": 4.04,
"grad_norm": 0.33280820804988087,
"learning_rate": 2.153966121700769e-06,
"loss": 0.0049,
"step": 950
},
{
"epoch": 4.06,
"grad_norm": 0.48812206732400915,
"learning_rate": 2.0626986440340036e-06,
"loss": 0.0058,
"step": 955
},
{
"epoch": 4.09,
"grad_norm": 0.3662559033659471,
"learning_rate": 1.973184056338173e-06,
"loss": 0.0039,
"step": 960
},
{
"epoch": 4.11,
"grad_norm": 0.3710554752839552,
"learning_rate": 1.8854421271990964e-06,
"loss": 0.0037,
"step": 965
},
{
"epoch": 4.13,
"grad_norm": 0.3360503768539956,
"learning_rate": 1.7994922337250276e-06,
"loss": 0.0041,
"step": 970
},
{
"epoch": 4.15,
"grad_norm": 0.32232522758989207,
"learning_rate": 1.7153533572673708e-06,
"loss": 0.0042,
"step": 975
},
{
"epoch": 4.17,
"grad_norm": 0.3933577657073139,
"learning_rate": 1.633044079228817e-06,
"loss": 0.0042,
"step": 980
},
{
"epoch": 4.19,
"grad_norm": 0.4133692185032901,
"learning_rate": 1.5525825769597625e-06,
"loss": 0.0042,
"step": 985
},
{
"epoch": 4.21,
"grad_norm": 0.311775019674446,
"learning_rate": 1.4739866197440046e-06,
"loss": 0.004,
"step": 990
},
{
"epoch": 4.23,
"grad_norm": 0.49759561364679256,
"learning_rate": 1.3972735648745295e-06,
"loss": 0.0049,
"step": 995
},
{
"epoch": 4.26,
"grad_norm": 0.4477509727419261,
"learning_rate": 1.3224603538202929e-06,
"loss": 0.0041,
"step": 1000
},
{
"epoch": 4.28,
"grad_norm": 0.4461256466898462,
"learning_rate": 1.2495635084848356e-06,
"loss": 0.0038,
"step": 1005
},
{
"epoch": 4.3,
"grad_norm": 0.40708673174578486,
"learning_rate": 1.1785991275575426e-06,
"loss": 0.0039,
"step": 1010
},
{
"epoch": 4.32,
"grad_norm": 0.4494486001323653,
"learning_rate": 1.1095828829583844e-06,
"loss": 0.0049,
"step": 1015
},
{
"epoch": 4.34,
"grad_norm": 0.3778130066090292,
"learning_rate": 1.0425300163768903e-06,
"loss": 0.0042,
"step": 1020
},
{
"epoch": 4.36,
"grad_norm": 0.3983653765993126,
"learning_rate": 9.77455335906149e-07,
"loss": 0.0053,
"step": 1025
},
{
"epoch": 4.38,
"grad_norm": 0.3604607140996719,
"learning_rate": 9.143732127725591e-07,
"loss": 0.0039,
"step": 1030
},
{
"epoch": 4.4,
"grad_norm": 0.6021048871397348,
"learning_rate": 8.532975781620511e-07,
"loss": 0.0035,
"step": 1035
},
{
"epoch": 4.43,
"grad_norm": 0.3771138444026374,
"learning_rate": 7.942419201435014e-07,
"loss": 0.0041,
"step": 1040
},
{
"epoch": 4.45,
"grad_norm": 0.40288900755654594,
"learning_rate": 7.372192806899947e-07,
"loss": 0.0045,
"step": 1045
},
{
"epoch": 4.47,
"grad_norm": 0.4475530083250148,
"learning_rate": 6.822422527986161e-07,
"loss": 0.0043,
"step": 1050
},
{
"epoch": 4.49,
"grad_norm": 0.3791478244996085,
"learning_rate": 6.293229777093779e-07,
"loss": 0.0046,
"step": 1055
},
{
"epoch": 4.51,
"grad_norm": 0.3778438936995471,
"learning_rate": 5.784731422239276e-07,
"loss": 0.0038,
"step": 1060
},
{
"epoch": 4.53,
"grad_norm": 0.376192442816014,
"learning_rate": 5.297039761246137e-07,
"loss": 0.0038,
"step": 1065
},
{
"epoch": 4.55,
"grad_norm": 0.2960720923070219,
"learning_rate": 4.830262496944693e-07,
"loss": 0.0037,
"step": 1070
},
{
"epoch": 4.57,
"grad_norm": 0.37323225545474764,
"learning_rate": 4.384502713386918e-07,
"loss": 0.0038,
"step": 1075
},
{
"epoch": 4.6,
"grad_norm": 0.33848258454167024,
"learning_rate": 3.9598588530810335e-07,
"loss": 0.0037,
"step": 1080
},
{
"epoch": 4.62,
"grad_norm": 0.4828649331052619,
"learning_rate": 3.5564246952512817e-07,
"loss": 0.0034,
"step": 1085
},
{
"epoch": 4.64,
"grad_norm": 0.43489507591573195,
"learning_rate": 3.174289335127612e-07,
"loss": 0.0039,
"step": 1090
},
{
"epoch": 4.66,
"grad_norm": 0.3646806476787612,
"learning_rate": 2.8135371642695865e-07,
"loss": 0.0042,
"step": 1095
},
{
"epoch": 4.68,
"grad_norm": 0.5325215204533521,
"learning_rate": 2.474247851929246e-07,
"loss": 0.0048,
"step": 1100
},
{
"epoch": 4.7,
"grad_norm": 0.40238125679038056,
"learning_rate": 2.1564963274568028e-07,
"loss": 0.0051,
"step": 1105
},
{
"epoch": 4.72,
"grad_norm": 0.38702990805573456,
"learning_rate": 1.860352763753004e-07,
"loss": 0.0042,
"step": 1110
},
{
"epoch": 4.74,
"grad_norm": 0.3852959902302661,
"learning_rate": 1.585882561772112e-07,
"loss": 0.0044,
"step": 1115
},
{
"epoch": 4.77,
"grad_norm": 0.44717232901584403,
"learning_rate": 1.333146336078528e-07,
"loss": 0.0043,
"step": 1120
},
{
"epoch": 4.79,
"grad_norm": 0.372462847171052,
"learning_rate": 1.1021999014606322e-07,
"loss": 0.0038,
"step": 1125
},
{
"epoch": 4.81,
"grad_norm": 0.2928988966778955,
"learning_rate": 8.930942606044434e-08,
"loss": 0.0034,
"step": 1130
},
{
"epoch": 4.83,
"grad_norm": 0.39889536364254635,
"learning_rate": 7.058755928301631e-08,
"loss": 0.0051,
"step": 1135
},
{
"epoch": 4.85,
"grad_norm": 0.40786489098741363,
"learning_rate": 5.405852438937764e-08,
"loss": 0.0034,
"step": 1140
},
{
"epoch": 4.87,
"grad_norm": 0.3349150763788282,
"learning_rate": 3.972597168562131e-08,
"loss": 0.0066,
"step": 1145
},
{
"epoch": 4.89,
"grad_norm": 0.3597591461189066,
"learning_rate": 2.7593066402189506e-08,
"loss": 0.0037,
"step": 1150
},
{
"epoch": 4.91,
"grad_norm": 0.4040878903041599,
"learning_rate": 1.7662487994862808e-08,
"loss": 0.0039,
"step": 1155
},
{
"epoch": 4.94,
"grad_norm": 0.3992498945384279,
"learning_rate": 9.93642955301799e-09,
"loss": 0.0039,
"step": 1160
},
{
"epoch": 4.96,
"grad_norm": 0.3821243876748289,
"learning_rate": 4.4165973153076355e-09,
"loss": 0.0044,
"step": 1165
},
{
"epoch": 4.98,
"grad_norm": 0.3430681816581551,
"learning_rate": 1.1042102928460196e-09,
"loss": 0.0045,
"step": 1170
},
{
"epoch": 5.0,
"grad_norm": 0.32719513702973596,
"learning_rate": 0.0,
"loss": 0.004,
"step": 1175
},
{
"epoch": 5.0,
"eval_loss": 0.7659260034561157,
"eval_runtime": 346.6983,
"eval_samples_per_second": 21.653,
"eval_steps_per_second": 0.34,
"step": 1175
},
{
"epoch": 5.0,
"step": 1175,
"total_flos": 246021095424000.0,
"train_loss": 0.1592100728509274,
"train_runtime": 9397.2382,
"train_samples_per_second": 3.994,
"train_steps_per_second": 0.125
}
],
"logging_steps": 5,
"max_steps": 1175,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"total_flos": 246021095424000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
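
A minimal sketch (not part of the checkpoint itself) of how the log above could be inspected outside of the Trainer. It assumes the file is saved locally as `trainer_state.json` and that matplotlib is installed; entries in `log_history` that carry `"loss"` are the per-logging-step training records, while those carrying `"eval_loss"` are the per-epoch evaluation records.

```python
# Sketch: load trainer_state.json and plot the logged loss curves and LR schedule.
# Assumes the file is in the current working directory (hypothetical path).
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training records have a "loss" key; epoch-end records have "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]
lrs = [e["learning_rate"] for e in train_logs]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, losses, label="train loss")
ax1.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], "o-", label="eval loss")
ax1.set_xlabel("step")
ax1.set_ylabel("loss")
ax1.legend()

ax2.plot(steps, lrs)
ax2.set_xlabel("step")
ax2.set_ylabel("learning rate")

fig.tight_layout()
plt.show()
```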