{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.526447931526391,
"eval_steps": 500,
"global_step": 79326,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.028530670470756064,
"grad_norm": 4.224736213684082,
"learning_rate": 9.936968963517636e-05,
"loss": 7.9606,
"step": 500
},
{
"epoch": 0.05706134094151213,
"grad_norm": 3.775930881500244,
"learning_rate": 9.873937927035273e-05,
"loss": 7.4609,
"step": 1000
},
{
"epoch": 0.08559201141226819,
"grad_norm": 4.54003381729126,
"learning_rate": 9.810906890552909e-05,
"loss": 7.2264,
"step": 1500
},
{
"epoch": 0.11412268188302425,
"grad_norm": 4.762158393859863,
"learning_rate": 9.747875854070544e-05,
"loss": 7.1154,
"step": 2000
},
{
"epoch": 0.14265335235378032,
"grad_norm": 4.3742995262146,
"learning_rate": 9.684844817588181e-05,
"loss": 7.0396,
"step": 2500
},
{
"epoch": 0.17118402282453637,
"grad_norm": 6.319996356964111,
"learning_rate": 9.621813781105817e-05,
"loss": 6.9091,
"step": 3000
},
{
"epoch": 0.19971469329529243,
"grad_norm": 5.134556770324707,
"learning_rate": 9.558782744623454e-05,
"loss": 6.8436,
"step": 3500
},
{
"epoch": 0.2282453637660485,
"grad_norm": 5.821471691131592,
"learning_rate": 9.49575170814109e-05,
"loss": 6.7296,
"step": 4000
},
{
"epoch": 0.25677603423680456,
"grad_norm": 5.649785041809082,
"learning_rate": 9.432720671658725e-05,
"loss": 6.6347,
"step": 4500
},
{
"epoch": 0.28530670470756064,
"grad_norm": 4.85307502746582,
"learning_rate": 9.369689635176361e-05,
"loss": 6.5734,
"step": 5000
},
{
"epoch": 0.31383737517831667,
"grad_norm": 4.924481391906738,
"learning_rate": 9.306658598693997e-05,
"loss": 6.4956,
"step": 5500
},
{
"epoch": 0.34236804564907275,
"grad_norm": 5.568727970123291,
"learning_rate": 9.243627562211632e-05,
"loss": 6.4667,
"step": 6000
},
{
"epoch": 0.37089871611982883,
"grad_norm": 5.307117938995361,
"learning_rate": 9.18059652572927e-05,
"loss": 6.4108,
"step": 6500
},
{
"epoch": 0.39942938659058486,
"grad_norm": 5.159631729125977,
"learning_rate": 9.117565489246905e-05,
"loss": 6.3248,
"step": 7000
},
{
"epoch": 0.42796005706134094,
"grad_norm": 5.529886722564697,
"learning_rate": 9.054534452764542e-05,
"loss": 6.3069,
"step": 7500
},
{
"epoch": 0.456490727532097,
"grad_norm": 9.25552749633789,
"learning_rate": 8.991503416282178e-05,
"loss": 6.2188,
"step": 8000
},
{
"epoch": 0.48502139800285304,
"grad_norm": 7.253251075744629,
"learning_rate": 8.928472379799813e-05,
"loss": 6.0356,
"step": 8500
},
{
"epoch": 0.5135520684736091,
"grad_norm": 8.699528694152832,
"learning_rate": 8.86544134331745e-05,
"loss": 6.0202,
"step": 9000
},
{
"epoch": 0.5420827389443652,
"grad_norm": 7.144530296325684,
"learning_rate": 8.802410306835086e-05,
"loss": 5.8857,
"step": 9500
},
{
"epoch": 0.5706134094151213,
"grad_norm": 7.254436492919922,
"learning_rate": 8.739379270352722e-05,
"loss": 5.8363,
"step": 10000
},
{
"epoch": 0.5991440798858774,
"grad_norm": 7.687765121459961,
"learning_rate": 8.676348233870359e-05,
"loss": 5.7397,
"step": 10500
},
{
"epoch": 0.6276747503566333,
"grad_norm": 6.547056674957275,
"learning_rate": 8.613317197387994e-05,
"loss": 5.6764,
"step": 11000
},
{
"epoch": 0.6562054208273894,
"grad_norm": 9.949172973632812,
"learning_rate": 8.55028616090563e-05,
"loss": 5.6305,
"step": 11500
},
{
"epoch": 0.6847360912981455,
"grad_norm": 8.107893943786621,
"learning_rate": 8.487255124423267e-05,
"loss": 5.5956,
"step": 12000
},
{
"epoch": 0.7132667617689016,
"grad_norm": 6.340846538543701,
"learning_rate": 8.424224087940903e-05,
"loss": 5.5119,
"step": 12500
},
{
"epoch": 0.7417974322396577,
"grad_norm": 7.98532247543335,
"learning_rate": 8.361193051458538e-05,
"loss": 5.4685,
"step": 13000
},
{
"epoch": 0.7703281027104137,
"grad_norm": 8.349475860595703,
"learning_rate": 8.298162014976175e-05,
"loss": 5.4685,
"step": 13500
},
{
"epoch": 0.7988587731811697,
"grad_norm": 8.270821571350098,
"learning_rate": 8.23513097849381e-05,
"loss": 5.4529,
"step": 14000
},
{
"epoch": 0.8273894436519258,
"grad_norm": 7.967436790466309,
"learning_rate": 8.172099942011447e-05,
"loss": 5.369,
"step": 14500
},
{
"epoch": 0.8559201141226819,
"grad_norm": 6.520368576049805,
"learning_rate": 8.109068905529082e-05,
"loss": 5.3417,
"step": 15000
},
{
"epoch": 0.884450784593438,
"grad_norm": 7.0926713943481445,
"learning_rate": 8.046037869046718e-05,
"loss": 5.3051,
"step": 15500
},
{
"epoch": 0.912981455064194,
"grad_norm": 7.306639194488525,
"learning_rate": 7.983006832564355e-05,
"loss": 5.275,
"step": 16000
},
{
"epoch": 0.9415121255349501,
"grad_norm": 7.494438171386719,
"learning_rate": 7.919975796081991e-05,
"loss": 5.2383,
"step": 16500
},
{
"epoch": 0.9700427960057061,
"grad_norm": 7.95282506942749,
"learning_rate": 7.856944759599628e-05,
"loss": 5.2275,
"step": 17000
},
{
"epoch": 0.9985734664764622,
"grad_norm": 8.143848419189453,
"learning_rate": 7.793913723117263e-05,
"loss": 5.1984,
"step": 17500
},
{
"epoch": 1.0,
"eval_loss": 5.088438034057617,
"eval_runtime": 60.5124,
"eval_samples_per_second": 817.683,
"eval_steps_per_second": 51.113,
"step": 17525
},
{
"epoch": 1.0271041369472182,
"grad_norm": 7.758360862731934,
"learning_rate": 7.730882686634899e-05,
"loss": 5.1319,
"step": 18000
},
{
"epoch": 1.0556348074179742,
"grad_norm": 6.7139177322387695,
"learning_rate": 7.667851650152536e-05,
"loss": 5.1087,
"step": 18500
},
{
"epoch": 1.0841654778887304,
"grad_norm": 6.584017753601074,
"learning_rate": 7.604820613670172e-05,
"loss": 5.0938,
"step": 19000
},
{
"epoch": 1.1126961483594864,
"grad_norm": 7.792596817016602,
"learning_rate": 7.541789577187807e-05,
"loss": 5.0448,
"step": 19500
},
{
"epoch": 1.1412268188302426,
"grad_norm": 7.243467807769775,
"learning_rate": 7.478758540705444e-05,
"loss": 5.0423,
"step": 20000
},
{
"epoch": 1.1697574893009985,
"grad_norm": 8.1492338180542,
"learning_rate": 7.41572750422308e-05,
"loss": 4.9913,
"step": 20500
},
{
"epoch": 1.1982881597717547,
"grad_norm": 7.754183292388916,
"learning_rate": 7.352696467740716e-05,
"loss": 4.9983,
"step": 21000
},
{
"epoch": 1.2268188302425107,
"grad_norm": 7.245093822479248,
"learning_rate": 7.289665431258353e-05,
"loss": 4.9729,
"step": 21500
},
{
"epoch": 1.2553495007132667,
"grad_norm": 7.720768451690674,
"learning_rate": 7.226634394775988e-05,
"loss": 4.9499,
"step": 22000
},
{
"epoch": 1.2838801711840229,
"grad_norm": 7.034431457519531,
"learning_rate": 7.163603358293624e-05,
"loss": 4.9368,
"step": 22500
},
{
"epoch": 1.3124108416547788,
"grad_norm": 9.341329574584961,
"learning_rate": 7.100572321811261e-05,
"loss": 4.8745,
"step": 23000
},
{
"epoch": 1.340941512125535,
"grad_norm": 6.874212265014648,
"learning_rate": 7.037541285328895e-05,
"loss": 4.877,
"step": 23500
},
{
"epoch": 1.369472182596291,
"grad_norm": 8.832662582397461,
"learning_rate": 6.974510248846532e-05,
"loss": 4.8839,
"step": 24000
},
{
"epoch": 1.3980028530670472,
"grad_norm": 6.884407997131348,
"learning_rate": 6.911479212364168e-05,
"loss": 4.8456,
"step": 24500
},
{
"epoch": 1.4265335235378032,
"grad_norm": 8.421597480773926,
"learning_rate": 6.848448175881804e-05,
"loss": 4.8491,
"step": 25000
},
{
"epoch": 1.4550641940085591,
"grad_norm": 8.932446479797363,
"learning_rate": 6.78541713939944e-05,
"loss": 4.7936,
"step": 25500
},
{
"epoch": 1.4835948644793153,
"grad_norm": 9.981544494628906,
"learning_rate": 6.722386102917076e-05,
"loss": 4.816,
"step": 26000
},
{
"epoch": 1.5121255349500713,
"grad_norm": 7.8625922203063965,
"learning_rate": 6.659355066434713e-05,
"loss": 4.7983,
"step": 26500
},
{
"epoch": 1.5406562054208273,
"grad_norm": 9.113426208496094,
"learning_rate": 6.596324029952349e-05,
"loss": 4.7438,
"step": 27000
},
{
"epoch": 1.5691868758915835,
"grad_norm": 8.4766206741333,
"learning_rate": 6.533292993469985e-05,
"loss": 4.7544,
"step": 27500
},
{
"epoch": 1.5977175463623396,
"grad_norm": 9.448740005493164,
"learning_rate": 6.470261956987622e-05,
"loss": 4.7244,
"step": 28000
},
{
"epoch": 1.6262482168330956,
"grad_norm": 7.334268093109131,
"learning_rate": 6.407230920505257e-05,
"loss": 4.7225,
"step": 28500
},
{
"epoch": 1.6547788873038516,
"grad_norm": 8.931607246398926,
"learning_rate": 6.344199884022893e-05,
"loss": 4.6495,
"step": 29000
},
{
"epoch": 1.6833095577746078,
"grad_norm": 7.774843215942383,
"learning_rate": 6.28116884754053e-05,
"loss": 4.7006,
"step": 29500
},
{
"epoch": 1.7118402282453637,
"grad_norm": 7.911451816558838,
"learning_rate": 6.218137811058166e-05,
"loss": 4.6525,
"step": 30000
},
{
"epoch": 1.7403708987161197,
"grad_norm": 7.005200386047363,
"learning_rate": 6.155106774575801e-05,
"loss": 4.6765,
"step": 30500
},
{
"epoch": 1.768901569186876,
"grad_norm": 8.889507293701172,
"learning_rate": 6.0920757380934376e-05,
"loss": 4.6397,
"step": 31000
},
{
"epoch": 1.797432239657632,
"grad_norm": 8.807517051696777,
"learning_rate": 6.029044701611074e-05,
"loss": 4.6291,
"step": 31500
},
{
"epoch": 1.825962910128388,
"grad_norm": 10.036596298217773,
"learning_rate": 5.96601366512871e-05,
"loss": 4.6033,
"step": 32000
},
{
"epoch": 1.854493580599144,
"grad_norm": 8.668290138244629,
"learning_rate": 5.902982628646345e-05,
"loss": 4.617,
"step": 32500
},
{
"epoch": 1.8830242510699002,
"grad_norm": 7.631191253662109,
"learning_rate": 5.8399515921639816e-05,
"loss": 4.591,
"step": 33000
},
{
"epoch": 1.9115549215406562,
"grad_norm": 7.901882171630859,
"learning_rate": 5.776920555681617e-05,
"loss": 4.593,
"step": 33500
},
{
"epoch": 1.9400855920114122,
"grad_norm": 8.088603973388672,
"learning_rate": 5.7138895191992536e-05,
"loss": 4.575,
"step": 34000
},
{
"epoch": 1.9686162624821684,
"grad_norm": 13.519835472106934,
"learning_rate": 5.65085848271689e-05,
"loss": 4.5674,
"step": 34500
},
{
"epoch": 1.9971469329529246,
"grad_norm": 7.497794151306152,
"learning_rate": 5.587827446234526e-05,
"loss": 4.5896,
"step": 35000
},
{
"epoch": 2.0,
"eval_loss": 4.483283996582031,
"eval_runtime": 55.5401,
"eval_samples_per_second": 890.889,
"eval_steps_per_second": 55.69,
"step": 35050
},
{
"epoch": 2.0256776034236803,
"grad_norm": 9.086140632629395,
"learning_rate": 5.524796409752162e-05,
"loss": 4.4731,
"step": 35500
},
{
"epoch": 2.0542082738944365,
"grad_norm": 7.284142017364502,
"learning_rate": 5.461765373269798e-05,
"loss": 4.5119,
"step": 36000
},
{
"epoch": 2.0827389443651927,
"grad_norm": 9.4158296585083,
"learning_rate": 5.3987343367874346e-05,
"loss": 4.4564,
"step": 36500
},
{
"epoch": 2.1112696148359484,
"grad_norm": 10.107216835021973,
"learning_rate": 5.33570330030507e-05,
"loss": 4.5029,
"step": 37000
},
{
"epoch": 2.1398002853067046,
"grad_norm": 8.506832122802734,
"learning_rate": 5.2726722638227066e-05,
"loss": 4.4487,
"step": 37500
},
{
"epoch": 2.168330955777461,
"grad_norm": 10.495549201965332,
"learning_rate": 5.209641227340343e-05,
"loss": 4.433,
"step": 38000
},
{
"epoch": 2.196861626248217,
"grad_norm": 8.59913444519043,
"learning_rate": 5.146610190857979e-05,
"loss": 4.4377,
"step": 38500
},
{
"epoch": 2.2253922967189728,
"grad_norm": 7.879130840301514,
"learning_rate": 5.083579154375615e-05,
"loss": 4.3888,
"step": 39000
},
{
"epoch": 2.253922967189729,
"grad_norm": 8.134923934936523,
"learning_rate": 5.020548117893251e-05,
"loss": 4.4215,
"step": 39500
},
{
"epoch": 2.282453637660485,
"grad_norm": 8.942688941955566,
"learning_rate": 4.957517081410887e-05,
"loss": 4.4249,
"step": 40000
},
{
"epoch": 2.310984308131241,
"grad_norm": 12.744963645935059,
"learning_rate": 4.894486044928523e-05,
"loss": 4.3957,
"step": 40500
},
{
"epoch": 2.339514978601997,
"grad_norm": 10.202946662902832,
"learning_rate": 4.831455008446159e-05,
"loss": 4.4239,
"step": 41000
},
{
"epoch": 2.3680456490727533,
"grad_norm": 7.9722418785095215,
"learning_rate": 4.768423971963795e-05,
"loss": 4.4196,
"step": 41500
},
{
"epoch": 2.3965763195435095,
"grad_norm": 8.571127891540527,
"learning_rate": 4.7053929354814315e-05,
"loss": 4.3274,
"step": 42000
},
{
"epoch": 2.425106990014265,
"grad_norm": 9.048084259033203,
"learning_rate": 4.642361898999067e-05,
"loss": 4.3422,
"step": 42500
},
{
"epoch": 2.4536376604850214,
"grad_norm": 9.029339790344238,
"learning_rate": 4.5793308625167035e-05,
"loss": 4.3474,
"step": 43000
},
{
"epoch": 2.4821683309557776,
"grad_norm": 8.281103134155273,
"learning_rate": 4.51629982603434e-05,
"loss": 4.3849,
"step": 43500
},
{
"epoch": 2.5106990014265333,
"grad_norm": 7.619591236114502,
"learning_rate": 4.4532687895519755e-05,
"loss": 4.3154,
"step": 44000
},
{
"epoch": 2.5392296718972895,
"grad_norm": 10.685304641723633,
"learning_rate": 4.390237753069611e-05,
"loss": 4.2803,
"step": 44500
},
{
"epoch": 2.5677603423680457,
"grad_norm": 8.622846603393555,
"learning_rate": 4.3272067165872475e-05,
"loss": 4.2911,
"step": 45000
},
{
"epoch": 2.596291012838802,
"grad_norm": 10.364981651306152,
"learning_rate": 4.264175680104884e-05,
"loss": 4.3172,
"step": 45500
},
{
"epoch": 2.6248216833095577,
"grad_norm": 6.996392726898193,
"learning_rate": 4.20114464362252e-05,
"loss": 4.3123,
"step": 46000
},
{
"epoch": 2.653352353780314,
"grad_norm": 9.445304870605469,
"learning_rate": 4.138113607140156e-05,
"loss": 4.2621,
"step": 46500
},
{
"epoch": 2.68188302425107,
"grad_norm": 9.185279846191406,
"learning_rate": 4.075082570657792e-05,
"loss": 4.2983,
"step": 47000
},
{
"epoch": 2.710413694721826,
"grad_norm": 7.654629230499268,
"learning_rate": 4.0120515341754285e-05,
"loss": 4.2994,
"step": 47500
},
{
"epoch": 2.738944365192582,
"grad_norm": 8.687335014343262,
"learning_rate": 3.949020497693065e-05,
"loss": 4.2667,
"step": 48000
},
{
"epoch": 2.767475035663338,
"grad_norm": 9.457321166992188,
"learning_rate": 3.8859894612107e-05,
"loss": 4.2709,
"step": 48500
},
{
"epoch": 2.7960057061340944,
"grad_norm": 9.891905784606934,
"learning_rate": 3.822958424728336e-05,
"loss": 4.217,
"step": 49000
},
{
"epoch": 2.82453637660485,
"grad_norm": 7.958982467651367,
"learning_rate": 3.7599273882459725e-05,
"loss": 4.219,
"step": 49500
},
{
"epoch": 2.8530670470756063,
"grad_norm": 9.362072944641113,
"learning_rate": 3.696896351763609e-05,
"loss": 4.2212,
"step": 50000
},
{
"epoch": 2.881597717546362,
"grad_norm": 9.81021499633789,
"learning_rate": 3.6338653152812445e-05,
"loss": 4.1786,
"step": 50500
},
{
"epoch": 2.9101283880171183,
"grad_norm": 9.377098083496094,
"learning_rate": 3.570834278798881e-05,
"loss": 4.1874,
"step": 51000
},
{
"epoch": 2.9386590584878745,
"grad_norm": 7.9157562255859375,
"learning_rate": 3.507803242316517e-05,
"loss": 4.2021,
"step": 51500
},
{
"epoch": 2.9671897289586306,
"grad_norm": 9.986918449401855,
"learning_rate": 3.444772205834153e-05,
"loss": 4.2068,
"step": 52000
},
{
"epoch": 2.995720399429387,
"grad_norm": 9.590192794799805,
"learning_rate": 3.381741169351789e-05,
"loss": 4.2007,
"step": 52500
},
{
"epoch": 3.0,
"eval_loss": 4.12980842590332,
"eval_runtime": 54.7837,
"eval_samples_per_second": 903.188,
"eval_steps_per_second": 56.458,
"step": 52575
},
{
"epoch": 3.0242510699001426,
"grad_norm": 10.125175476074219,
"learning_rate": 3.3187101328694254e-05,
"loss": 4.1689,
"step": 53000
},
{
"epoch": 3.0527817403708988,
"grad_norm": 9.876838684082031,
"learning_rate": 3.255679096387061e-05,
"loss": 4.1606,
"step": 53500
},
{
"epoch": 3.081312410841655,
"grad_norm": 8.361190795898438,
"learning_rate": 3.192648059904697e-05,
"loss": 4.1792,
"step": 54000
},
{
"epoch": 3.1098430813124107,
"grad_norm": 11.448074340820312,
"learning_rate": 3.129617023422333e-05,
"loss": 4.1463,
"step": 54500
},
{
"epoch": 3.138373751783167,
"grad_norm": 9.612030982971191,
"learning_rate": 3.0665859869399694e-05,
"loss": 4.1013,
"step": 55000
},
{
"epoch": 3.166904422253923,
"grad_norm": 8.74341106414795,
"learning_rate": 3.0035549504576054e-05,
"loss": 4.1097,
"step": 55500
},
{
"epoch": 3.195435092724679,
"grad_norm": 8.951421737670898,
"learning_rate": 2.9405239139752418e-05,
"loss": 4.1154,
"step": 56000
},
{
"epoch": 3.223965763195435,
"grad_norm": 9.474740028381348,
"learning_rate": 2.8774928774928778e-05,
"loss": 4.1132,
"step": 56500
},
{
"epoch": 3.2524964336661912,
"grad_norm": 10.41540241241455,
"learning_rate": 2.8144618410105137e-05,
"loss": 4.1416,
"step": 57000
},
{
"epoch": 3.281027104136947,
"grad_norm": 10.558377265930176,
"learning_rate": 2.75143080452815e-05,
"loss": 4.1209,
"step": 57500
},
{
"epoch": 3.309557774607703,
"grad_norm": 8.718255996704102,
"learning_rate": 2.6883997680457857e-05,
"loss": 4.0549,
"step": 58000
},
{
"epoch": 3.3380884450784594,
"grad_norm": 8.449244499206543,
"learning_rate": 2.6253687315634217e-05,
"loss": 4.1046,
"step": 58500
},
{
"epoch": 3.3666191155492156,
"grad_norm": 10.529504776000977,
"learning_rate": 2.562337695081058e-05,
"loss": 4.1058,
"step": 59000
},
{
"epoch": 3.3951497860199713,
"grad_norm": 8.345918655395508,
"learning_rate": 2.499306658598694e-05,
"loss": 4.0668,
"step": 59500
},
{
"epoch": 3.4236804564907275,
"grad_norm": 10.951416015625,
"learning_rate": 2.43627562211633e-05,
"loss": 4.1145,
"step": 60000
},
{
"epoch": 3.4522111269614837,
"grad_norm": 8.29075813293457,
"learning_rate": 2.3732445856339664e-05,
"loss": 4.0765,
"step": 60500
},
{
"epoch": 3.4807417974322394,
"grad_norm": 9.023518562316895,
"learning_rate": 2.3102135491516024e-05,
"loss": 4.0608,
"step": 61000
},
{
"epoch": 3.5092724679029956,
"grad_norm": 10.263666152954102,
"learning_rate": 2.2471825126692384e-05,
"loss": 4.0361,
"step": 61500
},
{
"epoch": 3.537803138373752,
"grad_norm": 10.308072090148926,
"learning_rate": 2.1841514761868744e-05,
"loss": 4.0801,
"step": 62000
},
{
"epoch": 3.566333808844508,
"grad_norm": 11.750104904174805,
"learning_rate": 2.1211204397045107e-05,
"loss": 4.0166,
"step": 62500
},
{
"epoch": 3.5948644793152638,
"grad_norm": 9.711214065551758,
"learning_rate": 2.0580894032221467e-05,
"loss": 4.1,
"step": 63000
},
{
"epoch": 3.62339514978602,
"grad_norm": 9.461030006408691,
"learning_rate": 1.9950583667397827e-05,
"loss": 4.0546,
"step": 63500
},
{
"epoch": 3.651925820256776,
"grad_norm": 9.695231437683105,
"learning_rate": 1.9320273302574187e-05,
"loss": 3.9823,
"step": 64000
},
{
"epoch": 3.680456490727532,
"grad_norm": 8.181650161743164,
"learning_rate": 1.868996293775055e-05,
"loss": 4.0467,
"step": 64500
},
{
"epoch": 3.708987161198288,
"grad_norm": 10.602164268493652,
"learning_rate": 1.805965257292691e-05,
"loss": 4.0006,
"step": 65000
},
{
"epoch": 3.7375178316690443,
"grad_norm": 8.176977157592773,
"learning_rate": 1.7429342208103274e-05,
"loss": 4.0594,
"step": 65500
},
{
"epoch": 3.7660485021398005,
"grad_norm": 11.314457893371582,
"learning_rate": 1.679903184327963e-05,
"loss": 3.9929,
"step": 66000
},
{
"epoch": 3.794579172610556,
"grad_norm": 10.995957374572754,
"learning_rate": 1.6168721478455993e-05,
"loss": 4.0191,
"step": 66500
},
{
"epoch": 3.8231098430813124,
"grad_norm": 8.929314613342285,
"learning_rate": 1.5538411113632353e-05,
"loss": 4.0284,
"step": 67000
},
{
"epoch": 3.8516405135520686,
"grad_norm": 9.887913703918457,
"learning_rate": 1.4908100748808715e-05,
"loss": 3.9552,
"step": 67500
},
{
"epoch": 3.8801711840228243,
"grad_norm": 12.043901443481445,
"learning_rate": 1.4277790383985077e-05,
"loss": 4.0139,
"step": 68000
},
{
"epoch": 3.9087018544935805,
"grad_norm": 8.444652557373047,
"learning_rate": 1.3647480019161435e-05,
"loss": 3.9746,
"step": 68500
},
{
"epoch": 3.9372325249643367,
"grad_norm": 9.180087089538574,
"learning_rate": 1.3017169654337797e-05,
"loss": 3.9663,
"step": 69000
},
{
"epoch": 3.965763195435093,
"grad_norm": 11.611577987670898,
"learning_rate": 1.2386859289514158e-05,
"loss": 3.939,
"step": 69500
},
{
"epoch": 3.9942938659058487,
"grad_norm": 9.529749870300293,
"learning_rate": 1.1756548924690518e-05,
"loss": 3.9911,
"step": 70000
},
{
"epoch": 4.0,
"eval_loss": 3.9361355304718018,
"eval_runtime": 55.3368,
"eval_samples_per_second": 894.161,
"eval_steps_per_second": 55.894,
"step": 70100
},
{
"epoch": 4.022824536376604,
"grad_norm": 10.018484115600586,
"learning_rate": 1.112623855986688e-05,
"loss": 3.9744,
"step": 70500
},
{
"epoch": 4.051355206847361,
"grad_norm": 11.023027420043945,
"learning_rate": 1.049592819504324e-05,
"loss": 3.9143,
"step": 71000
},
{
"epoch": 4.079885877318117,
"grad_norm": 11.09890079498291,
"learning_rate": 9.8656178302196e-06,
"loss": 3.967,
"step": 71500
},
{
"epoch": 4.108416547788873,
"grad_norm": 11.603713035583496,
"learning_rate": 9.235307465395961e-06,
"loss": 3.9525,
"step": 72000
},
{
"epoch": 4.136947218259629,
"grad_norm": 8.631877899169922,
"learning_rate": 8.604997100572321e-06,
"loss": 3.9658,
"step": 72500
},
{
"epoch": 4.165477888730385,
"grad_norm": 7.5662407875061035,
"learning_rate": 7.974686735748683e-06,
"loss": 3.9118,
"step": 73000
},
{
"epoch": 4.194008559201142,
"grad_norm": 10.707962036132812,
"learning_rate": 7.344376370925044e-06,
"loss": 3.948,
"step": 73500
},
{
"epoch": 4.222539229671897,
"grad_norm": 11.31724739074707,
"learning_rate": 6.714066006101405e-06,
"loss": 3.9319,
"step": 74000
},
{
"epoch": 4.251069900142653,
"grad_norm": 9.398261070251465,
"learning_rate": 6.083755641277765e-06,
"loss": 3.9621,
"step": 74500
},
{
"epoch": 4.279600570613409,
"grad_norm": 11.119857788085938,
"learning_rate": 5.453445276454126e-06,
"loss": 3.9429,
"step": 75000
},
{
"epoch": 4.3081312410841655,
"grad_norm": 11.433586120605469,
"learning_rate": 4.823134911630487e-06,
"loss": 3.9126,
"step": 75500
},
{
"epoch": 4.336661911554922,
"grad_norm": 10.043067932128906,
"learning_rate": 4.192824546806848e-06,
"loss": 3.9218,
"step": 76000
},
{
"epoch": 4.365192582025678,
"grad_norm": 9.148512840270996,
"learning_rate": 3.5625141819832086e-06,
"loss": 3.9124,
"step": 76500
},
{
"epoch": 4.393723252496434,
"grad_norm": 9.347081184387207,
"learning_rate": 2.9322038171595694e-06,
"loss": 3.8917,
"step": 77000
},
{
"epoch": 4.422253922967189,
"grad_norm": 8.787015914916992,
"learning_rate": 2.30189345233593e-06,
"loss": 3.9091,
"step": 77500
},
{
"epoch": 4.4507845934379455,
"grad_norm": 8.788630485534668,
"learning_rate": 1.671583087512291e-06,
"loss": 3.9211,
"step": 78000
},
{
"epoch": 4.479315263908702,
"grad_norm": 8.639442443847656,
"learning_rate": 1.0412727226886518e-06,
"loss": 3.9139,
"step": 78500
},
{
"epoch": 4.507845934379458,
"grad_norm": 9.101580619812012,
"learning_rate": 4.1096235786501275e-07,
"loss": 3.911,
"step": 79000
}
],
"logging_steps": 500,
"max_steps": 79326,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 8192,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6441172712886272e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}