belie-gen-l1-7b-v3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 35190,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"grad_norm": 2.325218677520752,
"learning_rate": 4.92895709008241e-05,
"loss": 0.8164,
"step": 500
},
{
"epoch": 0.28,
"grad_norm": 2.017394781112671,
"learning_rate": 4.85791418016482e-05,
"loss": 0.6693,
"step": 1000
},
{
"epoch": 0.43,
"grad_norm": 1.5872315168380737,
"learning_rate": 4.78687127024723e-05,
"loss": 0.6128,
"step": 1500
},
{
"epoch": 0.57,
"grad_norm": 1.8339617252349854,
"learning_rate": 4.71582836032964e-05,
"loss": 0.5742,
"step": 2000
},
{
"epoch": 0.71,
"grad_norm": 1.8466053009033203,
"learning_rate": 4.644785450412049e-05,
"loss": 0.5523,
"step": 2500
},
{
"epoch": 0.85,
"grad_norm": 1.4517784118652344,
"learning_rate": 4.573742540494459e-05,
"loss": 0.5346,
"step": 3000
},
{
"epoch": 0.99,
"grad_norm": 1.3925143480300903,
"learning_rate": 4.5026996305768686e-05,
"loss": 0.5028,
"step": 3500
},
{
"epoch": 1.14,
"grad_norm": 1.220083236694336,
"learning_rate": 4.4316567206592784e-05,
"loss": 0.4355,
"step": 4000
},
{
"epoch": 1.28,
"grad_norm": 1.5840811729431152,
"learning_rate": 4.360613810741688e-05,
"loss": 0.4228,
"step": 4500
},
{
"epoch": 1.42,
"grad_norm": 1.6422966718673706,
"learning_rate": 4.289570900824098e-05,
"loss": 0.4009,
"step": 5000
},
{
"epoch": 1.56,
"grad_norm": 1.7338138818740845,
"learning_rate": 4.218527990906508e-05,
"loss": 0.3916,
"step": 5500
},
{
"epoch": 1.71,
"grad_norm": 1.0937657356262207,
"learning_rate": 4.147485080988918e-05,
"loss": 0.3851,
"step": 6000
},
{
"epoch": 1.85,
"grad_norm": 1.1090480089187622,
"learning_rate": 4.076442171071328e-05,
"loss": 0.3672,
"step": 6500
},
{
"epoch": 1.99,
"grad_norm": 1.5349233150482178,
"learning_rate": 4.005399261153737e-05,
"loss": 0.3659,
"step": 7000
},
{
"epoch": 2.13,
"grad_norm": 1.6046255826950073,
"learning_rate": 3.934356351236147e-05,
"loss": 0.3117,
"step": 7500
},
{
"epoch": 2.27,
"grad_norm": 1.3127567768096924,
"learning_rate": 3.8633134413185566e-05,
"loss": 0.3043,
"step": 8000
},
{
"epoch": 2.42,
"grad_norm": 1.3286867141723633,
"learning_rate": 3.7922705314009665e-05,
"loss": 0.3029,
"step": 8500
},
{
"epoch": 2.56,
"grad_norm": 0.8038182258605957,
"learning_rate": 3.721227621483376e-05,
"loss": 0.2934,
"step": 9000
},
{
"epoch": 2.7,
"grad_norm": 1.097979187965393,
"learning_rate": 3.650184711565786e-05,
"loss": 0.2822,
"step": 9500
},
{
"epoch": 2.84,
"grad_norm": 1.42654287815094,
"learning_rate": 3.579141801648196e-05,
"loss": 0.2768,
"step": 10000
},
{
"epoch": 2.98,
"grad_norm": 1.3376920223236084,
"learning_rate": 3.508098891730606e-05,
"loss": 0.2733,
"step": 10500
},
{
"epoch": 3.13,
"grad_norm": 1.3901501893997192,
"learning_rate": 3.437055981813016e-05,
"loss": 0.2358,
"step": 11000
},
{
"epoch": 3.27,
"grad_norm": 0.9437574148178101,
"learning_rate": 3.366013071895425e-05,
"loss": 0.2347,
"step": 11500
},
{
"epoch": 3.41,
"grad_norm": 1.7849817276000977,
"learning_rate": 3.294970161977835e-05,
"loss": 0.2282,
"step": 12000
},
{
"epoch": 3.55,
"grad_norm": 1.3517547845840454,
"learning_rate": 3.2239272520602446e-05,
"loss": 0.2196,
"step": 12500
},
{
"epoch": 3.69,
"grad_norm": 1.3112218379974365,
"learning_rate": 3.152884342142654e-05,
"loss": 0.2196,
"step": 13000
},
{
"epoch": 3.84,
"grad_norm": 1.532402515411377,
"learning_rate": 3.081841432225064e-05,
"loss": 0.222,
"step": 13500
},
{
"epoch": 3.98,
"grad_norm": 1.0918519496917725,
"learning_rate": 3.0107985223074735e-05,
"loss": 0.217,
"step": 14000
},
{
"epoch": 4.12,
"grad_norm": 1.1216357946395874,
"learning_rate": 2.9397556123898834e-05,
"loss": 0.1902,
"step": 14500
},
{
"epoch": 4.26,
"grad_norm": 0.9181265234947205,
"learning_rate": 2.8687127024722932e-05,
"loss": 0.1854,
"step": 15000
},
{
"epoch": 4.4,
"grad_norm": 1.3160160779953003,
"learning_rate": 2.797669792554703e-05,
"loss": 0.1784,
"step": 15500
},
{
"epoch": 4.55,
"grad_norm": 1.198168396949768,
"learning_rate": 2.7266268826371126e-05,
"loss": 0.1812,
"step": 16000
},
{
"epoch": 4.69,
"grad_norm": 1.515939474105835,
"learning_rate": 2.6555839727195225e-05,
"loss": 0.1824,
"step": 16500
},
{
"epoch": 4.83,
"grad_norm": 1.1988515853881836,
"learning_rate": 2.5845410628019323e-05,
"loss": 0.178,
"step": 17000
},
{
"epoch": 4.97,
"grad_norm": 0.8168569803237915,
"learning_rate": 2.5134981528843422e-05,
"loss": 0.1758,
"step": 17500
},
{
"epoch": 5.12,
"grad_norm": 1.039239764213562,
"learning_rate": 2.442455242966752e-05,
"loss": 0.1517,
"step": 18000
},
{
"epoch": 5.26,
"grad_norm": 0.9721378087997437,
"learning_rate": 2.371412333049162e-05,
"loss": 0.1452,
"step": 18500
},
{
"epoch": 5.4,
"grad_norm": 1.0437672138214111,
"learning_rate": 2.3003694231315714e-05,
"loss": 0.1513,
"step": 19000
},
{
"epoch": 5.54,
"grad_norm": 1.620819091796875,
"learning_rate": 2.2293265132139813e-05,
"loss": 0.1539,
"step": 19500
},
{
"epoch": 5.68,
"grad_norm": 1.2683171033859253,
"learning_rate": 2.158283603296391e-05,
"loss": 0.152,
"step": 20000
},
{
"epoch": 5.83,
"grad_norm": 1.0495145320892334,
"learning_rate": 2.0872406933788007e-05,
"loss": 0.1435,
"step": 20500
},
{
"epoch": 5.97,
"grad_norm": 1.5612547397613525,
"learning_rate": 2.0161977834612105e-05,
"loss": 0.1471,
"step": 21000
},
{
"epoch": 6.11,
"grad_norm": 1.3628000020980835,
"learning_rate": 1.9451548735436204e-05,
"loss": 0.1349,
"step": 21500
},
{
"epoch": 6.25,
"grad_norm": 0.9959810376167297,
"learning_rate": 1.8741119636260302e-05,
"loss": 0.1229,
"step": 22000
},
{
"epoch": 6.39,
"grad_norm": 0.9821630120277405,
"learning_rate": 1.80306905370844e-05,
"loss": 0.1288,
"step": 22500
},
{
"epoch": 6.54,
"grad_norm": 1.5605982542037964,
"learning_rate": 1.7320261437908496e-05,
"loss": 0.1285,
"step": 23000
},
{
"epoch": 6.68,
"grad_norm": 1.138551115989685,
"learning_rate": 1.6609832338732595e-05,
"loss": 0.128,
"step": 23500
},
{
"epoch": 6.82,
"grad_norm": 0.9666039347648621,
"learning_rate": 1.5899403239556693e-05,
"loss": 0.127,
"step": 24000
},
{
"epoch": 6.96,
"grad_norm": 0.7725051641464233,
"learning_rate": 1.518897414038079e-05,
"loss": 0.1268,
"step": 24500
},
{
"epoch": 7.1,
"grad_norm": 0.8455806970596313,
"learning_rate": 1.4478545041204889e-05,
"loss": 0.1077,
"step": 25000
},
{
"epoch": 7.25,
"grad_norm": 0.9111607074737549,
"learning_rate": 1.3768115942028985e-05,
"loss": 0.1129,
"step": 25500
},
{
"epoch": 7.39,
"grad_norm": 1.432874083518982,
"learning_rate": 1.3057686842853084e-05,
"loss": 0.1105,
"step": 26000
},
{
"epoch": 7.53,
"grad_norm": 1.1109156608581543,
"learning_rate": 1.2347257743677183e-05,
"loss": 0.1151,
"step": 26500
},
{
"epoch": 7.67,
"grad_norm": 0.8510033488273621,
"learning_rate": 1.163682864450128e-05,
"loss": 0.1113,
"step": 27000
},
{
"epoch": 7.81,
"grad_norm": 0.6120481491088867,
"learning_rate": 1.0926399545325378e-05,
"loss": 0.1116,
"step": 27500
},
{
"epoch": 7.96,
"grad_norm": 0.8336113691329956,
"learning_rate": 1.0215970446149475e-05,
"loss": 0.112,
"step": 28000
},
{
"epoch": 8.1,
"grad_norm": 1.1527196168899536,
"learning_rate": 9.505541346973572e-06,
"loss": 0.0994,
"step": 28500
},
{
"epoch": 8.24,
"grad_norm": 0.9409528970718384,
"learning_rate": 8.79511224779767e-06,
"loss": 0.0999,
"step": 29000
},
{
"epoch": 8.38,
"grad_norm": 1.298326849937439,
"learning_rate": 8.084683148621767e-06,
"loss": 0.1007,
"step": 29500
},
{
"epoch": 8.53,
"grad_norm": 1.2974679470062256,
"learning_rate": 7.374254049445865e-06,
"loss": 0.1007,
"step": 30000
},
{
"epoch": 8.67,
"grad_norm": 1.1797147989273071,
"learning_rate": 6.6638249502699635e-06,
"loss": 0.1012,
"step": 30500
},
{
"epoch": 8.81,
"grad_norm": 1.3059791326522827,
"learning_rate": 5.953395851094061e-06,
"loss": 0.0987,
"step": 31000
},
{
"epoch": 8.95,
"grad_norm": 0.8831413388252258,
"learning_rate": 5.242966751918159e-06,
"loss": 0.0988,
"step": 31500
},
{
"epoch": 9.09,
"grad_norm": 0.7082082629203796,
"learning_rate": 4.532537652742257e-06,
"loss": 0.0961,
"step": 32000
},
{
"epoch": 9.24,
"grad_norm": 0.956742525100708,
"learning_rate": 3.822108553566354e-06,
"loss": 0.0911,
"step": 32500
},
{
"epoch": 9.38,
"grad_norm": 0.6049565672874451,
"learning_rate": 3.111679454390452e-06,
"loss": 0.0907,
"step": 33000
},
{
"epoch": 9.52,
"grad_norm": 1.5750799179077148,
"learning_rate": 2.40125035521455e-06,
"loss": 0.0957,
"step": 33500
},
{
"epoch": 9.66,
"grad_norm": 1.0990025997161865,
"learning_rate": 1.6908212560386474e-06,
"loss": 0.0913,
"step": 34000
},
{
"epoch": 9.8,
"grad_norm": 1.086700677871704,
"learning_rate": 9.80392156862745e-07,
"loss": 0.0937,
"step": 34500
},
{
"epoch": 9.95,
"grad_norm": 0.9267112612724304,
"learning_rate": 2.6996305768684286e-07,
"loss": 0.0941,
"step": 35000
},
{
"epoch": 10.0,
"step": 35190,
"total_flos": 1.838973321216e+16,
"train_loss": 0.22781734634577194,
"train_runtime": 7496.2711,
"train_samples_per_second": 4.694,
"train_steps_per_second": 4.694
}
],
"logging_steps": 500,
"max_steps": 35190,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000,
"total_flos": 1.838973321216e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
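
The file above is a standard Hugging Face `Trainer` state dump: loss, gradient norm, and learning rate logged every 500 steps over 10 epochs (35190 steps), followed by the end-of-training summary row. The Python sketch below is an illustration added here, not part of the checkpoint; it assumes the file is saved locally as `trainer_state.json` and shows one way to load and summarize the log. The peak learning rate of 5e-5 used in the decay check is inferred from the logged values, not stated anywhere in the file.

```python
import json

# Minimal sketch: load this trainer_state.json (local path assumed) and
# summarize the logged training curve. Intermediate rows carry "loss" and
# "learning_rate"; the final row carries the aggregate train_loss and
# runtime summary instead.
with open("trainer_state.json") as f:
    state = json.load(f)

history = [row for row in state["log_history"] if "loss" in row]
summary = state["log_history"][-1]

print(f"logged points: {len(history)}")
print(f"first logged loss: {history[0]['loss']:.4f} (step {history[0]['step']})")
print(f"last logged loss:  {history[-1]['loss']:.4f} (step {history[-1]['step']})")
print(f"reported mean train_loss: {summary['train_loss']:.4f}")
print(f"global_step: {state['global_step']} over {state['num_train_epochs']} epochs")

# The logged learning rates are consistent with a linear decay to 0 over
# max_steps from a peak of 5e-5 (the peak value is an inference from the
# logged numbers, not something stated in this file).
peak_lr = 5e-5
for row in history[:3]:
    estimate = peak_lr * (1 - row["step"] / state["max_steps"])
    print(f"step {row['step']:>5}: logged lr {row['learning_rate']:.4e}, "
          f"linear-decay estimate {estimate:.4e}")
```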