smolm-autoreg-bpe-seed_8128 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.2734763622283936,
"learning_rate": 6.25e-05,
"loss": 6.1949,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.9611207842826843,
"learning_rate": 0.000125,
"loss": 3.6204,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 0.8855475187301636,
"learning_rate": 0.0001875,
"loss": 3.342,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 0.9431908130645752,
"learning_rate": 0.00025,
"loss": 3.1768,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 1.0506539344787598,
"learning_rate": 0.0003125,
"loss": 3.0573,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4374067470023161,
"eval_loss": 3.0220537185668945,
"eval_runtime": 3.3384,
"eval_samples_per_second": 1345.248,
"eval_steps_per_second": 10.784,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.9601704478263855,
"learning_rate": 0.000375,
"loss": 2.9605,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.86359703540802,
"learning_rate": 0.00043750000000000006,
"loss": 2.8692,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.9397691488265991,
"learning_rate": 0.0005,
"loss": 2.8139,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8730801343917847,
"learning_rate": 0.0005625000000000001,
"loss": 2.7856,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.855320394039154,
"learning_rate": 0.000625,
"loss": 2.7465,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7909379005432129,
"learning_rate": 0.0006875,
"loss": 2.7148,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4588617304600452,
"eval_loss": 2.7909772396087646,
"eval_runtime": 3.4393,
"eval_samples_per_second": 1305.804,
"eval_steps_per_second": 10.467,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.8202732801437378,
"learning_rate": 0.00075,
"loss": 2.6759,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.8122907280921936,
"learning_rate": 0.0008125,
"loss": 2.6193,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7012184858322144,
"learning_rate": 0.0008750000000000001,
"loss": 2.6282,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.7719469666481018,
"learning_rate": 0.0009375,
"loss": 2.6113,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6816595196723938,
"learning_rate": 0.001,
"loss": 2.5932,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6456273794174194,
"learning_rate": 0.0010625,
"loss": 2.5912,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.46825584677666793,
"eval_loss": 2.6989104747772217,
"eval_runtime": 3.4692,
"eval_samples_per_second": 1294.527,
"eval_steps_per_second": 10.377,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6302840709686279,
"learning_rate": 0.0011250000000000001,
"loss": 2.5446,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.6132036447525024,
"learning_rate": 0.0011875,
"loss": 2.5258,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.6050586104393005,
"learning_rate": 0.00125,
"loss": 2.5124,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5358923673629761,
"learning_rate": 0.0013125,
"loss": 2.521,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.4991964101791382,
"learning_rate": 0.001375,
"loss": 2.5118,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.4726608097553253,
"learning_rate": 0.0014375000000000002,
"loss": 2.5153,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.47618772102385,
"eval_loss": 2.640246868133545,
"eval_runtime": 3.4098,
"eval_samples_per_second": 1317.084,
"eval_steps_per_second": 10.558,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.4862295091152191,
"learning_rate": 0.0015,
"loss": 2.468,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4742026627063751,
"learning_rate": 0.0015625,
"loss": 2.4564,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.42348113656044006,
"learning_rate": 0.001625,
"loss": 2.4542,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.4266359210014343,
"learning_rate": 0.0016875,
"loss": 2.464,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.4484919309616089,
"learning_rate": 0.0017500000000000003,
"loss": 2.4581,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.3841446340084076,
"learning_rate": 0.0018124999999999999,
"loss": 2.4585,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4799169642872797,
"eval_loss": 2.609379529953003,
"eval_runtime": 3.3919,
"eval_samples_per_second": 1324.054,
"eval_steps_per_second": 10.614,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.4488911032676697,
"learning_rate": 0.001875,
"loss": 2.4088,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.4177059829235077,
"learning_rate": 0.0019375000000000002,
"loss": 2.409,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.34312692284584045,
"learning_rate": 0.002,
"loss": 2.4161,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.3501368463039398,
"learning_rate": 0.0020625,
"loss": 2.4148,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.36941683292388916,
"learning_rate": 0.002125,
"loss": 2.4199,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.33993226289749146,
"learning_rate": 0.0021874999999999998,
"loss": 2.4202,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.48289404706175254,
"eval_loss": 2.5849053859710693,
"eval_runtime": 3.3568,
"eval_samples_per_second": 1337.875,
"eval_steps_per_second": 10.724,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.38930949568748474,
"learning_rate": 0.0022500000000000003,
"loss": 2.3482,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.33626502752304077,
"learning_rate": 0.0023125000000000003,
"loss": 2.3696,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3184686303138733,
"learning_rate": 0.002375,
"loss": 2.3821,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.34031516313552856,
"learning_rate": 0.0024375,
"loss": 2.3775,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.34857431054115295,
"learning_rate": 0.0025,
"loss": 2.395,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.48452635805293875,
"eval_loss": 2.5702977180480957,
"eval_runtime": 3.3619,
"eval_samples_per_second": 1335.857,
"eval_steps_per_second": 10.708,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.3316744863986969,
"learning_rate": 0.0025625,
"loss": 2.3917,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.45487892627716064,
"learning_rate": 0.002625,
"loss": 2.3159,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.35938137769699097,
"learning_rate": 0.0026875000000000002,
"loss": 2.3368,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.29650333523750305,
"learning_rate": 0.00275,
"loss": 2.3457,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.35303518176078796,
"learning_rate": 0.0028125,
"loss": 2.3632,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.3135327100753784,
"learning_rate": 0.0028750000000000004,
"loss": 2.363,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.48588866271475584,
"eval_loss": 2.5576674938201904,
"eval_runtime": 3.3548,
"eval_samples_per_second": 1338.695,
"eval_steps_per_second": 10.731,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.3409130275249481,
"learning_rate": 0.0029375,
"loss": 2.3464,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.30268123745918274,
"learning_rate": 0.003,
"loss": 2.2887,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.35700365900993347,
"learning_rate": 0.002715909090909091,
"loss": 2.3084,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.3313594162464142,
"learning_rate": 0.0024318181818181817,
"loss": 2.2992,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.3504340648651123,
"learning_rate": 0.002147727272727273,
"loss": 2.2959,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.3072393536567688,
"learning_rate": 0.0018636363636363638,
"loss": 2.2878,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4939870993079773,
"eval_loss": 2.509469509124756,
"eval_runtime": 3.3506,
"eval_samples_per_second": 1340.373,
"eval_steps_per_second": 10.744,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.323819637298584,
"learning_rate": 0.0015795454545454546,
"loss": 2.2377,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.30831000208854675,
"learning_rate": 0.0012954545454545456,
"loss": 2.1654,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.33950263261795044,
"learning_rate": 0.0010113636363636364,
"loss": 2.1686,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.33820563554763794,
"learning_rate": 0.0007272727272727273,
"loss": 2.1555,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.32013264298439026,
"learning_rate": 0.0004431818181818182,
"loss": 2.143,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.3225158452987671,
"learning_rate": 0.0001590909090909091,
"loss": 2.1407,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.49941703178886204,
"eval_loss": 2.4756977558135986,
"eval_runtime": 3.3496,
"eval_samples_per_second": 1340.777,
"eval_steps_per_second": 10.748,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5683612552496906,
"train_runtime": 751.927,
"train_samples_per_second": 622.999,
"train_steps_per_second": 38.94
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
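
The JSON above appears to be the trainer state written out by the Hugging Face transformers Trainer: "log_history" interleaves training logs (one every "logging_steps" = 500 optimizer steps, with loss, learning rate, and gradient norm) with end-of-epoch evaluation entries, and the final entry holds the run-level summary. Below is a minimal sketch, not part of the original upload, showing one way to load the file and tabulate the per-epoch eval metrics; the local path "trainer_state.json" is an assumption, so point it at wherever the file is saved.

import json

# Load the raw trainer state (path is an assumption; adjust as needed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Split log_history: entries with "eval_loss" are the end-of-epoch evaluations,
# entries with "loss" are the per-500-step training logs. The last entry, which
# has neither key, is the run summary (train_loss, train_runtime, ...).
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]
train_logs = [e for e in state["log_history"] if "loss" in e]

# Per-epoch evaluation table.
print(f"{'epoch':>6} {'step':>7} {'eval_loss':>10} {'eval_acc':>9}")
for e in eval_logs:
    print(f"{e['epoch']:>6.1f} {e['step']:>7d} {e['eval_loss']:>10.4f} {e['eval_accuracy']:>9.4f}")

# Peak learning rate reached before the decay phase (0.003 at step 24000 here).
peak_lr = max(e["learning_rate"] for e in train_logs)
print(f"\npeak learning rate: {peak_lr}")

# Run-level summary from the final log_history entry and the top-level fields.
summary = state["log_history"][-1]
print(f"mean training loss over the run: {summary['train_loss']:.4f}")
print(f"total steps: {state['global_step']} over {state['num_train_epochs']} epochs")

Running this against the file above prints the ten eval rows (loss falling from about 3.02 to 2.48 and accuracy rising from about 0.437 to 0.499) followed by the peak learning rate and the run summary.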