smolm-autoreg-bpe-seed_1709 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.1469449996948242,
"learning_rate": 6.25e-05,
"loss": 6.159,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 1.0157864093780518,
"learning_rate": 0.000125,
"loss": 3.6138,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.1782385110855103,
"learning_rate": 0.0001875,
"loss": 3.3436,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.0569684505462646,
"learning_rate": 0.00025,
"loss": 3.1723,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9641144275665283,
"learning_rate": 0.0003125,
"loss": 3.0397,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4385586571217676,
"eval_loss": 3.0116729736328125,
"eval_runtime": 3.3975,
"eval_samples_per_second": 1321.86,
"eval_steps_per_second": 10.596,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.9262954592704773,
"learning_rate": 0.000375,
"loss": 2.9523,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.8675119876861572,
"learning_rate": 0.00043750000000000006,
"loss": 2.86,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.9038915038108826,
"learning_rate": 0.0005,
"loss": 2.8063,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8369466066360474,
"learning_rate": 0.0005625000000000001,
"loss": 2.7738,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.7954362630844116,
"learning_rate": 0.000625,
"loss": 2.7437,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7949953675270081,
"learning_rate": 0.0006875,
"loss": 2.7146,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.45942979572443227,
"eval_loss": 2.777545690536499,
"eval_runtime": 3.4218,
"eval_samples_per_second": 1312.463,
"eval_steps_per_second": 10.521,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.8254726529121399,
"learning_rate": 0.00075,
"loss": 2.6703,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7527750730514526,
"learning_rate": 0.0008125,
"loss": 2.6175,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7464008331298828,
"learning_rate": 0.0008750000000000001,
"loss": 2.6076,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.749565064907074,
"learning_rate": 0.0009375,
"loss": 2.6061,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6759158968925476,
"learning_rate": 0.001,
"loss": 2.5896,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6443790793418884,
"learning_rate": 0.0010625,
"loss": 2.5805,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.47120838352119815,
"eval_loss": 2.6823782920837402,
"eval_runtime": 3.3769,
"eval_samples_per_second": 1329.919,
"eval_steps_per_second": 10.661,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.599865734577179,
"learning_rate": 0.0011250000000000001,
"loss": 2.5413,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.6362124085426331,
"learning_rate": 0.0011875,
"loss": 2.5078,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5414631962776184,
"learning_rate": 0.00125,
"loss": 2.5172,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5505661368370056,
"learning_rate": 0.0013125,
"loss": 2.5145,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.5100904107093811,
"learning_rate": 0.001375,
"loss": 2.511,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.5121994614601135,
"learning_rate": 0.0014375000000000002,
"loss": 2.5051,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.47740274950601114,
"eval_loss": 2.6317946910858154,
"eval_runtime": 3.4238,
"eval_samples_per_second": 1311.689,
"eval_steps_per_second": 10.515,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.44310715794563293,
"learning_rate": 0.0015,
"loss": 2.4617,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4380172789096832,
"learning_rate": 0.0015625,
"loss": 2.4573,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.46541547775268555,
"learning_rate": 0.001625,
"loss": 2.4615,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.4405967891216278,
"learning_rate": 0.0016875,
"loss": 2.4486,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.3662794828414917,
"learning_rate": 0.0017500000000000003,
"loss": 2.4608,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.4047889709472656,
"learning_rate": 0.0018124999999999999,
"loss": 2.4548,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4802483356915055,
"eval_loss": 2.6045305728912354,
"eval_runtime": 3.4368,
"eval_samples_per_second": 1306.728,
"eval_steps_per_second": 10.475,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.39033982157707214,
"learning_rate": 0.001875,
"loss": 2.3964,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.4250138998031616,
"learning_rate": 0.0019375000000000002,
"loss": 2.4074,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.3768446445465088,
"learning_rate": 0.002,
"loss": 2.4077,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.42265185713768005,
"learning_rate": 0.0020625,
"loss": 2.4189,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.350845068693161,
"learning_rate": 0.002125,
"loss": 2.43,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.3043559789657593,
"learning_rate": 0.0021874999999999998,
"loss": 2.42,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.4842475852843044,
"eval_loss": 2.577181577682495,
"eval_runtime": 3.398,
"eval_samples_per_second": 1321.679,
"eval_steps_per_second": 10.595,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.33819225430488586,
"learning_rate": 0.0022500000000000003,
"loss": 2.3613,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.3696514368057251,
"learning_rate": 0.0023125000000000003,
"loss": 2.363,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.331849068403244,
"learning_rate": 0.002375,
"loss": 2.3793,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.31926459074020386,
"learning_rate": 0.0024375,
"loss": 2.3856,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.36476558446884155,
"learning_rate": 0.0025,
"loss": 2.3902,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.485290791556867,
"eval_loss": 2.566494941711426,
"eval_runtime": 3.4233,
"eval_samples_per_second": 1311.904,
"eval_steps_per_second": 10.516,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.28605133295059204,
"learning_rate": 0.0025625,
"loss": 2.3866,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.3234470784664154,
"learning_rate": 0.002625,
"loss": 2.3149,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.37032100558280945,
"learning_rate": 0.0026875000000000002,
"loss": 2.3354,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.30851054191589355,
"learning_rate": 0.00275,
"loss": 2.3576,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.34853699803352356,
"learning_rate": 0.0028125,
"loss": 2.362,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.3221457898616791,
"learning_rate": 0.0028750000000000004,
"loss": 2.357,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.48660751073450487,
"eval_loss": 2.559926748275757,
"eval_runtime": 3.4215,
"eval_samples_per_second": 1312.576,
"eval_steps_per_second": 10.522,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.3174827992916107,
"learning_rate": 0.0029375,
"loss": 2.3456,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.305833637714386,
"learning_rate": 0.003,
"loss": 2.2982,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.3276754915714264,
"learning_rate": 0.002715909090909091,
"loss": 2.3006,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.26479214429855347,
"learning_rate": 0.0024318181818181817,
"loss": 2.3056,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.36134952306747437,
"learning_rate": 0.002147727272727273,
"loss": 2.2974,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.3356594145298004,
"learning_rate": 0.0018636363636363638,
"loss": 2.2797,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4931981197741064,
"eval_loss": 2.5077412128448486,
"eval_runtime": 3.3528,
"eval_samples_per_second": 1339.467,
"eval_steps_per_second": 10.737,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.2849366068840027,
"learning_rate": 0.0015795454545454546,
"loss": 2.2413,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.28835079073905945,
"learning_rate": 0.0012954545454545456,
"loss": 2.1688,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.3054426610469818,
"learning_rate": 0.0010113636363636364,
"loss": 2.1665,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.295037180185318,
"learning_rate": 0.0007272727272727273,
"loss": 2.1614,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.30976712703704834,
"learning_rate": 0.0004431818181818182,
"loss": 2.1518,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.2766718864440918,
"learning_rate": 0.0001590909090909091,
"loss": 2.1365,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4998798997820663,
"eval_loss": 2.475207805633545,
"eval_runtime": 3.4298,
"eval_samples_per_second": 1309.392,
"eval_steps_per_second": 10.496,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5651781655400177,
"train_runtime": 742.1667,
"train_samples_per_second": 631.192,
"train_steps_per_second": 39.452
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
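
A minimal sketch (not part of the original file): one way to load this trainer_state.json and summarize the per-epoch evaluation metrics recorded in "log_history". The local file path used below is an assumption.

```python
import json

# Assumed local path to the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Entries in log_history are either training logs (carrying "loss"/"learning_rate")
# or per-epoch evaluation logs (carrying "eval_loss"/"eval_accuracy"); the final
# entry holds the aggregate training summary ("train_loss", "train_runtime").
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

for entry in eval_logs:
    print(
        f'epoch {entry["epoch"]:>4}: '
        f'eval_loss={entry["eval_loss"]:.4f}, '
        f'eval_accuracy={entry["eval_accuracy"]:.4f}'
    )
```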