smolm-autoreg-bpe-seed_888 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.4338148832321167,
"learning_rate": 6.25e-05,
"loss": 6.2141,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.9016950130462646,
"learning_rate": 0.000125,
"loss": 3.618,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.043670892715454,
"learning_rate": 0.0001875,
"loss": 3.3456,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 0.9938444495201111,
"learning_rate": 0.00025,
"loss": 3.1599,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9072784781455994,
"learning_rate": 0.0003125,
"loss": 3.0458,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4357148242241263,
"eval_loss": 3.02449631690979,
"eval_runtime": 3.3172,
"eval_samples_per_second": 1353.848,
"eval_steps_per_second": 10.852,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 1.00283682346344,
"learning_rate": 0.000375,
"loss": 2.9549,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.9476096034049988,
"learning_rate": 0.00043750000000000006,
"loss": 2.8709,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8852096199989319,
"learning_rate": 0.0005,
"loss": 2.8262,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8597184419631958,
"learning_rate": 0.0005625000000000001,
"loss": 2.782,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.7898221015930176,
"learning_rate": 0.000625,
"loss": 2.7496,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7760418653488159,
"learning_rate": 0.0006875,
"loss": 2.71,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4585250991922603,
"eval_loss": 2.788106679916382,
"eval_runtime": 3.4209,
"eval_samples_per_second": 1312.812,
"eval_steps_per_second": 10.524,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7942250967025757,
"learning_rate": 0.00075,
"loss": 2.6615,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7591288685798645,
"learning_rate": 0.0008125,
"loss": 2.6299,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.8481829762458801,
"learning_rate": 0.0008750000000000001,
"loss": 2.6236,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.6588869094848633,
"learning_rate": 0.0009375,
"loss": 2.6021,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6927358508110046,
"learning_rate": 0.001,
"loss": 2.5995,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6657132506370544,
"learning_rate": 0.0010625,
"loss": 2.5918,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.4682225343074601,
"eval_loss": 2.6924259662628174,
"eval_runtime": 3.4436,
"eval_samples_per_second": 1304.177,
"eval_steps_per_second": 10.454,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6745225787162781,
"learning_rate": 0.0011250000000000001,
"loss": 2.5379,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.585035502910614,
"learning_rate": 0.0011875,
"loss": 2.5163,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5931078791618347,
"learning_rate": 0.00125,
"loss": 2.5212,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.520709216594696,
"learning_rate": 0.0013125,
"loss": 2.5111,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.5062893629074097,
"learning_rate": 0.001375,
"loss": 2.5212,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.4873392879962921,
"learning_rate": 0.0014375000000000002,
"loss": 2.5122,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.4758668693467425,
"eval_loss": 2.6471331119537354,
"eval_runtime": 3.4093,
"eval_samples_per_second": 1317.297,
"eval_steps_per_second": 10.559,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.44189488887786865,
"learning_rate": 0.0015,
"loss": 2.4578,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4324429929256439,
"learning_rate": 0.0015625,
"loss": 2.4466,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.47547757625579834,
"learning_rate": 0.001625,
"loss": 2.456,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.42384257912635803,
"learning_rate": 0.0016875,
"loss": 2.458,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.39622461795806885,
"learning_rate": 0.0017500000000000003,
"loss": 2.466,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.42361584305763245,
"learning_rate": 0.0018124999999999999,
"loss": 2.4623,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.48033600008415783,
"eval_loss": 2.6053078174591064,
"eval_runtime": 3.4538,
"eval_samples_per_second": 1300.288,
"eval_steps_per_second": 10.423,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.3994704782962799,
"learning_rate": 0.001875,
"loss": 2.4137,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.4023682177066803,
"learning_rate": 0.0019375000000000002,
"loss": 2.3975,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.35793477296829224,
"learning_rate": 0.002,
"loss": 2.406,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.38212522864341736,
"learning_rate": 0.0020625,
"loss": 2.4189,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.38084688782691956,
"learning_rate": 0.002125,
"loss": 2.4184,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.34888383746147156,
"learning_rate": 0.0021874999999999998,
"loss": 2.4246,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.4823645541301325,
"eval_loss": 2.5798158645629883,
"eval_runtime": 3.4334,
"eval_samples_per_second": 1308.029,
"eval_steps_per_second": 10.485,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.3350234925746918,
"learning_rate": 0.0022500000000000003,
"loss": 2.3509,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.33967897295951843,
"learning_rate": 0.0023125000000000003,
"loss": 2.3669,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3444729447364807,
"learning_rate": 0.002375,
"loss": 2.3877,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.3789096176624298,
"learning_rate": 0.0024375,
"loss": 2.384,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.3346331715583801,
"learning_rate": 0.0025,
"loss": 2.3871,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.48582729763989924,
"eval_loss": 2.564739227294922,
"eval_runtime": 3.4251,
"eval_samples_per_second": 1311.202,
"eval_steps_per_second": 10.511,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.31661033630371094,
"learning_rate": 0.0025625,
"loss": 2.3879,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.324239045381546,
"learning_rate": 0.002625,
"loss": 2.3191,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.3616074323654175,
"learning_rate": 0.0026875000000000002,
"loss": 2.3356,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.37798503041267395,
"learning_rate": 0.00275,
"loss": 2.3425,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.3979448974132538,
"learning_rate": 0.0028125,
"loss": 2.3573,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.31257864832878113,
"learning_rate": 0.0028750000000000004,
"loss": 2.3644,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.48534514348031144,
"eval_loss": 2.5570876598358154,
"eval_runtime": 3.4172,
"eval_samples_per_second": 1314.225,
"eval_steps_per_second": 10.535,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.2986924648284912,
"learning_rate": 0.0029375,
"loss": 2.3461,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.32609128952026367,
"learning_rate": 0.003,
"loss": 2.2927,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.3273513615131378,
"learning_rate": 0.002715909090909091,
"loss": 2.3094,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.321181982755661,
"learning_rate": 0.0024318181818181817,
"loss": 2.3011,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.3118550181388855,
"learning_rate": 0.002147727272727273,
"loss": 2.2864,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.30095553398132324,
"learning_rate": 0.0018636363636363638,
"loss": 2.2824,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.49336643540799885,
"eval_loss": 2.5034055709838867,
"eval_runtime": 3.4213,
"eval_samples_per_second": 1312.663,
"eval_steps_per_second": 10.522,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.3036137819290161,
"learning_rate": 0.0015795454545454546,
"loss": 2.2438,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.32144156098365784,
"learning_rate": 0.0012954545454545456,
"loss": 2.1689,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.3083929717540741,
"learning_rate": 0.0010113636363636364,
"loss": 2.162,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.3136695325374603,
"learning_rate": 0.0007272727272727273,
"loss": 2.1547,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.3204050362110138,
"learning_rate": 0.0004431818181818182,
"loss": 2.1479,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.3189040422439575,
"learning_rate": 0.0001590909090909091,
"loss": 2.1369,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4999763306139839,
"eval_loss": 2.471221446990967,
"eval_runtime": 3.3986,
"eval_samples_per_second": 1321.414,
"eval_steps_per_second": 10.592,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.567346385267914,
"train_runtime": 749.2453,
"train_samples_per_second": 625.229,
"train_steps_per_second": 39.079
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
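
For convenience, a minimal sketch of how this trainer state could be loaded and its loss curves plotted. It assumes the JSON above has been saved locally as trainer_state.json (the path is an assumption) and that matplotlib is installed; it is an illustration, not part of the training run itself.

import json

import matplotlib.pyplot as plt

# Assumed local path to the file shown above; adjust as needed.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

log = state["log_history"]

# Entries logged every 500 steps carry "loss"; per-epoch eval entries carry "eval_loss".
train_steps = [e["step"] for e in log if "loss" in e]
train_loss = [e["loss"] for e in log if "loss" in e]
eval_steps = [e["step"] for e in log if "eval_loss" in e]
eval_loss = [e["eval_loss"] for e in log if "eval_loss" in e]

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")

Run as-is, this writes loss_curve.png showing the training loss logged every 500 steps alongside the per-epoch eval loss at steps 2928 through 29280.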