smolm-autoreg-bpe-seed_2309 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.091705322265625,
"learning_rate": 6.25e-05,
"loss": 6.1952,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.9577378034591675,
"learning_rate": 0.000125,
"loss": 3.6423,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.2023097276687622,
"learning_rate": 0.0001875,
"loss": 3.3344,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.0954350233078003,
"learning_rate": 0.00025,
"loss": 3.1657,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.900829553604126,
"learning_rate": 0.0003125,
"loss": 3.0551,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4362285375650689,
"eval_loss": 3.0203442573547363,
"eval_runtime": 3.2937,
"eval_samples_per_second": 1363.516,
"eval_steps_per_second": 10.93,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.9360588192939758,
"learning_rate": 0.000375,
"loss": 2.9474,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.8689121007919312,
"learning_rate": 0.00043750000000000006,
"loss": 2.8718,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8661333918571472,
"learning_rate": 0.0005,
"loss": 2.815,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8203218579292297,
"learning_rate": 0.0005625000000000001,
"loss": 2.7786,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.8360357880592346,
"learning_rate": 0.000625,
"loss": 2.743,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7822497487068176,
"learning_rate": 0.0006875,
"loss": 2.7076,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.45947012134505233,
"eval_loss": 2.7878334522247314,
"eval_runtime": 3.3426,
"eval_samples_per_second": 1343.555,
"eval_steps_per_second": 10.77,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7363527417182922,
"learning_rate": 0.00075,
"loss": 2.681,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7337239384651184,
"learning_rate": 0.0008125,
"loss": 2.6194,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7000368237495422,
"learning_rate": 0.0008750000000000001,
"loss": 2.6334,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.7184774279594421,
"learning_rate": 0.0009375,
"loss": 2.604,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6264050602912903,
"learning_rate": 0.001,
"loss": 2.6012,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6527601480484009,
"learning_rate": 0.0010625,
"loss": 2.5796,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.47054914728845265,
"eval_loss": 2.6938107013702393,
"eval_runtime": 3.3469,
"eval_samples_per_second": 1341.821,
"eval_steps_per_second": 10.756,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.651335597038269,
"learning_rate": 0.0011250000000000001,
"loss": 2.5461,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.6495278477668762,
"learning_rate": 0.0011875,
"loss": 2.5131,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5576895475387573,
"learning_rate": 0.00125,
"loss": 2.5184,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5400159955024719,
"learning_rate": 0.0013125,
"loss": 2.5233,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.516352117061615,
"learning_rate": 0.001375,
"loss": 2.5253,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.48890888690948486,
"learning_rate": 0.0014375000000000002,
"loss": 2.4976,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.47683643752947713,
"eval_loss": 2.6370015144348145,
"eval_runtime": 3.3506,
"eval_samples_per_second": 1340.337,
"eval_steps_per_second": 10.744,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.49809128046035767,
"learning_rate": 0.0015,
"loss": 2.4639,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.453357994556427,
"learning_rate": 0.0015625,
"loss": 2.4492,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.454194039106369,
"learning_rate": 0.001625,
"loss": 2.4562,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.4470754563808441,
"learning_rate": 0.0016875,
"loss": 2.455,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.3900794982910156,
"learning_rate": 0.0017500000000000003,
"loss": 2.464,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.3795139789581299,
"learning_rate": 0.0018124999999999999,
"loss": 2.4691,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.47998008264998937,
"eval_loss": 2.614906072616577,
"eval_runtime": 3.405,
"eval_samples_per_second": 1318.952,
"eval_steps_per_second": 10.573,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.40433719754219055,
"learning_rate": 0.001875,
"loss": 2.4061,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.3789573907852173,
"learning_rate": 0.0019375000000000002,
"loss": 2.4053,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.4039715826511383,
"learning_rate": 0.002,
"loss": 2.4111,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.39668989181518555,
"learning_rate": 0.0020625,
"loss": 2.4235,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.35224515199661255,
"learning_rate": 0.002125,
"loss": 2.4232,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.360273152589798,
"learning_rate": 0.0021874999999999998,
"loss": 2.421,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.4827081985493296,
"eval_loss": 2.5835256576538086,
"eval_runtime": 3.4064,
"eval_samples_per_second": 1318.419,
"eval_steps_per_second": 10.568,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.34586748480796814,
"learning_rate": 0.0022500000000000003,
"loss": 2.3503,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.4033253490924835,
"learning_rate": 0.0023125000000000003,
"loss": 2.3652,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3413168489933014,
"learning_rate": 0.002375,
"loss": 2.3791,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.33606722950935364,
"learning_rate": 0.0024375,
"loss": 2.3848,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.31669604778289795,
"learning_rate": 0.0025,
"loss": 2.3918,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.4860113928644691,
"eval_loss": 2.5660340785980225,
"eval_runtime": 3.4098,
"eval_samples_per_second": 1317.099,
"eval_steps_per_second": 10.558,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.29715919494628906,
"learning_rate": 0.0025625,
"loss": 2.3943,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.44059836864471436,
"learning_rate": 0.002625,
"loss": 2.3054,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.33299922943115234,
"learning_rate": 0.0026875000000000002,
"loss": 2.3385,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.3407984673976898,
"learning_rate": 0.00275,
"loss": 2.3518,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.32114025950431824,
"learning_rate": 0.0028125,
"loss": 2.3566,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.3123725652694702,
"learning_rate": 0.0028750000000000004,
"loss": 2.3654,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4854293012972577,
"eval_loss": 2.5585596561431885,
"eval_runtime": 3.3383,
"eval_samples_per_second": 1345.276,
"eval_steps_per_second": 10.784,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.3411466181278229,
"learning_rate": 0.0029375,
"loss": 2.3512,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.35489559173583984,
"learning_rate": 0.003,
"loss": 2.2925,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.32344290614128113,
"learning_rate": 0.002715909090909091,
"loss": 2.3065,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.3349243104457855,
"learning_rate": 0.0024318181818181817,
"loss": 2.3006,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.32749468088150024,
"learning_rate": 0.002147727272727273,
"loss": 2.2903,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.29682689905166626,
"learning_rate": 0.0018636363636363638,
"loss": 2.2908,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4947567926754647,
"eval_loss": 2.505155324935913,
"eval_runtime": 3.3638,
"eval_samples_per_second": 1335.116,
"eval_steps_per_second": 10.702,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.35732176899909973,
"learning_rate": 0.0015795454545454546,
"loss": 2.2388,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.3471290171146393,
"learning_rate": 0.0012954545454545456,
"loss": 2.1577,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.31934648752212524,
"learning_rate": 0.0010113636363636364,
"loss": 2.1666,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.3358185887336731,
"learning_rate": 0.0007272727272727273,
"loss": 2.1574,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.3271017074584961,
"learning_rate": 0.0004431818181818182,
"loss": 2.1576,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.333063006401062,
"learning_rate": 0.0001590909090909091,
"loss": 2.1465,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4999026925241559,
"eval_loss": 2.4751529693603516,
"eval_runtime": 3.4017,
"eval_samples_per_second": 1320.234,
"eval_steps_per_second": 10.583,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5679440357646004,
"train_runtime": 768.0298,
"train_samples_per_second": 609.937,
"train_steps_per_second": 38.124
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
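
A minimal sketch of how this file can be consumed, assuming a local copy named trainer_state.json (the path is an assumption, not part of the checkpoint itself). It separates the periodic training-loss entries from the end-of-epoch evaluation entries in "log_history" and prints the eval metrics recorded above.

import json

# Assumed local path to this file; adjust to the downloaded checkpoint folder.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; end-of-epoch evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global steps: {state['global_step']}, epochs: {state['epoch']}")
for e in eval_logs:
    print(f"epoch {e['epoch']:>4}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_accuracy={e['eval_accuracy']:.4f}")

Run against the JSON above, this would list one line per epoch, ending with eval_loss 2.4752 and eval_accuracy 0.4999 at epoch 10.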