smolm-autoreg-bpe-seed_1102 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.801757574081421,
"learning_rate": 6.25e-05,
"loss": 6.1993,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 1.014549970626831,
"learning_rate": 0.000125,
"loss": 3.6289,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.0354454517364502,
"learning_rate": 0.0001875,
"loss": 3.3439,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 0.8663645386695862,
"learning_rate": 0.00025,
"loss": 3.1569,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9717487692832947,
"learning_rate": 0.0003125,
"loss": 3.0413,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4380028648723519,
"eval_loss": 3.010645866394043,
"eval_runtime": 3.3126,
"eval_samples_per_second": 1355.734,
"eval_steps_per_second": 10.868,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.8488046526908875,
"learning_rate": 0.000375,
"loss": 2.9511,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.8335849046707153,
"learning_rate": 0.00043750000000000006,
"loss": 2.8601,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8527885675430298,
"learning_rate": 0.0005,
"loss": 2.8103,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8196571469306946,
"learning_rate": 0.0005625000000000001,
"loss": 2.7757,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.7712026238441467,
"learning_rate": 0.000625,
"loss": 2.7326,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7557732462882996,
"learning_rate": 0.0006875,
"loss": 2.706,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4614636096339661,
"eval_loss": 2.7756640911102295,
"eval_runtime": 3.3754,
"eval_samples_per_second": 1330.505,
"eval_steps_per_second": 10.665,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7290401458740234,
"learning_rate": 0.00075,
"loss": 2.6636,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7280641794204712,
"learning_rate": 0.0008125,
"loss": 2.6292,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7040073871612549,
"learning_rate": 0.0008750000000000001,
"loss": 2.6193,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.6477857828140259,
"learning_rate": 0.0009375,
"loss": 2.5962,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6744890213012695,
"learning_rate": 0.001,
"loss": 2.587,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.567164957523346,
"learning_rate": 0.0010625,
"loss": 2.5796,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.4708138937542627,
"eval_loss": 2.6893413066864014,
"eval_runtime": 3.4141,
"eval_samples_per_second": 1315.426,
"eval_steps_per_second": 10.544,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6116344332695007,
"learning_rate": 0.0011250000000000001,
"loss": 2.5478,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.6196800470352173,
"learning_rate": 0.0011875,
"loss": 2.5082,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5813851356506348,
"learning_rate": 0.00125,
"loss": 2.5135,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.510899543762207,
"learning_rate": 0.0013125,
"loss": 2.5229,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.4730781614780426,
"learning_rate": 0.001375,
"loss": 2.5106,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.4921322166919708,
"learning_rate": 0.0014375000000000002,
"loss": 2.5174,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.4766470824413481,
"eval_loss": 2.634284019470215,
"eval_runtime": 3.4269,
"eval_samples_per_second": 1310.502,
"eval_steps_per_second": 10.505,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.4425903558731079,
"learning_rate": 0.0015,
"loss": 2.4712,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.43356040120124817,
"learning_rate": 0.0015625,
"loss": 2.4604,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.4289402663707733,
"learning_rate": 0.001625,
"loss": 2.4623,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.4142000675201416,
"learning_rate": 0.0016875,
"loss": 2.4576,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.36214813590049744,
"learning_rate": 0.0017500000000000003,
"loss": 2.4678,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.365395188331604,
"learning_rate": 0.0018124999999999999,
"loss": 2.4609,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.47951370808107907,
"eval_loss": 2.6072192192077637,
"eval_runtime": 3.4065,
"eval_samples_per_second": 1318.36,
"eval_steps_per_second": 10.568,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.36923331022262573,
"learning_rate": 0.001875,
"loss": 2.4109,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.337181031703949,
"learning_rate": 0.0019375000000000002,
"loss": 2.4102,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.39215803146362305,
"learning_rate": 0.002,
"loss": 2.4135,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.35966119170188904,
"learning_rate": 0.0020625,
"loss": 2.4291,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.4037434458732605,
"learning_rate": 0.002125,
"loss": 2.4236,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.3071579039096832,
"learning_rate": 0.0021874999999999998,
"loss": 2.4299,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.4825889749753225,
"eval_loss": 2.585719108581543,
"eval_runtime": 3.4287,
"eval_samples_per_second": 1309.825,
"eval_steps_per_second": 10.5,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.3319147527217865,
"learning_rate": 0.0022500000000000003,
"loss": 2.3562,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.35867124795913696,
"learning_rate": 0.0023125000000000003,
"loss": 2.3702,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3402807116508484,
"learning_rate": 0.002375,
"loss": 2.3783,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.33656784892082214,
"learning_rate": 0.0024375,
"loss": 2.3905,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.38668304681777954,
"learning_rate": 0.0025,
"loss": 2.3999,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.4859254817596698,
"eval_loss": 2.5638771057128906,
"eval_runtime": 3.4265,
"eval_samples_per_second": 1310.666,
"eval_steps_per_second": 10.506,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.2988421320915222,
"learning_rate": 0.0025625,
"loss": 2.4033,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.34959354996681213,
"learning_rate": 0.002625,
"loss": 2.3211,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.3704765737056732,
"learning_rate": 0.0026875000000000002,
"loss": 2.3397,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.3484719693660736,
"learning_rate": 0.00275,
"loss": 2.3566,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.3266860246658325,
"learning_rate": 0.0028125,
"loss": 2.3694,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.3507145941257477,
"learning_rate": 0.0028750000000000004,
"loss": 2.3645,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4874315560254367,
"eval_loss": 2.5552921295166016,
"eval_runtime": 3.3792,
"eval_samples_per_second": 1329.004,
"eval_steps_per_second": 10.653,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.3075660467147827,
"learning_rate": 0.0029375,
"loss": 2.3579,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.32643941044807434,
"learning_rate": 0.003,
"loss": 2.2952,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.31350839138031006,
"learning_rate": 0.002715909090909091,
"loss": 2.3125,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.36248230934143066,
"learning_rate": 0.0024318181818181817,
"loss": 2.3118,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.27529385685920715,
"learning_rate": 0.002147727272727273,
"loss": 2.3016,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.27803343534469604,
"learning_rate": 0.0018636363636363638,
"loss": 2.2938,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4938678757339701,
"eval_loss": 2.5048580169677734,
"eval_runtime": 3.376,
"eval_samples_per_second": 1330.268,
"eval_steps_per_second": 10.663,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.33036813139915466,
"learning_rate": 0.0015795454545454546,
"loss": 2.2526,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.29800865054130554,
"learning_rate": 0.0012954545454545456,
"loss": 2.176,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.26307207345962524,
"learning_rate": 0.0010113636363636364,
"loss": 2.1712,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.2642175257205963,
"learning_rate": 0.0007272727272727273,
"loss": 2.1735,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.268196702003479,
"learning_rate": 0.0004431818181818182,
"loss": 2.1658,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.2505429983139038,
"learning_rate": 0.0001590909090909091,
"loss": 2.151,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.499566061256371,
"eval_loss": 2.4763824939727783,
"eval_runtime": 3.4206,
"eval_samples_per_second": 1312.914,
"eval_steps_per_second": 10.524,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5702311156225988,
"train_runtime": 763.0136,
"train_samples_per_second": 613.947,
"train_steps_per_second": 38.374
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
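The JSON above follows the standard Hugging Face Trainer trainer_state.json layout: "log_history" holds one entry per logging step (with "loss", "learning_rate", "grad_norm") plus one per-epoch evaluation entry (with "eval_loss", "eval_accuracy"), and the trailing keys record the run-level configuration. Below is a minimal sketch, not part of the uploaded checkpoint, of how the log could be split and plotted; it assumes the file is saved as trainer_state.json in the working directory and that matplotlib is installed.

import json

import matplotlib.pyplot as plt

# Load the trainer state exactly as uploaded.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; per-epoch evaluation entries carry "eval_loss".
# The final summary entry uses "train_loss", so it is excluded from both lists.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs],
         label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs],
         marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()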