smolm-autoreg-bpe-seed_1729 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.1839450597763062,
"learning_rate": 6.25e-05,
"loss": 6.2404,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.9727169871330261,
"learning_rate": 0.000125,
"loss": 3.63,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.0313897132873535,
"learning_rate": 0.0001875,
"loss": 3.3461,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.0492364168167114,
"learning_rate": 0.00025,
"loss": 3.1763,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9632183909416199,
"learning_rate": 0.0003125,
"loss": 3.0421,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.437592595514739,
"eval_loss": 3.014521598815918,
"eval_runtime": 3.2929,
"eval_samples_per_second": 1363.845,
"eval_steps_per_second": 10.933,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.8621485829353333,
"learning_rate": 0.000375,
"loss": 2.9614,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.8864262700080872,
"learning_rate": 0.00043750000000000006,
"loss": 2.8642,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.974511981010437,
"learning_rate": 0.0005,
"loss": 2.8239,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.843772828578949,
"learning_rate": 0.0005625000000000001,
"loss": 2.7889,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.8417291045188904,
"learning_rate": 0.000625,
"loss": 2.7435,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7840189933776855,
"learning_rate": 0.0006875,
"loss": 2.7062,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.459005500063995,
"eval_loss": 2.790151596069336,
"eval_runtime": 3.3884,
"eval_samples_per_second": 1325.389,
"eval_steps_per_second": 10.624,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7942721843719482,
"learning_rate": 0.00075,
"loss": 2.6608,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7657195925712585,
"learning_rate": 0.0008125,
"loss": 2.6336,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.6993751525878906,
"learning_rate": 0.0008750000000000001,
"loss": 2.6207,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.6740849614143372,
"learning_rate": 0.0009375,
"loss": 2.6097,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6975589990615845,
"learning_rate": 0.001,
"loss": 2.5888,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6254838705062866,
"learning_rate": 0.0010625,
"loss": 2.5829,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.46816818238401564,
"eval_loss": 2.6946029663085938,
"eval_runtime": 3.3593,
"eval_samples_per_second": 1336.889,
"eval_steps_per_second": 10.717,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6288382411003113,
"learning_rate": 0.0011250000000000001,
"loss": 2.5418,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.5383425354957581,
"learning_rate": 0.0011875,
"loss": 2.5137,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5171464681625366,
"learning_rate": 0.00125,
"loss": 2.5227,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5070509910583496,
"learning_rate": 0.0013125,
"loss": 2.5239,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.4720727801322937,
"learning_rate": 0.001375,
"loss": 2.5135,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.5138970017433167,
"learning_rate": 0.0014375000000000002,
"loss": 2.5042,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.47502003131372106,
"eval_loss": 2.644279956817627,
"eval_runtime": 3.3434,
"eval_samples_per_second": 1343.254,
"eval_steps_per_second": 10.768,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.499744713306427,
"learning_rate": 0.0015,
"loss": 2.4579,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4518820345401764,
"learning_rate": 0.0015625,
"loss": 2.4445,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.4289190173149109,
"learning_rate": 0.001625,
"loss": 2.462,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.44824743270874023,
"learning_rate": 0.0016875,
"loss": 2.4688,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.4198445677757263,
"learning_rate": 0.0017500000000000003,
"loss": 2.4673,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.3872630298137665,
"learning_rate": 0.0018124999999999999,
"loss": 2.4588,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4792787675087708,
"eval_loss": 2.6083338260650635,
"eval_runtime": 3.3503,
"eval_samples_per_second": 1340.489,
"eval_steps_per_second": 10.745,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.39676180481910706,
"learning_rate": 0.001875,
"loss": 2.4106,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.4363040626049042,
"learning_rate": 0.0019375000000000002,
"loss": 2.3922,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.41588276624679565,
"learning_rate": 0.002,
"loss": 2.4188,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.38543984293937683,
"learning_rate": 0.0020625,
"loss": 2.4218,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.35857653617858887,
"learning_rate": 0.002125,
"loss": 2.4239,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.331021249294281,
"learning_rate": 0.0021874999999999998,
"loss": 2.4252,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.48289229377389953,
"eval_loss": 2.5867631435394287,
"eval_runtime": 3.3509,
"eval_samples_per_second": 1340.236,
"eval_steps_per_second": 10.743,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.34858438372612,
"learning_rate": 0.0022500000000000003,
"loss": 2.3489,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.35453295707702637,
"learning_rate": 0.0023125000000000003,
"loss": 2.3836,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3333473205566406,
"learning_rate": 0.002375,
"loss": 2.3823,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.34180307388305664,
"learning_rate": 0.0024375,
"loss": 2.3823,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.36889737844467163,
"learning_rate": 0.0025,
"loss": 2.3884,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.4853994954037559,
"eval_loss": 2.5670363903045654,
"eval_runtime": 3.3422,
"eval_samples_per_second": 1343.741,
"eval_steps_per_second": 10.771,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.34776511788368225,
"learning_rate": 0.0025625,
"loss": 2.3873,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.3375663161277771,
"learning_rate": 0.002625,
"loss": 2.3223,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.35632824897766113,
"learning_rate": 0.0026875000000000002,
"loss": 2.3341,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.34955427050590515,
"learning_rate": 0.00275,
"loss": 2.3471,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.33797112107276917,
"learning_rate": 0.0028125,
"loss": 2.3604,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.33891069889068604,
"learning_rate": 0.0028750000000000004,
"loss": 2.3624,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4855029393870856,
"eval_loss": 2.5581905841827393,
"eval_runtime": 3.415,
"eval_samples_per_second": 1315.097,
"eval_steps_per_second": 10.542,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.3400173783302307,
"learning_rate": 0.0029375,
"loss": 2.3493,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.37537550926208496,
"learning_rate": 0.003,
"loss": 2.2908,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.31209731101989746,
"learning_rate": 0.002715909090909091,
"loss": 2.3134,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.30068129301071167,
"learning_rate": 0.0024318181818181817,
"loss": 2.3028,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.30200621485710144,
"learning_rate": 0.002147727272727273,
"loss": 2.3009,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.3131857216358185,
"learning_rate": 0.0018636363636363638,
"loss": 2.2859,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.494355289757117,
"eval_loss": 2.501802921295166,
"eval_runtime": 3.3486,
"eval_samples_per_second": 1341.16,
"eval_steps_per_second": 10.751,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.3251318633556366,
"learning_rate": 0.0015795454545454546,
"loss": 2.2396,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.32136398553848267,
"learning_rate": 0.0012954545454545456,
"loss": 2.1712,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.2987048625946045,
"learning_rate": 0.0010113636363636364,
"loss": 2.1659,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.34757348895072937,
"learning_rate": 0.0007272727272727273,
"loss": 2.1607,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.2865099012851715,
"learning_rate": 0.0004431818181818182,
"loss": 2.1531,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.3215331733226776,
"learning_rate": 0.0001590909090909091,
"loss": 2.1433,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.49926975560920617,
"eval_loss": 2.47407603263855,
"eval_runtime": 3.3548,
"eval_samples_per_second": 1338.689,
"eval_steps_per_second": 10.731,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5694013522622363,
"train_runtime": 748.563,
"train_samples_per_second": 625.799,
"train_steps_per_second": 39.115
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}