smolm-autoreg-bpe-seed_496 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.2976419925689697,
"learning_rate": 6.25e-05,
"loss": 6.1408,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.9176012277603149,
"learning_rate": 0.000125,
"loss": 3.6186,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.0713545083999634,
"learning_rate": 0.0001875,
"loss": 3.337,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 0.9314835667610168,
"learning_rate": 0.00025,
"loss": 3.1944,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 1.087267279624939,
"learning_rate": 0.0003125,
"loss": 3.0603,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4366931588461262,
"eval_loss": 3.025468111038208,
"eval_runtime": 3.375,
"eval_samples_per_second": 1330.682,
"eval_steps_per_second": 10.667,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.9429690837860107,
"learning_rate": 0.000375,
"loss": 2.9476,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.950432300567627,
"learning_rate": 0.00043750000000000006,
"loss": 2.8889,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.9229320287704468,
"learning_rate": 0.0005,
"loss": 2.8218,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.844063937664032,
"learning_rate": 0.0005625000000000001,
"loss": 2.777,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.8429157137870789,
"learning_rate": 0.000625,
"loss": 2.7377,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.8256997466087341,
"learning_rate": 0.0006875,
"loss": 2.7088,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4579552806400202,
"eval_loss": 2.78731632232666,
"eval_runtime": 3.4275,
"eval_samples_per_second": 1310.267,
"eval_steps_per_second": 10.503,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7644203901290894,
"learning_rate": 0.00075,
"loss": 2.6787,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.8065159320831299,
"learning_rate": 0.0008125,
"loss": 2.6291,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7839713096618652,
"learning_rate": 0.0008750000000000001,
"loss": 2.6217,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.7241440415382385,
"learning_rate": 0.0009375,
"loss": 2.6003,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.7345842719078064,
"learning_rate": 0.001,
"loss": 2.6077,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.670850396156311,
"learning_rate": 0.0010625,
"loss": 2.586,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.4688046258746715,
"eval_loss": 2.695575714111328,
"eval_runtime": 3.4412,
"eval_samples_per_second": 1305.068,
"eval_steps_per_second": 10.461,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6207906603813171,
"learning_rate": 0.0011250000000000001,
"loss": 2.5489,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.5981828570365906,
"learning_rate": 0.0011875,
"loss": 2.5196,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.593421995639801,
"learning_rate": 0.00125,
"loss": 2.5176,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5348471999168396,
"learning_rate": 0.0013125,
"loss": 2.5265,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.5802651047706604,
"learning_rate": 0.001375,
"loss": 2.5181,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.4744044542312622,
"learning_rate": 0.0014375000000000002,
"loss": 2.5037,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.4771555359187316,
"eval_loss": 2.6362032890319824,
"eval_runtime": 3.3838,
"eval_samples_per_second": 1327.204,
"eval_steps_per_second": 10.639,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.5075201392173767,
"learning_rate": 0.0015,
"loss": 2.4663,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.47369861602783203,
"learning_rate": 0.0015625,
"loss": 2.4459,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.41244271397590637,
"learning_rate": 0.001625,
"loss": 2.4541,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.4284743070602417,
"learning_rate": 0.0016875,
"loss": 2.4717,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.4499437212944031,
"learning_rate": 0.0017500000000000003,
"loss": 2.4669,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.36233699321746826,
"learning_rate": 0.0018124999999999999,
"loss": 2.466,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4786510904573802,
"eval_loss": 2.612313985824585,
"eval_runtime": 3.4422,
"eval_samples_per_second": 1304.691,
"eval_steps_per_second": 10.458,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.444653183221817,
"learning_rate": 0.001875,
"loss": 2.405,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.41395285725593567,
"learning_rate": 0.0019375000000000002,
"loss": 2.4002,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.38018137216567993,
"learning_rate": 0.002,
"loss": 2.4142,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.3837694823741913,
"learning_rate": 0.0020625,
"loss": 2.4187,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.36019667983055115,
"learning_rate": 0.002125,
"loss": 2.4364,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.317461222410202,
"learning_rate": 0.0021874999999999998,
"loss": 2.4203,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.48276605704848014,
"eval_loss": 2.587817430496216,
"eval_runtime": 3.4189,
"eval_samples_per_second": 1313.564,
"eval_steps_per_second": 10.53,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.32134848833084106,
"learning_rate": 0.0022500000000000003,
"loss": 2.3565,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.36488619446754456,
"learning_rate": 0.0023125000000000003,
"loss": 2.3698,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.348846435546875,
"learning_rate": 0.002375,
"loss": 2.3797,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.400886207818985,
"learning_rate": 0.0024375,
"loss": 2.3919,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.32355600595474243,
"learning_rate": 0.0025,
"loss": 2.3871,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.48550644596279174,
"eval_loss": 2.569087505340576,
"eval_runtime": 3.425,
"eval_samples_per_second": 1311.234,
"eval_steps_per_second": 10.511,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.3506123423576355,
"learning_rate": 0.0025625,
"loss": 2.3935,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.3607357144355774,
"learning_rate": 0.002625,
"loss": 2.3145,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.35818299651145935,
"learning_rate": 0.0026875000000000002,
"loss": 2.3333,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.33337295055389404,
"learning_rate": 0.00275,
"loss": 2.3555,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.36606958508491516,
"learning_rate": 0.0028125,
"loss": 2.3567,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.3098103404045105,
"learning_rate": 0.0028750000000000004,
"loss": 2.367,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4879943614262646,
"eval_loss": 2.556701183319092,
"eval_runtime": 3.419,
"eval_samples_per_second": 1313.536,
"eval_steps_per_second": 10.529,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.36782288551330566,
"learning_rate": 0.0029375,
"loss": 2.3522,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.36634373664855957,
"learning_rate": 0.003,
"loss": 2.3004,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.32430076599121094,
"learning_rate": 0.002715909090909091,
"loss": 2.304,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.31956982612609863,
"learning_rate": 0.0024318181818181817,
"loss": 2.2997,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.3134121596813202,
"learning_rate": 0.002147727272727273,
"loss": 2.3008,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.3530263900756836,
"learning_rate": 0.0018636363636363638,
"loss": 2.2871,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.49414840179045755,
"eval_loss": 2.502568483352661,
"eval_runtime": 3.3962,
"eval_samples_per_second": 1322.366,
"eval_steps_per_second": 10.6,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.33767953515052795,
"learning_rate": 0.0015795454545454546,
"loss": 2.2403,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.28383320569992065,
"learning_rate": 0.0012954545454545456,
"loss": 2.1669,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.32172703742980957,
"learning_rate": 0.0010113636363636364,
"loss": 2.1702,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.3210740387439728,
"learning_rate": 0.0007272727272727273,
"loss": 2.1621,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.33859506249427795,
"learning_rate": 0.0004431818181818182,
"loss": 2.1613,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.2973349690437317,
"learning_rate": 0.0001590909090909091,
"loss": 2.1368,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.499527488923604,
"eval_loss": 2.4752440452575684,
"eval_runtime": 3.3565,
"eval_samples_per_second": 1338.013,
"eval_steps_per_second": 10.726,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5689867467828136,
"train_runtime": 745.5496,
"train_samples_per_second": 628.328,
"train_steps_per_second": 39.273
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
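The file above is a standard Hugging Face Transformers trainer_state.json, so it can be read back with plain json. Eval entries are logged once per epoch (every 2928 steps here), while training losses are logged every 500 steps (logging_steps), so filtering on the presence of eval_loss separates the two. A minimal sketch, assuming the file has been downloaded to the working directory; the filename and the printed fields come from the JSON above, and nothing else is part of the repo:

import json

# Load the Trainer checkpoint state (local path is an assumption for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-epoch evaluation records carry "eval_loss"; per-500-step training logs carry "loss".
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]
train_logs = [entry for entry in state["log_history"] if "loss" in entry]

print(f"Trained for {state['epoch']} epochs, {state['global_step']} steps")
for entry in eval_logs:
    print(f"epoch {entry['epoch']:>4}: eval_loss={entry['eval_loss']:.4f}, "
          f"eval_accuracy={entry['eval_accuracy']:.4f}")

Run as-is, this prints the ten per-epoch eval_loss / eval_accuracy rows recorded in log_history, tracing the drop from 3.03 / 0.437 at epoch 1 to 2.48 / 0.500 at epoch 10.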