smolm-autoreg-bpe-seed_444 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.3195292949676514,
"learning_rate": 6.25e-05,
"loss": 6.1984,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.8932374119758606,
"learning_rate": 0.000125,
"loss": 3.6328,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.1820354461669922,
"learning_rate": 0.0001875,
"loss": 3.332,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.031574010848999,
"learning_rate": 0.00025,
"loss": 3.1653,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.942737340927124,
"learning_rate": 0.0003125,
"loss": 3.0639,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.4379818254181153,
"eval_loss": 3.0180583000183105,
"eval_runtime": 3.3254,
"eval_samples_per_second": 1350.527,
"eval_steps_per_second": 10.826,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.9794430732727051,
"learning_rate": 0.000375,
"loss": 2.941,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.9245285987854004,
"learning_rate": 0.00043750000000000006,
"loss": 2.8816,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8375250697135925,
"learning_rate": 0.0005,
"loss": 2.8172,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8608168959617615,
"learning_rate": 0.0005625000000000001,
"loss": 2.7785,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.875029981136322,
"learning_rate": 0.000625,
"loss": 2.7348,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.8714506030082703,
"learning_rate": 0.0006875,
"loss": 2.7091,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.45751871196461164,
"eval_loss": 2.788527011871338,
"eval_runtime": 3.3539,
"eval_samples_per_second": 1339.048,
"eval_steps_per_second": 10.734,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.8536949753761292,
"learning_rate": 0.00075,
"loss": 2.6749,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7690100073814392,
"learning_rate": 0.0008125,
"loss": 2.622,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7544898986816406,
"learning_rate": 0.0008750000000000001,
"loss": 2.6122,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.7384330034255981,
"learning_rate": 0.0009375,
"loss": 2.6138,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.760734498500824,
"learning_rate": 0.001,
"loss": 2.5969,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6459075808525085,
"learning_rate": 0.0010625,
"loss": 2.5802,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.471204876945492,
"eval_loss": 2.6925525665283203,
"eval_runtime": 3.4091,
"eval_samples_per_second": 1317.365,
"eval_steps_per_second": 10.56,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6531331539154053,
"learning_rate": 0.0011250000000000001,
"loss": 2.552,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.5972659587860107,
"learning_rate": 0.0011875,
"loss": 2.5069,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5470172762870789,
"learning_rate": 0.00125,
"loss": 2.5182,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5466737747192383,
"learning_rate": 0.0013125,
"loss": 2.5224,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.511480987071991,
"learning_rate": 0.001375,
"loss": 2.515,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.5196664929389954,
"learning_rate": 0.0014375000000000002,
"loss": 2.5132,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.4760264185413697,
"eval_loss": 2.6402881145477295,
"eval_runtime": 3.418,
"eval_samples_per_second": 1313.932,
"eval_steps_per_second": 10.533,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.4554392695426941,
"learning_rate": 0.0015,
"loss": 2.4592,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4852846562862396,
"learning_rate": 0.0015625,
"loss": 2.4524,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.445295125246048,
"learning_rate": 0.001625,
"loss": 2.4504,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.47399041056632996,
"learning_rate": 0.0016875,
"loss": 2.4624,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.3997687101364136,
"learning_rate": 0.0017500000000000003,
"loss": 2.4596,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.3656143248081207,
"learning_rate": 0.0018124999999999999,
"loss": 2.4661,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4783635512494806,
"eval_loss": 2.6108150482177734,
"eval_runtime": 3.3988,
"eval_samples_per_second": 1321.358,
"eval_steps_per_second": 10.592,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.4261285364627838,
"learning_rate": 0.001875,
"loss": 2.4039,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.3942112624645233,
"learning_rate": 0.0019375000000000002,
"loss": 2.4014,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.4000963270664215,
"learning_rate": 0.002,
"loss": 2.4071,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.3776912987232208,
"learning_rate": 0.0020625,
"loss": 2.4142,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.3140333294868469,
"learning_rate": 0.002125,
"loss": 2.4253,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.3446044325828552,
"learning_rate": 0.0021874999999999998,
"loss": 2.4316,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.48305885611993893,
"eval_loss": 2.5828359127044678,
"eval_runtime": 3.4134,
"eval_samples_per_second": 1315.703,
"eval_steps_per_second": 10.547,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.327090322971344,
"learning_rate": 0.0022500000000000003,
"loss": 2.3465,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.3468206822872162,
"learning_rate": 0.0023125000000000003,
"loss": 2.3651,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3663696050643921,
"learning_rate": 0.002375,
"loss": 2.3794,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.2861100137233734,
"learning_rate": 0.0024375,
"loss": 2.3947,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.3619392514228821,
"learning_rate": 0.0025,
"loss": 2.3891,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.48500149906111434,
"eval_loss": 2.5699071884155273,
"eval_runtime": 3.4157,
"eval_samples_per_second": 1314.807,
"eval_steps_per_second": 10.54,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.3399389386177063,
"learning_rate": 0.0025625,
"loss": 2.387,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.29669496417045593,
"learning_rate": 0.002625,
"loss": 2.3106,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.3277042508125305,
"learning_rate": 0.0026875000000000002,
"loss": 2.3308,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.2957574427127838,
"learning_rate": 0.00275,
"loss": 2.3548,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.29501375555992126,
"learning_rate": 0.0028125,
"loss": 2.3511,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.29575708508491516,
"learning_rate": 0.0028750000000000004,
"loss": 2.3599,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.48726324039154423,
"eval_loss": 2.5551772117614746,
"eval_runtime": 3.3869,
"eval_samples_per_second": 1325.979,
"eval_steps_per_second": 10.629,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.2991016209125519,
"learning_rate": 0.0029375,
"loss": 2.3489,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.332133412361145,
"learning_rate": 0.003,
"loss": 2.2938,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.3354604244232178,
"learning_rate": 0.002715909090909091,
"loss": 2.3065,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.32910189032554626,
"learning_rate": 0.0024318181818181817,
"loss": 2.2954,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.3498801589012146,
"learning_rate": 0.002147727272727273,
"loss": 2.2956,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.3250719904899597,
"learning_rate": 0.0018636363636363638,
"loss": 2.2821,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.494129115624074,
"eval_loss": 2.501997470855713,
"eval_runtime": 3.3639,
"eval_samples_per_second": 1335.064,
"eval_steps_per_second": 10.702,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.3215320408344269,
"learning_rate": 0.0015795454545454546,
"loss": 2.244,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.275278240442276,
"learning_rate": 0.0012954545454545456,
"loss": 2.1635,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.33266499638557434,
"learning_rate": 0.0010113636363636364,
"loss": 2.1561,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.3034181594848633,
"learning_rate": 0.0007272727272727273,
"loss": 2.1569,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.32139885425567627,
"learning_rate": 0.0004431818181818182,
"loss": 2.1493,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.2935737073421478,
"learning_rate": 0.0001590909090909091,
"loss": 2.1425,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4995467750899875,
"eval_loss": 2.4751720428466797,
"eval_runtime": 3.364,
"eval_samples_per_second": 1335.021,
"eval_steps_per_second": 10.702,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5668444753344595,
"train_runtime": 741.451,
"train_samples_per_second": 631.802,
"train_steps_per_second": 39.49
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
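
A minimal sketch (not part of the repository) of how a trainer_state.json like the one above could be summarised with the Python standard library only. The file name "trainer_state.json" matches the title line; the variable names and the perplexity calculation are illustrative assumptions, not code shipped with this model.

    # summarize_trainer_state.py -- illustrative sketch, assumes trainer_state.json is in the working directory
    import json
    import math

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Entries with a "loss" key are per-logging-step training records (every 500 steps here);
    # entries with an "eval_loss" key are the per-epoch evaluation records.
    train_entries = [e for e in state["log_history"] if "loss" in e]
    eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"logged training points: {len(train_entries)}")
    print(f"last logged train loss: {train_entries[-1]['loss']}")

    for e in eval_entries:
        # Perplexity as exp(mean cross-entropy); eval_loss here is the mean eval loss.
        ppl = math.exp(e["eval_loss"])
        print(f"epoch {e['epoch']:>5}: eval_loss={e['eval_loss']:.4f} "
              f"eval_accuracy={e['eval_accuracy']:.4f} ppl={ppl:.2f}")

Run against this file, the final line would report the epoch-10 evaluation (eval_loss 2.4752, eval_accuracy 0.4995), which corresponds to a perplexity of roughly 11.9.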