smolm-autoreg-bpe-seed_666 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.3569283485412598,
"learning_rate": 6.25e-05,
"loss": 6.1959,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.8884565830230713,
"learning_rate": 0.000125,
"loss": 3.6302,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 0.9340901374816895,
"learning_rate": 0.0001875,
"loss": 3.3395,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.026479721069336,
"learning_rate": 0.00025,
"loss": 3.1647,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9679443836212158,
"learning_rate": 0.0003125,
"loss": 3.0568,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.43760837510541645,
"eval_loss": 3.0170083045959473,
"eval_runtime": 3.3233,
"eval_samples_per_second": 1351.351,
"eval_steps_per_second": 10.832,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.8825171589851379,
"learning_rate": 0.000375,
"loss": 2.9449,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.9031845927238464,
"learning_rate": 0.00043750000000000006,
"loss": 2.8664,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8770526051521301,
"learning_rate": 0.0005,
"loss": 2.8193,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8624604940414429,
"learning_rate": 0.0005625000000000001,
"loss": 2.7658,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.8200684189796448,
"learning_rate": 0.000625,
"loss": 2.7358,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.8737301230430603,
"learning_rate": 0.0006875,
"loss": 2.7079,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4585023064501707,
"eval_loss": 2.7878565788269043,
"eval_runtime": 3.4124,
"eval_samples_per_second": 1316.077,
"eval_steps_per_second": 10.55,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7897689342498779,
"learning_rate": 0.00075,
"loss": 2.6723,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.8149366974830627,
"learning_rate": 0.0008125,
"loss": 2.6196,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.684084951877594,
"learning_rate": 0.0008750000000000001,
"loss": 2.6148,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.6549258828163147,
"learning_rate": 0.0009375,
"loss": 2.6107,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.670656681060791,
"learning_rate": 0.001,
"loss": 2.5927,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6147797703742981,
"learning_rate": 0.0010625,
"loss": 2.5856,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.47057018674268924,
"eval_loss": 2.6923789978027344,
"eval_runtime": 3.3929,
"eval_samples_per_second": 1323.657,
"eval_steps_per_second": 10.61,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6165093779563904,
"learning_rate": 0.0011250000000000001,
"loss": 2.5405,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.5891627669334412,
"learning_rate": 0.0011875,
"loss": 2.513,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5683926939964294,
"learning_rate": 0.00125,
"loss": 2.5239,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.49277979135513306,
"learning_rate": 0.0013125,
"loss": 2.5101,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.5084903836250305,
"learning_rate": 0.001375,
"loss": 2.5176,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.4873659014701843,
"learning_rate": 0.0014375000000000002,
"loss": 2.5108,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.4762087604780865,
"eval_loss": 2.6403005123138428,
"eval_runtime": 3.4307,
"eval_samples_per_second": 1309.069,
"eval_steps_per_second": 10.494,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.47938045859336853,
"learning_rate": 0.0015,
"loss": 2.4624,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4903472065925598,
"learning_rate": 0.0015625,
"loss": 2.4398,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.43617236614227295,
"learning_rate": 0.001625,
"loss": 2.4683,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.40969905257225037,
"learning_rate": 0.0016875,
"loss": 2.4612,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.367087185382843,
"learning_rate": 0.0017500000000000003,
"loss": 2.4706,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.37903687357902527,
"learning_rate": 0.0018124999999999999,
"loss": 2.4576,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.47962591850367403,
"eval_loss": 2.607229709625244,
"eval_runtime": 3.4265,
"eval_samples_per_second": 1310.668,
"eval_steps_per_second": 10.506,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.38673949241638184,
"learning_rate": 0.001875,
"loss": 2.4,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.38303470611572266,
"learning_rate": 0.0019375000000000002,
"loss": 2.3994,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.3417862057685852,
"learning_rate": 0.002,
"loss": 2.4168,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.34288668632507324,
"learning_rate": 0.0020625,
"loss": 2.4167,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.36294203996658325,
"learning_rate": 0.002125,
"loss": 2.4319,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.3338480293750763,
"learning_rate": 0.0021874999999999998,
"loss": 2.423,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.48246449153775617,
"eval_loss": 2.5827646255493164,
"eval_runtime": 3.3721,
"eval_samples_per_second": 1331.828,
"eval_steps_per_second": 10.676,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.34943464398384094,
"learning_rate": 0.0022500000000000003,
"loss": 2.3545,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.33251243829727173,
"learning_rate": 0.0023125000000000003,
"loss": 2.371,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.36209550499916077,
"learning_rate": 0.002375,
"loss": 2.3805,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.36579540371894836,
"learning_rate": 0.0024375,
"loss": 2.3914,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.3118489980697632,
"learning_rate": 0.0025,
"loss": 2.3921,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.48544858746364117,
"eval_loss": 2.566972017288208,
"eval_runtime": 3.3834,
"eval_samples_per_second": 1327.359,
"eval_steps_per_second": 10.64,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.33852943778038025,
"learning_rate": 0.0025625,
"loss": 2.3805,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.3410607874393463,
"learning_rate": 0.002625,
"loss": 2.3178,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.3233848810195923,
"learning_rate": 0.0026875000000000002,
"loss": 2.3326,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.3061616122722626,
"learning_rate": 0.00275,
"loss": 2.3472,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.3195803463459015,
"learning_rate": 0.0028125,
"loss": 2.3588,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.37057217955589294,
"learning_rate": 0.0028750000000000004,
"loss": 2.3602,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4870756385912683,
"eval_loss": 2.553264856338501,
"eval_runtime": 3.4098,
"eval_samples_per_second": 1317.103,
"eval_steps_per_second": 10.558,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.31338265538215637,
"learning_rate": 0.0029375,
"loss": 2.3546,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.3508692979812622,
"learning_rate": 0.003,
"loss": 2.2832,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.3429717421531677,
"learning_rate": 0.002715909090909091,
"loss": 2.3064,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.3300691545009613,
"learning_rate": 0.0024318181818181817,
"loss": 2.3012,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.31728777289390564,
"learning_rate": 0.002147727272727273,
"loss": 2.2953,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.29462775588035583,
"learning_rate": 0.0018636363636363638,
"loss": 2.2861,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.49417470110825323,
"eval_loss": 2.500990867614746,
"eval_runtime": 3.4136,
"eval_samples_per_second": 1315.633,
"eval_steps_per_second": 10.546,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.29570701718330383,
"learning_rate": 0.0015795454545454546,
"loss": 2.2492,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.3393871486186981,
"learning_rate": 0.0012954545454545456,
"loss": 2.1726,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.32914644479751587,
"learning_rate": 0.0010113636363636364,
"loss": 2.1655,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.2837465703487396,
"learning_rate": 0.0007272727272727273,
"loss": 2.1501,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.3447483479976654,
"learning_rate": 0.0004431818181818182,
"loss": 2.1465,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.30058619379997253,
"learning_rate": 0.0001590909090909091,
"loss": 2.1381,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.49974139004167567,
"eval_loss": 2.4754340648651123,
"eval_runtime": 3.3618,
"eval_samples_per_second": 1335.893,
"eval_steps_per_second": 10.709,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.566941033035028,
"train_runtime": 742.3412,
"train_samples_per_second": 631.044,
"train_steps_per_second": 39.443
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
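The `log_history` array above interleaves per-500-step training entries (keyed by `loss`) with per-epoch evaluation entries (keyed by `eval_loss` / `eval_accuracy`). A minimal sketch of how one might separate the two and plot the loss curves is below; it assumes the file has been downloaded locally as `trainer_state.json` and that `matplotlib` is installed (both are assumptions, not part of this repository).

```python
import json
import matplotlib.pyplot as plt

# Load the trainer state shown above (local path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries log "loss"; epoch-end evaluation entries log "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

fig, ax = plt.subplots()
ax.plot([e["step"] for e in train_logs],
        [e["loss"] for e in train_logs], label="train loss")
ax.plot([e["step"] for e in eval_logs],
        [e["eval_loss"] for e in eval_logs], marker="o", label="eval loss")
ax.set_xlabel("step")
ax.set_ylabel("loss")
ax.legend()
plt.show()
```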