smolm-autoreg-bpe-seed_394 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.2606134414672852,
"learning_rate": 6.25e-05,
"loss": 6.1193,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 1.0857479572296143,
"learning_rate": 0.000125,
"loss": 3.6291,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 0.9009315967559814,
"learning_rate": 0.0001875,
"loss": 3.3499,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.0317490100860596,
"learning_rate": 0.00025,
"loss": 3.1662,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9014867544174194,
"learning_rate": 0.0003125,
"loss": 3.0436,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.43863580178730166,
"eval_loss": 3.012347936630249,
"eval_runtime": 3.3529,
"eval_samples_per_second": 1339.456,
"eval_steps_per_second": 10.737,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.8881410956382751,
"learning_rate": 0.000375,
"loss": 2.9386,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.9484242796897888,
"learning_rate": 0.00043750000000000006,
"loss": 2.8566,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8347967863082886,
"learning_rate": 0.0005,
"loss": 2.8143,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8291537761688232,
"learning_rate": 0.0005625000000000001,
"loss": 2.7705,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.8921717405319214,
"learning_rate": 0.000625,
"loss": 2.7471,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.8225255608558655,
"learning_rate": 0.0006875,
"loss": 2.7076,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.4602170219704501,
"eval_loss": 2.7808685302734375,
"eval_runtime": 3.378,
"eval_samples_per_second": 1329.483,
"eval_steps_per_second": 10.657,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.8256505131721497,
"learning_rate": 0.00075,
"loss": 2.6746,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7672858834266663,
"learning_rate": 0.0008125,
"loss": 2.6239,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.7460688948631287,
"learning_rate": 0.0008750000000000001,
"loss": 2.62,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.6622563004493713,
"learning_rate": 0.0009375,
"loss": 2.6093,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6603254675865173,
"learning_rate": 0.001,
"loss": 2.5886,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6143876910209656,
"learning_rate": 0.0010625,
"loss": 2.5717,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.47059823934833794,
"eval_loss": 2.6882147789001465,
"eval_runtime": 3.4363,
"eval_samples_per_second": 1306.937,
"eval_steps_per_second": 10.476,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.5893853902816772,
"learning_rate": 0.0011250000000000001,
"loss": 2.5493,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.5745663642883301,
"learning_rate": 0.0011875,
"loss": 2.5131,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5763567686080933,
"learning_rate": 0.00125,
"loss": 2.5234,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5464815497398376,
"learning_rate": 0.0013125,
"loss": 2.5211,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.4787265658378601,
"learning_rate": 0.001375,
"loss": 2.5021,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.474524587392807,
"learning_rate": 0.0014375000000000002,
"loss": 2.5132,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.47702228604190006,
"eval_loss": 2.6347815990448,
"eval_runtime": 3.3697,
"eval_samples_per_second": 1332.743,
"eval_steps_per_second": 10.683,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.5468990802764893,
"learning_rate": 0.0015,
"loss": 2.4629,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.46725761890411377,
"learning_rate": 0.0015625,
"loss": 2.4516,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.44199100136756897,
"learning_rate": 0.001625,
"loss": 2.4517,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.3936983048915863,
"learning_rate": 0.0016875,
"loss": 2.4622,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.3848307430744171,
"learning_rate": 0.0017500000000000003,
"loss": 2.4646,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.40697723627090454,
"learning_rate": 0.0018124999999999999,
"loss": 2.4683,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.48105835467961294,
"eval_loss": 2.604564666748047,
"eval_runtime": 3.3835,
"eval_samples_per_second": 1327.329,
"eval_steps_per_second": 10.64,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.356196790933609,
"learning_rate": 0.001875,
"loss": 2.4136,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.39348748326301575,
"learning_rate": 0.0019375000000000002,
"loss": 2.4034,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.3885677456855774,
"learning_rate": 0.002,
"loss": 2.4123,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.36985746026039124,
"learning_rate": 0.0020625,
"loss": 2.4175,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.3625586926937103,
"learning_rate": 0.002125,
"loss": 2.4222,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.3486401438713074,
"learning_rate": 0.0021874999999999998,
"loss": 2.4265,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.48315178037615036,
"eval_loss": 2.5843474864959717,
"eval_runtime": 3.4306,
"eval_samples_per_second": 1309.113,
"eval_steps_per_second": 10.494,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.3697022497653961,
"learning_rate": 0.0022500000000000003,
"loss": 2.3574,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.33410874009132385,
"learning_rate": 0.0023125000000000003,
"loss": 2.3638,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.31180012226104736,
"learning_rate": 0.002375,
"loss": 2.3769,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.3748941421508789,
"learning_rate": 0.0024375,
"loss": 2.3866,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.33421775698661804,
"learning_rate": 0.0025,
"loss": 2.3937,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.4853363770410462,
"eval_loss": 2.570960283279419,
"eval_runtime": 3.3663,
"eval_samples_per_second": 1334.095,
"eval_steps_per_second": 10.694,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.3082391917705536,
"learning_rate": 0.0025625,
"loss": 2.3981,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.33345505595207214,
"learning_rate": 0.002625,
"loss": 2.3179,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.3456808924674988,
"learning_rate": 0.0026875000000000002,
"loss": 2.3446,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.33925849199295044,
"learning_rate": 0.00275,
"loss": 2.3468,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.31841370463371277,
"learning_rate": 0.0028125,
"loss": 2.3532,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.30321213603019714,
"learning_rate": 0.0028750000000000004,
"loss": 2.3639,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4873789573898453,
"eval_loss": 2.5551645755767822,
"eval_runtime": 3.3748,
"eval_samples_per_second": 1330.731,
"eval_steps_per_second": 10.667,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.3192748427391052,
"learning_rate": 0.0029375,
"loss": 2.3562,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.3336254060268402,
"learning_rate": 0.003,
"loss": 2.2971,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.3261987566947937,
"learning_rate": 0.002715909090909091,
"loss": 2.304,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.27662965655326843,
"learning_rate": 0.0024318181818181817,
"loss": 2.3079,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.3111967444419861,
"learning_rate": 0.002147727272727273,
"loss": 2.3027,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.29187533259391785,
"learning_rate": 0.0018636363636363638,
"loss": 2.2858,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.4944184081198267,
"eval_loss": 2.5044519901275635,
"eval_runtime": 3.3686,
"eval_samples_per_second": 1333.188,
"eval_steps_per_second": 10.687,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.30923736095428467,
"learning_rate": 0.0015795454545454546,
"loss": 2.241,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.3139377534389496,
"learning_rate": 0.0012954545454545456,
"loss": 2.1769,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.2745420038700104,
"learning_rate": 0.0010113636363636364,
"loss": 2.171,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.3075203001499176,
"learning_rate": 0.0007272727272727273,
"loss": 2.159,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.29712754487991333,
"learning_rate": 0.0004431818181818182,
"loss": 2.1517,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.28090161085128784,
"learning_rate": 0.0001590909090909091,
"loss": 2.1423,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.4999517845840412,
"eval_loss": 2.4779233932495117,
"eval_runtime": 3.3775,
"eval_samples_per_second": 1329.667,
"eval_steps_per_second": 10.659,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.566767305884856,
"train_runtime": 751.072,
"train_samples_per_second": 623.709,
"train_steps_per_second": 38.984
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
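As a minimal sketch of how this file can be consumed (the local filename "trainer_state.json" is an assumption, not part of the checkpoint itself), the log_history entries above split cleanly into per-step training losses ("loss" key, logged every 500 steps) and per-epoch evaluation records ("eval_loss"/"eval_accuracy" keys):

import json

# Sketch: read a locally saved copy of this file (path is an assumption)
# and separate training-loss entries from per-epoch eval entries.
with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]       # step-level training logs
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # one entry per epoch

for e in eval_logs:
    print(f"epoch {e['epoch']:>4}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_accuracy={e['eval_accuracy']:.4f}")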