smolm-autoreg-bpe-seed_1024 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 29280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 1.488160490989685,
"learning_rate": 6.25e-05,
"loss": 6.1623,
"step": 500
},
{
"epoch": 0.34,
"grad_norm": 0.9419446587562561,
"learning_rate": 0.000125,
"loss": 3.618,
"step": 1000
},
{
"epoch": 0.51,
"grad_norm": 1.0650488138198853,
"learning_rate": 0.0001875,
"loss": 3.3416,
"step": 1500
},
{
"epoch": 0.68,
"grad_norm": 1.0354753732681274,
"learning_rate": 0.00025,
"loss": 3.1604,
"step": 2000
},
{
"epoch": 0.85,
"grad_norm": 0.9344812631607056,
"learning_rate": 0.0003125,
"loss": 3.0469,
"step": 2500
},
{
"epoch": 1.0,
"eval_accuracy": 0.43948088653247,
"eval_loss": 3.010124444961548,
"eval_runtime": 3.3747,
"eval_samples_per_second": 1330.78,
"eval_steps_per_second": 10.668,
"step": 2928
},
{
"epoch": 1.02,
"grad_norm": 0.9709697365760803,
"learning_rate": 0.000375,
"loss": 2.9646,
"step": 3000
},
{
"epoch": 1.2,
"grad_norm": 0.8762878775596619,
"learning_rate": 0.00043750000000000006,
"loss": 2.8683,
"step": 3500
},
{
"epoch": 1.37,
"grad_norm": 0.8630770444869995,
"learning_rate": 0.0005,
"loss": 2.8084,
"step": 4000
},
{
"epoch": 1.54,
"grad_norm": 0.8325084447860718,
"learning_rate": 0.0005625000000000001,
"loss": 2.773,
"step": 4500
},
{
"epoch": 1.71,
"grad_norm": 0.7819183468818665,
"learning_rate": 0.000625,
"loss": 2.7315,
"step": 5000
},
{
"epoch": 1.88,
"grad_norm": 0.7667275667190552,
"learning_rate": 0.0006875,
"loss": 2.7142,
"step": 5500
},
{
"epoch": 2.0,
"eval_accuracy": 0.46125496837945357,
"eval_loss": 2.7790303230285645,
"eval_runtime": 3.436,
"eval_samples_per_second": 1307.031,
"eval_steps_per_second": 10.477,
"step": 5856
},
{
"epoch": 2.05,
"grad_norm": 0.7901221513748169,
"learning_rate": 0.00075,
"loss": 2.664,
"step": 6000
},
{
"epoch": 2.22,
"grad_norm": 0.7636890411376953,
"learning_rate": 0.0008125,
"loss": 2.6219,
"step": 6500
},
{
"epoch": 2.39,
"grad_norm": 0.722453236579895,
"learning_rate": 0.0008750000000000001,
"loss": 2.6068,
"step": 7000
},
{
"epoch": 2.56,
"grad_norm": 0.7329472303390503,
"learning_rate": 0.0009375,
"loss": 2.6052,
"step": 7500
},
{
"epoch": 2.73,
"grad_norm": 0.6657545566558838,
"learning_rate": 0.001,
"loss": 2.5877,
"step": 8000
},
{
"epoch": 2.9,
"grad_norm": 0.6002160906791687,
"learning_rate": 0.0010625,
"loss": 2.5841,
"step": 8500
},
{
"epoch": 3.0,
"eval_accuracy": 0.4693832108661768,
"eval_loss": 2.6902520656585693,
"eval_runtime": 3.4276,
"eval_samples_per_second": 1310.233,
"eval_steps_per_second": 10.503,
"step": 8784
},
{
"epoch": 3.07,
"grad_norm": 0.6292432546615601,
"learning_rate": 0.0011250000000000001,
"loss": 2.537,
"step": 9000
},
{
"epoch": 3.24,
"grad_norm": 0.5978102684020996,
"learning_rate": 0.0011875,
"loss": 2.5072,
"step": 9500
},
{
"epoch": 3.42,
"grad_norm": 0.5643737316131592,
"learning_rate": 0.00125,
"loss": 2.5165,
"step": 10000
},
{
"epoch": 3.59,
"grad_norm": 0.5356239080429077,
"learning_rate": 0.0013125,
"loss": 2.5138,
"step": 10500
},
{
"epoch": 3.76,
"grad_norm": 0.5515431761741638,
"learning_rate": 0.001375,
"loss": 2.5133,
"step": 11000
},
{
"epoch": 3.93,
"grad_norm": 0.47632288932800293,
"learning_rate": 0.0014375000000000002,
"loss": 2.5089,
"step": 11500
},
{
"epoch": 4.0,
"eval_accuracy": 0.47639460899050945,
"eval_loss": 2.63588547706604,
"eval_runtime": 3.3985,
"eval_samples_per_second": 1321.47,
"eval_steps_per_second": 10.593,
"step": 11712
},
{
"epoch": 4.1,
"grad_norm": 0.4891969561576843,
"learning_rate": 0.0015,
"loss": 2.4601,
"step": 12000
},
{
"epoch": 4.27,
"grad_norm": 0.4287746548652649,
"learning_rate": 0.0015625,
"loss": 2.4371,
"step": 12500
},
{
"epoch": 4.44,
"grad_norm": 0.43466711044311523,
"learning_rate": 0.001625,
"loss": 2.4523,
"step": 13000
},
{
"epoch": 4.61,
"grad_norm": 0.417784720659256,
"learning_rate": 0.0016875,
"loss": 2.4623,
"step": 13500
},
{
"epoch": 4.78,
"grad_norm": 0.4051859974861145,
"learning_rate": 0.0017500000000000003,
"loss": 2.4674,
"step": 14000
},
{
"epoch": 4.95,
"grad_norm": 0.40950262546539307,
"learning_rate": 0.0018124999999999999,
"loss": 2.4566,
"step": 14500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4807094503968567,
"eval_loss": 2.602451801300049,
"eval_runtime": 3.3747,
"eval_samples_per_second": 1330.789,
"eval_steps_per_second": 10.668,
"step": 14640
},
{
"epoch": 5.12,
"grad_norm": 0.39824172854423523,
"learning_rate": 0.001875,
"loss": 2.4126,
"step": 15000
},
{
"epoch": 5.29,
"grad_norm": 0.3892059326171875,
"learning_rate": 0.0019375000000000002,
"loss": 2.3928,
"step": 15500
},
{
"epoch": 5.46,
"grad_norm": 0.3697751462459564,
"learning_rate": 0.002,
"loss": 2.4102,
"step": 16000
},
{
"epoch": 5.64,
"grad_norm": 0.38204559683799744,
"learning_rate": 0.0020625,
"loss": 2.416,
"step": 16500
},
{
"epoch": 5.81,
"grad_norm": 0.3960261642932892,
"learning_rate": 0.002125,
"loss": 2.4218,
"step": 17000
},
{
"epoch": 5.98,
"grad_norm": 0.30173224210739136,
"learning_rate": 0.0021874999999999998,
"loss": 2.4168,
"step": 17500
},
{
"epoch": 6.0,
"eval_accuracy": 0.4828274221233368,
"eval_loss": 2.5854403972625732,
"eval_runtime": 3.3792,
"eval_samples_per_second": 1329.014,
"eval_steps_per_second": 10.653,
"step": 17568
},
{
"epoch": 6.15,
"grad_norm": 0.30894598364830017,
"learning_rate": 0.0022500000000000003,
"loss": 2.357,
"step": 18000
},
{
"epoch": 6.32,
"grad_norm": 0.36697593331336975,
"learning_rate": 0.0023125000000000003,
"loss": 2.36,
"step": 18500
},
{
"epoch": 6.49,
"grad_norm": 0.3668546974658966,
"learning_rate": 0.002375,
"loss": 2.3825,
"step": 19000
},
{
"epoch": 6.66,
"grad_norm": 0.3038611114025116,
"learning_rate": 0.0024375,
"loss": 2.3875,
"step": 19500
},
{
"epoch": 6.83,
"grad_norm": 0.290789395570755,
"learning_rate": 0.0025,
"loss": 2.3886,
"step": 20000
},
{
"epoch": 7.0,
"eval_accuracy": 0.4851137094837093,
"eval_loss": 2.5672802925109863,
"eval_runtime": 3.3776,
"eval_samples_per_second": 1329.659,
"eval_steps_per_second": 10.659,
"step": 20496
},
{
"epoch": 7.0,
"grad_norm": 0.2979215681552887,
"learning_rate": 0.0025625,
"loss": 2.3835,
"step": 20500
},
{
"epoch": 7.17,
"grad_norm": 0.2849644422531128,
"learning_rate": 0.002625,
"loss": 2.3134,
"step": 21000
},
{
"epoch": 7.34,
"grad_norm": 0.37557193636894226,
"learning_rate": 0.0026875000000000002,
"loss": 2.3387,
"step": 21500
},
{
"epoch": 7.51,
"grad_norm": 0.3276946544647217,
"learning_rate": 0.00275,
"loss": 2.3492,
"step": 22000
},
{
"epoch": 7.68,
"grad_norm": 0.30562373995780945,
"learning_rate": 0.0028125,
"loss": 2.3541,
"step": 22500
},
{
"epoch": 7.86,
"grad_norm": 0.32304540276527405,
"learning_rate": 0.0028750000000000004,
"loss": 2.3618,
"step": 23000
},
{
"epoch": 8.0,
"eval_accuracy": 0.4873631777991679,
"eval_loss": 2.556323528289795,
"eval_runtime": 3.3854,
"eval_samples_per_second": 1326.589,
"eval_steps_per_second": 10.634,
"step": 23424
},
{
"epoch": 8.03,
"grad_norm": 0.30651578307151794,
"learning_rate": 0.0029375,
"loss": 2.3431,
"step": 23500
},
{
"epoch": 8.2,
"grad_norm": 0.33351045846939087,
"learning_rate": 0.003,
"loss": 2.2909,
"step": 24000
},
{
"epoch": 8.37,
"grad_norm": 0.36140263080596924,
"learning_rate": 0.002715909090909091,
"loss": 2.3174,
"step": 24500
},
{
"epoch": 8.54,
"grad_norm": 0.32251298427581787,
"learning_rate": 0.0024318181818181817,
"loss": 2.2999,
"step": 25000
},
{
"epoch": 8.71,
"grad_norm": 0.3153398931026459,
"learning_rate": 0.002147727272727273,
"loss": 2.2891,
"step": 25500
},
{
"epoch": 8.88,
"grad_norm": 0.2726821005344391,
"learning_rate": 0.0018636363636363638,
"loss": 2.2757,
"step": 26000
},
{
"epoch": 9.0,
"eval_accuracy": 0.493145521138515,
"eval_loss": 2.502424955368042,
"eval_runtime": 3.3675,
"eval_samples_per_second": 1333.647,
"eval_steps_per_second": 10.691,
"step": 26352
},
{
"epoch": 9.05,
"grad_norm": 0.29384592175483704,
"learning_rate": 0.0015795454545454546,
"loss": 2.2495,
"step": 26500
},
{
"epoch": 9.22,
"grad_norm": 0.343348890542984,
"learning_rate": 0.0012954545454545456,
"loss": 2.176,
"step": 27000
},
{
"epoch": 9.39,
"grad_norm": 0.31881004571914673,
"learning_rate": 0.0010113636363636364,
"loss": 2.1687,
"step": 27500
},
{
"epoch": 9.56,
"grad_norm": 0.3788747787475586,
"learning_rate": 0.0007272727272727273,
"loss": 2.1582,
"step": 28000
},
{
"epoch": 9.73,
"grad_norm": 0.3564055860042572,
"learning_rate": 0.0004431818181818182,
"loss": 2.1505,
"step": 28500
},
{
"epoch": 9.9,
"grad_norm": 0.28436481952667236,
"learning_rate": 0.0001590909090909091,
"loss": 2.139,
"step": 29000
},
{
"epoch": 10.0,
"eval_accuracy": 0.5000622417187831,
"eval_loss": 2.4731357097625732,
"eval_runtime": 3.4247,
"eval_samples_per_second": 1311.358,
"eval_steps_per_second": 10.512,
"step": 29280
},
{
"epoch": 10.0,
"step": 29280,
"total_flos": 2273237316403200.0,
"train_loss": 2.5646611927636984,
"train_runtime": 771.972,
"train_samples_per_second": 606.822,
"train_steps_per_second": 37.929
}
],
"logging_steps": 500,
"max_steps": 29280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 2000,
"total_flos": 2273237316403200.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
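
The per-epoch evaluation metrics recorded in log_history above can be pulled out programmatically. Below is a minimal sketch, assuming the JSON above is saved locally as trainer_state.json (the filename/path is an assumption; adjust it to wherever the file lives). It only uses the Python standard library and the keys that actually appear in this file.

# Minimal sketch: summarise the eval curve stored in this trainer_state.json.
# The path "trainer_state.json" is an assumption; change it as needed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries (which carry "loss") with the
# per-epoch evaluation entries (which carry "eval_loss"); split them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_logs:
    print(f'epoch {e["epoch"]:>4}: '
          f'eval_loss={e["eval_loss"]:.4f}, '
          f'eval_accuracy={e["eval_accuracy"]:.4f}')

Run against this file, the loop would print one line per epoch (steps 2928 through 29280), showing eval_loss falling from about 3.01 to 2.47 and eval_accuracy rising from about 0.439 to 0.500.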