{
"best_metric": 0.12666618824005127,
"best_model_checkpoint": "idbwtiny/checkpoint-20100",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 20100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25,
"grad_norm": 2.5227627754211426,
"learning_rate": 0.00019503482587064676,
"loss": 0.4184,
"step": 500
},
{
"epoch": 0.5,
"grad_norm": 2.693358898162842,
"learning_rate": 0.0001900597014925373,
"loss": 0.4215,
"step": 1000
},
{
"epoch": 0.75,
"grad_norm": 1.985125184059143,
"learning_rate": 0.00018508457711442785,
"loss": 0.3987,
"step": 1500
},
{
"epoch": 1.0,
"grad_norm": 1.946892261505127,
"learning_rate": 0.0001801094527363184,
"loss": 0.3871,
"step": 2000
},
{
"epoch": 1.0,
"eval_accuracy": 0.8674150542486978,
"eval_loss": 0.3020988404750824,
"eval_runtime": 206.2751,
"eval_samples_per_second": 257.816,
"eval_steps_per_second": 32.229,
"step": 2010
},
{
"epoch": 1.24,
"grad_norm": 2.351902723312378,
"learning_rate": 0.00017514427860696518,
"loss": 0.3685,
"step": 2500
},
{
"epoch": 1.49,
"grad_norm": 2.192362070083618,
"learning_rate": 0.00017016915422885572,
"loss": 0.3615,
"step": 3000
},
{
"epoch": 1.74,
"grad_norm": 2.392155647277832,
"learning_rate": 0.00016519402985074627,
"loss": 0.3474,
"step": 3500
},
{
"epoch": 1.99,
"grad_norm": 2.2455360889434814,
"learning_rate": 0.00016021890547263681,
"loss": 0.3403,
"step": 4000
},
{
"epoch": 2.0,
"eval_accuracy": 0.8966548203305692,
"eval_loss": 0.24423933029174805,
"eval_runtime": 204.2994,
"eval_samples_per_second": 260.309,
"eval_steps_per_second": 32.54,
"step": 4020
},
{
"epoch": 2.24,
"grad_norm": 2.0239338874816895,
"learning_rate": 0.0001552437810945274,
"loss": 0.3266,
"step": 4500
},
{
"epoch": 2.49,
"grad_norm": 2.220857858657837,
"learning_rate": 0.00015027860696517414,
"loss": 0.316,
"step": 5000
},
{
"epoch": 2.74,
"grad_norm": 1.665713906288147,
"learning_rate": 0.00014530348258706468,
"loss": 0.3089,
"step": 5500
},
{
"epoch": 2.99,
"grad_norm": 2.8135392665863037,
"learning_rate": 0.00014032835820895523,
"loss": 0.3058,
"step": 6000
},
{
"epoch": 3.0,
"eval_accuracy": 0.9052105075120814,
"eval_loss": 0.2269268035888672,
"eval_runtime": 209.4743,
"eval_samples_per_second": 253.878,
"eval_steps_per_second": 31.737,
"step": 6030
},
{
"epoch": 3.23,
"grad_norm": 2.630223512649536,
"learning_rate": 0.000135363184079602,
"loss": 0.2868,
"step": 6500
},
{
"epoch": 3.48,
"grad_norm": 1.8844317197799683,
"learning_rate": 0.00013038805970149256,
"loss": 0.2843,
"step": 7000
},
{
"epoch": 3.73,
"grad_norm": 2.541440010070801,
"learning_rate": 0.0001254129353233831,
"loss": 0.2794,
"step": 7500
},
{
"epoch": 3.98,
"grad_norm": 2.5618529319763184,
"learning_rate": 0.00012043781094527365,
"loss": 0.2722,
"step": 8000
},
{
"epoch": 4.0,
"eval_accuracy": 0.9196893627423328,
"eval_loss": 0.20079384744167328,
"eval_runtime": 208.4042,
"eval_samples_per_second": 255.182,
"eval_steps_per_second": 31.9,
"step": 8040
},
{
"epoch": 4.23,
"grad_norm": 1.9443061351776123,
"learning_rate": 0.0001154726368159204,
"loss": 0.2546,
"step": 8500
},
{
"epoch": 4.48,
"grad_norm": 1.7675588130950928,
"learning_rate": 0.00011049751243781094,
"loss": 0.2498,
"step": 9000
},
{
"epoch": 4.73,
"grad_norm": 2.6675548553466797,
"learning_rate": 0.00010552238805970149,
"loss": 0.2472,
"step": 9500
},
{
"epoch": 4.98,
"grad_norm": 3.0125935077667236,
"learning_rate": 0.00010054726368159203,
"loss": 0.2496,
"step": 10000
},
{
"epoch": 5.0,
"eval_accuracy": 0.9271920422707358,
"eval_loss": 0.184106707572937,
"eval_runtime": 208.5204,
"eval_samples_per_second": 255.04,
"eval_steps_per_second": 31.882,
"step": 10050
},
{
"epoch": 5.22,
"grad_norm": 3.092007637023926,
"learning_rate": 9.55721393034826e-05,
"loss": 0.2271,
"step": 10500
},
{
"epoch": 5.47,
"grad_norm": 4.371829509735107,
"learning_rate": 9.059701492537314e-05,
"loss": 0.2264,
"step": 11000
},
{
"epoch": 5.72,
"grad_norm": 2.334900140762329,
"learning_rate": 8.562189054726368e-05,
"loss": 0.2257,
"step": 11500
},
{
"epoch": 5.97,
"grad_norm": 3.219524621963501,
"learning_rate": 8.064676616915423e-05,
"loss": 0.2219,
"step": 12000
},
{
"epoch": 6.0,
"eval_accuracy": 0.9352588330418758,
"eval_loss": 0.1706165224313736,
"eval_runtime": 212.161,
"eval_samples_per_second": 250.663,
"eval_steps_per_second": 31.335,
"step": 12060
},
{
"epoch": 6.22,
"grad_norm": 4.32515287399292,
"learning_rate": 7.5681592039801e-05,
"loss": 0.207,
"step": 12500
},
{
"epoch": 6.47,
"grad_norm": 2.8301429748535156,
"learning_rate": 7.070646766169154e-05,
"loss": 0.1996,
"step": 13000
},
{
"epoch": 6.72,
"grad_norm": 4.194028377532959,
"learning_rate": 6.573134328358209e-05,
"loss": 0.2018,
"step": 13500
},
{
"epoch": 6.97,
"grad_norm": 3.551224708557129,
"learning_rate": 6.075621890547264e-05,
"loss": 0.198,
"step": 14000
},
{
"epoch": 7.0,
"eval_accuracy": 0.9451871909140482,
"eval_loss": 0.147793248295784,
"eval_runtime": 210.5297,
"eval_samples_per_second": 252.606,
"eval_steps_per_second": 31.577,
"step": 14070
},
{
"epoch": 7.21,
"grad_norm": 2.972999095916748,
"learning_rate": 5.5791044776119405e-05,
"loss": 0.1892,
"step": 14500
},
{
"epoch": 7.46,
"grad_norm": 2.5483310222625732,
"learning_rate": 5.081592039800995e-05,
"loss": 0.1845,
"step": 15000
},
{
"epoch": 7.71,
"grad_norm": 2.5494730472564697,
"learning_rate": 4.58407960199005e-05,
"loss": 0.1848,
"step": 15500
},
{
"epoch": 7.96,
"grad_norm": 4.215138912200928,
"learning_rate": 4.086567164179105e-05,
"loss": 0.1812,
"step": 16000
},
{
"epoch": 8.0,
"eval_accuracy": 0.9503206032229555,
"eval_loss": 0.1412646621465683,
"eval_runtime": 211.1854,
"eval_samples_per_second": 251.821,
"eval_steps_per_second": 31.479,
"step": 16080
},
{
"epoch": 8.21,
"grad_norm": 3.0078041553497314,
"learning_rate": 3.5900497512437814e-05,
"loss": 0.1719,
"step": 16500
},
{
"epoch": 8.46,
"grad_norm": 3.556603193283081,
"learning_rate": 3.092537313432836e-05,
"loss": 0.1717,
"step": 17000
},
{
"epoch": 8.71,
"grad_norm": 2.1547062397003174,
"learning_rate": 2.595024875621891e-05,
"loss": 0.1682,
"step": 17500
},
{
"epoch": 8.96,
"grad_norm": 2.7211575508117676,
"learning_rate": 2.0975124378109454e-05,
"loss": 0.1632,
"step": 18000
},
{
"epoch": 9.0,
"eval_accuracy": 0.955303585867133,
"eval_loss": 0.13010455667972565,
"eval_runtime": 201.7962,
"eval_samples_per_second": 263.538,
"eval_steps_per_second": 32.944,
"step": 18090
},
{
"epoch": 9.2,
"grad_norm": 2.98066782951355,
"learning_rate": 1.600995024875622e-05,
"loss": 0.1626,
"step": 18500
},
{
"epoch": 9.45,
"grad_norm": 2.36365008354187,
"learning_rate": 1.1034825870646767e-05,
"loss": 0.1599,
"step": 19000
},
{
"epoch": 9.7,
"grad_norm": 3.5116114616394043,
"learning_rate": 6.059701492537314e-06,
"loss": 0.1568,
"step": 19500
},
{
"epoch": 9.95,
"grad_norm": 2.070329427719116,
"learning_rate": 1.0845771144278609e-06,
"loss": 0.1576,
"step": 20000
},
{
"epoch": 10.0,
"eval_accuracy": 0.9568830973467968,
"eval_loss": 0.12666618824005127,
"eval_runtime": 201.0618,
"eval_samples_per_second": 264.501,
"eval_steps_per_second": 33.064,
"step": 20100
},
{
"epoch": 10.0,
"step": 20100,
"total_flos": 7.572439992003492e+19,
"train_loss": 0.2540779383265557,
"train_runtime": 33188.7721,
"train_samples_per_second": 90.801,
"train_steps_per_second": 0.606
}
],
"logging_steps": 500,
"max_steps": 20100,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 7.572439992003492e+19,
"train_batch_size": 150,
"trial_name": null,
"trial_params": null
}