{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"global_step": 29238,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 2.9486969012928382e-05,
"loss": 2.7395,
"step": 500
},
{
"epoch": 0.07,
"learning_rate": 2.8973938025856763e-05,
"loss": 1.725,
"step": 1000
},
{
"epoch": 0.1,
"learning_rate": 2.8460907038785144e-05,
"loss": 1.4616,
"step": 1500
},
{
"epoch": 0.14,
"learning_rate": 2.7947876051713525e-05,
"loss": 1.4496,
"step": 2000
},
{
"epoch": 0.17,
"learning_rate": 2.7434845064641902e-05,
"loss": 1.3397,
"step": 2500
},
{
"epoch": 0.21,
"learning_rate": 2.6921814077570283e-05,
"loss": 1.3652,
"step": 3000
},
{
"epoch": 0.24,
"learning_rate": 2.6408783090498668e-05,
"loss": 1.2952,
"step": 3500
},
{
"epoch": 0.27,
"learning_rate": 2.589575210342705e-05,
"loss": 1.288,
"step": 4000
},
{
"epoch": 0.31,
"learning_rate": 2.538272111635543e-05,
"loss": 1.2415,
"step": 4500
},
{
"epoch": 0.34,
"learning_rate": 2.486969012928381e-05,
"loss": 1.2008,
"step": 5000
},
{
"epoch": 0.38,
"learning_rate": 2.435665914221219e-05,
"loss": 1.216,
"step": 5500
},
{
"epoch": 0.41,
"learning_rate": 2.3843628155140572e-05,
"loss": 1.2232,
"step": 6000
},
{
"epoch": 0.44,
"learning_rate": 2.3330597168068953e-05,
"loss": 1.2123,
"step": 6500
},
{
"epoch": 0.48,
"learning_rate": 2.281756618099733e-05,
"loss": 1.174,
"step": 7000
},
{
"epoch": 0.51,
"learning_rate": 2.2304535193925712e-05,
"loss": 1.1183,
"step": 7500
},
{
"epoch": 0.55,
"learning_rate": 2.1791504206854093e-05,
"loss": 1.1476,
"step": 8000
},
{
"epoch": 0.58,
"learning_rate": 2.1278473219782474e-05,
"loss": 1.121,
"step": 8500
},
{
"epoch": 0.62,
"learning_rate": 2.0765442232710855e-05,
"loss": 1.1595,
"step": 9000
},
{
"epoch": 0.65,
"learning_rate": 2.0252411245639236e-05,
"loss": 1.1104,
"step": 9500
},
{
"epoch": 0.68,
"learning_rate": 1.973938025856762e-05,
"loss": 1.0875,
"step": 10000
},
{
"epoch": 0.72,
"learning_rate": 1.9226349271496e-05,
"loss": 1.0816,
"step": 10500
},
{
"epoch": 0.75,
"learning_rate": 1.8713318284424382e-05,
"loss": 1.0924,
"step": 11000
},
{
"epoch": 0.79,
"learning_rate": 1.820028729735276e-05,
"loss": 1.0699,
"step": 11500
},
{
"epoch": 0.82,
"learning_rate": 1.768725631028114e-05,
"loss": 1.0639,
"step": 12000
},
{
"epoch": 0.86,
"learning_rate": 1.717422532320952e-05,
"loss": 1.0217,
"step": 12500
},
{
"epoch": 0.89,
"learning_rate": 1.6661194336137902e-05,
"loss": 1.0369,
"step": 13000
},
{
"epoch": 0.92,
"learning_rate": 1.6148163349066283e-05,
"loss": 1.0832,
"step": 13500
},
{
"epoch": 0.96,
"learning_rate": 1.5635132361994664e-05,
"loss": 1.0152,
"step": 14000
},
{
"epoch": 0.99,
"learning_rate": 1.5122101374923047e-05,
"loss": 1.0104,
"step": 14500
},
{
"epoch": 1.03,
"learning_rate": 1.4609070387851426e-05,
"loss": 0.7741,
"step": 15000
},
{
"epoch": 1.06,
"learning_rate": 1.4096039400779807e-05,
"loss": 0.7154,
"step": 15500
},
{
"epoch": 1.09,
"learning_rate": 1.3583008413708188e-05,
"loss": 0.7412,
"step": 16000
},
{
"epoch": 1.13,
"learning_rate": 1.306997742663657e-05,
"loss": 0.7439,
"step": 16500
},
{
"epoch": 1.16,
"learning_rate": 1.255694643956495e-05,
"loss": 0.6894,
"step": 17000
},
{
"epoch": 1.2,
"learning_rate": 1.2043915452493331e-05,
"loss": 0.6848,
"step": 17500
},
{
"epoch": 1.23,
"learning_rate": 1.1530884465421712e-05,
"loss": 0.7217,
"step": 18000
},
{
"epoch": 1.27,
"learning_rate": 1.1017853478350093e-05,
"loss": 0.7294,
"step": 18500
},
{
"epoch": 1.3,
"learning_rate": 1.0504822491278474e-05,
"loss": 0.7065,
"step": 19000
},
{
"epoch": 1.33,
"learning_rate": 9.991791504206853e-06,
"loss": 0.704,
"step": 19500
},
{
"epoch": 1.37,
"learning_rate": 9.478760517135234e-06,
"loss": 0.7225,
"step": 20000
},
{
"epoch": 1.4,
"learning_rate": 8.965729530063615e-06,
"loss": 0.6821,
"step": 20500
},
{
"epoch": 1.44,
"learning_rate": 8.452698542991998e-06,
"loss": 0.6722,
"step": 21000
},
{
"epoch": 1.47,
"learning_rate": 7.939667555920379e-06,
"loss": 0.7089,
"step": 21500
},
{
"epoch": 1.5,
"learning_rate": 7.426636568848759e-06,
"loss": 0.71,
"step": 22000
},
{
"epoch": 1.54,
"learning_rate": 6.913605581777139e-06,
"loss": 0.7121,
"step": 22500
},
{
"epoch": 1.57,
"learning_rate": 6.40057459470552e-06,
"loss": 0.7063,
"step": 23000
},
{
"epoch": 1.61,
"learning_rate": 5.887543607633901e-06,
"loss": 0.7064,
"step": 23500
},
{
"epoch": 1.64,
"learning_rate": 5.374512620562282e-06,
"loss": 0.7128,
"step": 24000
},
{
"epoch": 1.68,
"learning_rate": 4.861481633490662e-06,
"loss": 0.6573,
"step": 24500
},
{
"epoch": 1.71,
"learning_rate": 4.348450646419044e-06,
"loss": 0.6695,
"step": 25000
},
{
"epoch": 1.74,
"learning_rate": 3.835419659347425e-06,
"loss": 0.6928,
"step": 25500
},
{
"epoch": 1.78,
"learning_rate": 3.3223886722758055e-06,
"loss": 0.6625,
"step": 26000
},
{
"epoch": 1.81,
"learning_rate": 2.809357685204186e-06,
"loss": 0.6704,
"step": 26500
},
{
"epoch": 1.85,
"learning_rate": 2.2963266981325674e-06,
"loss": 0.6487,
"step": 27000
},
{
"epoch": 1.88,
"learning_rate": 1.783295711060948e-06,
"loss": 0.6707,
"step": 27500
},
{
"epoch": 1.92,
"learning_rate": 1.270264723989329e-06,
"loss": 0.6782,
"step": 28000
},
{
"epoch": 1.95,
"learning_rate": 7.572337369177098e-07,
"loss": 0.6796,
"step": 28500
},
{
"epoch": 1.98,
"learning_rate": 2.442027498460907e-07,
"loss": 0.6222,
"step": 29000
},
{
"epoch": 2.0,
"step": 29238,
"total_flos": 4.583876744068301e+16,
"train_loss": 0.9686727332664389,
"train_runtime": 18778.1209,
"train_samples_per_second": 9.342,
"train_steps_per_second": 1.557
}
],
"max_steps": 29238,
"num_train_epochs": 2,
"total_flos": 4.583876744068301e+16,
"trial_name": null,
"trial_params": null
}