{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"global_step": 16235,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 2.9076070218663384e-05,
"loss": 2.2393,
"step": 500
},
{
"epoch": 0.06,
"learning_rate": 2.8152140437326767e-05,
"loss": 1.4831,
"step": 1000
},
{
"epoch": 0.09,
"learning_rate": 2.7228210655990143e-05,
"loss": 1.3501,
"step": 1500
},
{
"epoch": 0.12,
"learning_rate": 2.6304280874653526e-05,
"loss": 1.3197,
"step": 2000
},
{
"epoch": 0.15,
"learning_rate": 2.538035109331691e-05,
"loss": 1.2581,
"step": 2500
},
{
"epoch": 0.18,
"learning_rate": 2.4456421311980292e-05,
"loss": 1.2174,
"step": 3000
},
{
"epoch": 0.22,
"learning_rate": 2.3532491530643672e-05,
"loss": 1.1655,
"step": 3500
},
{
"epoch": 0.25,
"learning_rate": 2.2608561749307052e-05,
"loss": 1.1402,
"step": 4000
},
{
"epoch": 0.28,
"learning_rate": 2.1684631967970435e-05,
"loss": 1.0944,
"step": 4500
},
{
"epoch": 0.31,
"learning_rate": 2.0760702186633818e-05,
"loss": 1.152,
"step": 5000
},
{
"epoch": 0.34,
"learning_rate": 1.9836772405297198e-05,
"loss": 1.1138,
"step": 5500
},
{
"epoch": 0.37,
"learning_rate": 1.8912842623960577e-05,
"loss": 1.0516,
"step": 6000
},
{
"epoch": 0.4,
"learning_rate": 1.798891284262396e-05,
"loss": 1.0912,
"step": 6500
},
{
"epoch": 0.43,
"learning_rate": 1.7064983061287344e-05,
"loss": 1.0322,
"step": 7000
},
{
"epoch": 0.46,
"learning_rate": 1.6141053279950723e-05,
"loss": 0.9863,
"step": 7500
},
{
"epoch": 0.49,
"learning_rate": 1.5217123498614105e-05,
"loss": 0.9888,
"step": 8000
},
{
"epoch": 0.52,
"learning_rate": 1.4293193717277488e-05,
"loss": 1.0046,
"step": 8500
},
{
"epoch": 0.55,
"learning_rate": 1.3369263935940867e-05,
"loss": 0.9813,
"step": 9000
},
{
"epoch": 0.59,
"learning_rate": 1.244533415460425e-05,
"loss": 0.9854,
"step": 9500
},
{
"epoch": 0.62,
"learning_rate": 1.1521404373267632e-05,
"loss": 0.9457,
"step": 10000
},
{
"epoch": 0.65,
"learning_rate": 1.0597474591931013e-05,
"loss": 0.931,
"step": 10500
},
{
"epoch": 0.68,
"learning_rate": 9.673544810594396e-06,
"loss": 0.8925,
"step": 11000
},
{
"epoch": 0.71,
"learning_rate": 8.749615029257776e-06,
"loss": 0.9469,
"step": 11500
},
{
"epoch": 0.74,
"learning_rate": 7.82568524792116e-06,
"loss": 0.9721,
"step": 12000
},
{
"epoch": 0.77,
"learning_rate": 6.90175546658454e-06,
"loss": 0.9377,
"step": 12500
},
{
"epoch": 0.8,
"learning_rate": 5.977825685247921e-06,
"loss": 0.8359,
"step": 13000
},
{
"epoch": 0.83,
"learning_rate": 5.0538959039113025e-06,
"loss": 0.9046,
"step": 13500
},
{
"epoch": 0.86,
"learning_rate": 4.129966122574684e-06,
"loss": 0.8857,
"step": 14000
},
{
"epoch": 0.89,
"learning_rate": 3.206036341238066e-06,
"loss": 0.8674,
"step": 14500
},
{
"epoch": 0.92,
"learning_rate": 2.2821065599014475e-06,
"loss": 0.8429,
"step": 15000
},
{
"epoch": 0.95,
"learning_rate": 1.3581767785648291e-06,
"loss": 0.9049,
"step": 15500
},
{
"epoch": 0.99,
"learning_rate": 4.3424699722821066e-07,
"loss": 0.8825,
"step": 16000
},
{
"epoch": 1.0,
"step": 16235,
"total_flos": 4.523174534460211e+16,
"train_loss": 1.0714931627842768,
"train_runtime": 16914.7753,
"train_samples_per_second": 5.759,
"train_steps_per_second": 0.96
}
],
"max_steps": 16235,
"num_train_epochs": 1,
"total_flos": 4.523174534460211e+16,
"trial_name": null,
"trial_params": null
}