{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.433682161109759,
"global_step": 10000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 1.919201752251156e-05,
"loss": 2.3883,
"step": 500
},
{
"epoch": 0.24,
"learning_rate": 1.8380790135474976e-05,
"loss": 1.4529,
"step": 1000
},
{
"epoch": 0.37,
"learning_rate": 1.7569562748438388e-05,
"loss": 1.2698,
"step": 1500
},
{
"epoch": 0.49,
"learning_rate": 1.6758335361401804e-05,
"loss": 1.1845,
"step": 2000
},
{
"epoch": 0.61,
"learning_rate": 1.594873042913929e-05,
"loss": 1.1239,
"step": 2500
},
{
"epoch": 0.73,
"learning_rate": 1.5137503042102703e-05,
"loss": 1.0576,
"step": 3000
},
{
"epoch": 0.85,
"learning_rate": 1.4326275655066115e-05,
"loss": 1.0744,
"step": 3500
},
{
"epoch": 0.97,
"learning_rate": 1.3515048268029529e-05,
"loss": 1.0113,
"step": 4000
},
{
"epoch": 1.0,
"eval_loss": 0.8904165625572205,
"eval_runtime": 91.887,
"eval_samples_per_second": 131.694,
"eval_steps_per_second": 2.068,
"step": 4109
},
{
"epoch": 1.1,
"learning_rate": 1.2703820880992943e-05,
"loss": 0.9452,
"step": 4500
},
{
"epoch": 1.22,
"learning_rate": 1.1892593493956356e-05,
"loss": 0.9205,
"step": 5000
},
{
"epoch": 1.34,
"learning_rate": 1.108136610691977e-05,
"loss": 0.8961,
"step": 5500
},
{
"epoch": 1.46,
"learning_rate": 1.0271761174657257e-05,
"loss": 0.906,
"step": 6000
},
{
"epoch": 1.58,
"learning_rate": 9.460533787620671e-06,
"loss": 0.8696,
"step": 6500
},
{
"epoch": 1.7,
"learning_rate": 8.649306400584085e-06,
"loss": 0.8745,
"step": 7000
},
{
"epoch": 1.83,
"learning_rate": 7.838079013547499e-06,
"loss": 0.8528,
"step": 7500
},
{
"epoch": 1.95,
"learning_rate": 7.028474081284985e-06,
"loss": 0.8645,
"step": 8000
},
{
"epoch": 2.0,
"eval_loss": 0.8758471012115479,
"eval_runtime": 91.8658,
"eval_samples_per_second": 131.725,
"eval_steps_per_second": 2.068,
"step": 8218
},
{
"epoch": 2.07,
"learning_rate": 6.217246694248399e-06,
"loss": 0.8209,
"step": 8500
},
{
"epoch": 2.19,
"learning_rate": 5.406019307211812e-06,
"loss": 0.798,
"step": 9000
},
{
"epoch": 2.31,
"learning_rate": 4.594791920175226e-06,
"loss": 0.7944,
"step": 9500
},
{
"epoch": 2.43,
"learning_rate": 3.783564533138639e-06,
"loss": 0.7893,
"step": 10000
}
],
"max_steps": 12327,
"num_train_epochs": 3,
"total_flos": 1.5698961223584768e+16,
"trial_name": null,
"trial_params": null
}