{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 80.0,
"global_step": 4560,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.77,
"learning_rate": 4.8822115384615383e-05,
"loss": 6.3683,
"step": 500
},
{
"epoch": 8.77,
"eval_loss": 3.128021001815796,
"eval_runtime": 61.7966,
"eval_samples_per_second": 27.202,
"eval_steps_per_second": 3.414,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 17.54,
"learning_rate": 4.28125e-05,
"loss": 1.9915,
"step": 1000
},
{
"epoch": 17.54,
"eval_loss": 0.6600078344345093,
"eval_runtime": 61.9497,
"eval_samples_per_second": 27.135,
"eval_steps_per_second": 3.406,
"eval_wer": 0.6444381384790011,
"step": 1000
},
{
"epoch": 26.32,
"learning_rate": 3.680288461538462e-05,
"loss": 0.6565,
"step": 1500
},
{
"epoch": 26.32,
"eval_loss": 0.4207763671875,
"eval_runtime": 61.6392,
"eval_samples_per_second": 27.272,
"eval_steps_per_second": 3.423,
"eval_wer": 0.4486379114642452,
"step": 1500
},
{
"epoch": 35.09,
"learning_rate": 3.079326923076923e-05,
"loss": 0.4484,
"step": 2000
},
{
"epoch": 35.09,
"eval_loss": 0.38846877217292786,
"eval_runtime": 60.5282,
"eval_samples_per_second": 27.772,
"eval_steps_per_second": 3.486,
"eval_wer": 0.4005864547862278,
"step": 2000
},
{
"epoch": 43.86,
"learning_rate": 2.478365384615385e-05,
"loss": 0.3573,
"step": 2500
},
{
"epoch": 43.86,
"eval_loss": 0.3548009693622589,
"eval_runtime": 60.4916,
"eval_samples_per_second": 27.789,
"eval_steps_per_second": 3.488,
"eval_wer": 0.36256148316307224,
"step": 2500
},
{
"epoch": 52.63,
"learning_rate": 1.8774038461538462e-05,
"loss": 0.3063,
"step": 3000
},
{
"epoch": 52.63,
"eval_loss": 0.3374779522418976,
"eval_runtime": 60.6448,
"eval_samples_per_second": 27.719,
"eval_steps_per_second": 3.479,
"eval_wer": 0.34298146046159667,
"step": 3000
},
{
"epoch": 61.4,
"learning_rate": 1.2764423076923077e-05,
"loss": 0.2751,
"step": 3500
},
{
"epoch": 61.4,
"eval_loss": 0.3359163701534271,
"eval_runtime": 61.2942,
"eval_samples_per_second": 27.425,
"eval_steps_per_second": 3.442,
"eval_wer": 0.32406356413166854,
"step": 3500
},
{
"epoch": 70.18,
"learning_rate": 6.754807692307692e-06,
"loss": 0.2511,
"step": 4000
},
{
"epoch": 70.18,
"eval_loss": 0.32216018438339233,
"eval_runtime": 60.7546,
"eval_samples_per_second": 27.669,
"eval_steps_per_second": 3.473,
"eval_wer": 0.31082103670071887,
"step": 4000
},
{
"epoch": 78.95,
"learning_rate": 7.451923076923078e-07,
"loss": 0.2361,
"step": 4500
},
{
"epoch": 78.95,
"eval_loss": 0.3205258548259735,
"eval_runtime": 61.518,
"eval_samples_per_second": 27.325,
"eval_steps_per_second": 3.43,
"eval_wer": 0.3083617101778282,
"step": 4500
},
{
"epoch": 80.0,
"step": 4560,
"total_flos": 3.6657223992885486e+19,
"train_loss": 1.1972084400946634,
"train_runtime": 14228.904,
"train_samples_per_second": 20.387,
"train_steps_per_second": 0.32
}
],
"max_steps": 4560,
"num_train_epochs": 80,
"total_flos": 3.6657223992885486e+19,
"trial_name": null,
"trial_params": null
}