{
"best_metric": 1.0591806173324585,
"best_model_checkpoint": "kurosekurose/wav2vec2-base-EMOPIA/checkpoint-205",
"epoch": 4.984,
"global_step": 205,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.98,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2789,
"step": 41
},
{
"epoch": 0.98,
"eval_accuracy": 0.4000000059604645,
"eval_loss": 1.201923131942749,
"eval_runtime": 40.4421,
"eval_samples_per_second": 1.236,
"eval_steps_per_second": 0.321,
"step": 41
},
{
"epoch": 1.98,
"learning_rate": 6e-06,
"loss": 1.0522,
"step": 82
},
{
"epoch": 1.98,
"eval_accuracy": 0.4000000059604645,
"eval_loss": 1.1521090269088745,
"eval_runtime": 39.8895,
"eval_samples_per_second": 1.253,
"eval_steps_per_second": 0.326,
"step": 82
},
{
"epoch": 2.98,
"learning_rate": 4.000000000000001e-06,
"loss": 0.957,
"step": 123
},
{
"epoch": 2.98,
"eval_accuracy": 0.5199999809265137,
"eval_loss": 1.0672701597213745,
"eval_runtime": 40.1526,
"eval_samples_per_second": 1.245,
"eval_steps_per_second": 0.324,
"step": 123
},
{
"epoch": 3.98,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.87,
"step": 164
},
{
"epoch": 3.98,
"eval_accuracy": 0.5,
"eval_loss": 1.09552001953125,
"eval_runtime": 40.1131,
"eval_samples_per_second": 1.246,
"eval_steps_per_second": 0.324,
"step": 164
},
{
"epoch": 4.98,
"learning_rate": 0.0,
"loss": 0.8053,
"step": 205
},
{
"epoch": 4.98,
"eval_accuracy": 0.5199999809265137,
"eval_loss": 1.0591806173324585,
"eval_runtime": 39.9652,
"eval_samples_per_second": 1.251,
"eval_steps_per_second": 0.325,
"step": 205
},
{
"epoch": 4.98,
"step": 205,
"total_flos": 1.4276872903849574e+18,
"train_loss": 0.992677939810404,
"train_runtime": 5632.3881,
"train_samples_per_second": 0.444,
"train_steps_per_second": 0.036
}
],
"max_steps": 205,
"num_train_epochs": 5,
"total_flos": 1.4276872903849574e+18,
"trial_name": null,
"trial_params": null
}