whisper-large-somali / trainer_state.json
{
"best_metric": 55.04238164151334,
"best_model_checkpoint": "./whisper-large-somali/checkpoint-1000",
"epoch": 76.88888888888889,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.89,
"learning_rate": 6.611096473669596e-06,
"loss": 2.191,
"step": 25
},
{
"epoch": 3.81,
"learning_rate": 8.31378915840787e-06,
"loss": 1.015,
"step": 50
},
{
"epoch": 5.74,
"learning_rate": 9.28666248215634e-06,
"loss": 0.5048,
"step": 75
},
{
"epoch": 7.67,
"learning_rate": 9.933858671331224e-06,
"loss": 0.2122,
"step": 100
},
{
"epoch": 9.59,
"learning_rate": 9.755555555555556e-06,
"loss": 0.0748,
"step": 125
},
{
"epoch": 11.52,
"learning_rate": 9.466666666666667e-06,
"loss": 0.0338,
"step": 150
},
{
"epoch": 13.44,
"learning_rate": 9.17777777777778e-06,
"loss": 0.0206,
"step": 175
},
{
"epoch": 15.37,
"learning_rate": 8.888888888888888e-06,
"loss": 0.0137,
"step": 200
},
{
"epoch": 17.3,
"learning_rate": 8.6e-06,
"loss": 0.0101,
"step": 225
},
{
"epoch": 19.22,
"learning_rate": 8.311111111111111e-06,
"loss": 0.0066,
"step": 250
},
{
"epoch": 21.15,
"learning_rate": 8.022222222222222e-06,
"loss": 0.0057,
"step": 275
},
{
"epoch": 23.07,
"learning_rate": 7.733333333333334e-06,
"loss": 0.0055,
"step": 300
},
{
"epoch": 24.96,
"learning_rate": 7.444444444444445e-06,
"loss": 0.0058,
"step": 325
},
{
"epoch": 26.89,
"learning_rate": 7.155555555555556e-06,
"loss": 0.0047,
"step": 350
},
{
"epoch": 28.81,
"learning_rate": 6.866666666666667e-06,
"loss": 0.0031,
"step": 375
},
{
"epoch": 30.74,
"learning_rate": 6.577777777777779e-06,
"loss": 0.0026,
"step": 400
},
{
"epoch": 32.67,
"learning_rate": 6.28888888888889e-06,
"loss": 0.0037,
"step": 425
},
{
"epoch": 34.59,
"learning_rate": 6e-06,
"loss": 0.0026,
"step": 450
},
{
"epoch": 36.52,
"learning_rate": 5.711111111111112e-06,
"loss": 0.0018,
"step": 475
},
{
"epoch": 38.44,
"learning_rate": 5.422222222222223e-06,
"loss": 0.0012,
"step": 500
},
{
"epoch": 38.44,
"eval_loss": 1.890625,
"eval_runtime": 421.0342,
"eval_samples_per_second": 2.42,
"eval_steps_per_second": 0.038,
"eval_wer": 55.41451312797189,
"step": 500
},
{
"epoch": 40.37,
"learning_rate": 5.133333333333334e-06,
"loss": 0.0009,
"step": 525
},
{
"epoch": 42.3,
"learning_rate": 4.8444444444444446e-06,
"loss": 0.0008,
"step": 550
},
{
"epoch": 44.22,
"learning_rate": 4.555555555555556e-06,
"loss": 0.0007,
"step": 575
},
{
"epoch": 46.15,
"learning_rate": 4.266666666666668e-06,
"loss": 0.0006,
"step": 600
},
{
"epoch": 48.07,
"learning_rate": 3.977777777777778e-06,
"loss": 0.0006,
"step": 625
},
{
"epoch": 49.96,
"learning_rate": 3.7e-06,
"loss": 0.0006,
"step": 650
},
{
"epoch": 51.89,
"learning_rate": 3.4111111111111113e-06,
"loss": 0.0006,
"step": 675
},
{
"epoch": 53.81,
"learning_rate": 3.1222222222222228e-06,
"loss": 0.0005,
"step": 700
},
{
"epoch": 55.74,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.0005,
"step": 725
},
{
"epoch": 57.67,
"learning_rate": 2.5444444444444446e-06,
"loss": 0.0005,
"step": 750
},
{
"epoch": 59.59,
"learning_rate": 2.2555555555555557e-06,
"loss": 0.0005,
"step": 775
},
{
"epoch": 61.52,
"learning_rate": 1.9666666666666668e-06,
"loss": 0.0005,
"step": 800
},
{
"epoch": 63.44,
"learning_rate": 1.6777777777777779e-06,
"loss": 0.0005,
"step": 825
},
{
"epoch": 65.37,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.0005,
"step": 850
},
{
"epoch": 67.3,
"learning_rate": 1.1e-06,
"loss": 0.0005,
"step": 875
},
{
"epoch": 69.22,
"learning_rate": 8.111111111111112e-07,
"loss": 0.0005,
"step": 900
},
{
"epoch": 71.15,
"learning_rate": 5.222222222222223e-07,
"loss": 0.0004,
"step": 925
},
{
"epoch": 73.07,
"learning_rate": 2.3333333333333336e-07,
"loss": 0.0004,
"step": 950
},
{
"epoch": 74.96,
"learning_rate": 0.0,
"loss": 0.0004,
"step": 975
},
{
"epoch": 76.89,
"learning_rate": 0.0,
"loss": 0.0004,
"step": 1000
},
{
"epoch": 76.89,
"eval_loss": 2.00390625,
"eval_runtime": 421.4824,
"eval_samples_per_second": 2.418,
"eval_steps_per_second": 0.038,
"eval_wer": 55.04238164151334,
"step": 1000
},
{
"epoch": 76.89,
"step": 1000,
"total_flos": 1.384647263524592e+20,
"train_loss": 0.1032530371248722,
"train_runtime": 9059.3866,
"train_samples_per_second": 7.064,
"train_steps_per_second": 0.11
}
],
"max_steps": 1000,
"num_train_epochs": 77,
"total_flos": 1.384647263524592e+20,
"trial_name": null,
"trial_params": null
}
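
For reference, a minimal sketch (assuming Python with only the standard library; the file path is an assumption) of how one might load this trainer_state.json and pull out the best WER, the training-loss entries, and the evaluation entries from log_history:

    import json

    # Path is an assumption; adjust to wherever trainer_state.json lives.
    with open("whisper-large-somali/trainer_state.json") as f:
        state = json.load(f)

    print("best WER:", state["best_metric"])
    print("best checkpoint:", state["best_model_checkpoint"])

    # Training-loss entries carry a "loss" key; evaluation entries carry "eval_wer".
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_wer" in e]

    for e in eval_log:
        print(f"step {e['step']}: eval_loss={e['eval_loss']}, eval_wer={e['eval_wer']}")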