whisper-small-tamil / trainer_state.json
whisper small trained on fleurs tamil (commit 2e701d4)
{
"best_metric": 15.021535719590078,
"best_model_checkpoint": "../../outdir/2022-12-08_00-59-06/checkpoint-2500",
"epoch": 29.4093567251462,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.29,
"learning_rate": 5.000000000000001e-07,
"loss": 1.001,
"step": 25
},
{
"epoch": 0.58,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.8166,
"step": 50
},
{
"epoch": 0.88,
"learning_rate": 1.5e-06,
"loss": 0.6237,
"step": 75
},
{
"epoch": 1.18,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.5312,
"step": 100
},
{
"epoch": 1.47,
"learning_rate": 2.5e-06,
"loss": 0.4587,
"step": 125
},
{
"epoch": 1.76,
"learning_rate": 3e-06,
"loss": 0.425,
"step": 150
},
{
"epoch": 2.06,
"learning_rate": 3.5e-06,
"loss": 0.4055,
"step": 175
},
{
"epoch": 2.35,
"learning_rate": 4.000000000000001e-06,
"loss": 0.3455,
"step": 200
},
{
"epoch": 2.64,
"learning_rate": 4.5e-06,
"loss": 0.3192,
"step": 225
},
{
"epoch": 2.94,
"learning_rate": 5e-06,
"loss": 0.3146,
"step": 250
},
{
"epoch": 3.23,
"learning_rate": 5.500000000000001e-06,
"loss": 0.2732,
"step": 275
},
{
"epoch": 3.53,
"learning_rate": 6e-06,
"loss": 0.2073,
"step": 300
},
{
"epoch": 3.82,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.185,
"step": 325
},
{
"epoch": 4.12,
"learning_rate": 7e-06,
"loss": 0.1786,
"step": 350
},
{
"epoch": 4.41,
"learning_rate": 7.500000000000001e-06,
"loss": 0.1354,
"step": 375
},
{
"epoch": 4.7,
"learning_rate": 8.000000000000001e-06,
"loss": 0.1369,
"step": 400
},
{
"epoch": 4.99,
"learning_rate": 8.5e-06,
"loss": 0.1298,
"step": 425
},
{
"epoch": 5.29,
"learning_rate": 9e-06,
"loss": 0.0926,
"step": 450
},
{
"epoch": 5.58,
"learning_rate": 9.5e-06,
"loss": 0.0898,
"step": 475
},
{
"epoch": 5.88,
"learning_rate": 1e-05,
"loss": 0.0882,
"step": 500
},
{
"epoch": 5.88,
"eval_loss": 0.267423152923584,
"eval_runtime": 1165.7105,
"eval_samples_per_second": 0.507,
"eval_steps_per_second": 0.016,
"eval_wer": 16.735481954552206,
"step": 500
},
{
"epoch": 6.18,
"learning_rate": 9.944444444444445e-06,
"loss": 0.067,
"step": 525
},
{
"epoch": 6.47,
"learning_rate": 9.88888888888889e-06,
"loss": 0.0524,
"step": 550
},
{
"epoch": 6.76,
"learning_rate": 9.833333333333333e-06,
"loss": 0.049,
"step": 575
},
{
"epoch": 7.06,
"learning_rate": 9.777777777777779e-06,
"loss": 0.0497,
"step": 600
},
{
"epoch": 7.35,
"learning_rate": 9.722222222222223e-06,
"loss": 0.026,
"step": 625
},
{
"epoch": 7.64,
"learning_rate": 9.666666666666667e-06,
"loss": 0.0286,
"step": 650
},
{
"epoch": 7.94,
"learning_rate": 9.611111111111112e-06,
"loss": 0.0279,
"step": 675
},
{
"epoch": 8.23,
"learning_rate": 9.555555555555556e-06,
"loss": 0.0189,
"step": 700
},
{
"epoch": 8.53,
"learning_rate": 9.5e-06,
"loss": 0.0161,
"step": 725
},
{
"epoch": 8.82,
"learning_rate": 9.444444444444445e-06,
"loss": 0.0131,
"step": 750
},
{
"epoch": 9.12,
"learning_rate": 9.38888888888889e-06,
"loss": 0.0124,
"step": 775
},
{
"epoch": 9.41,
"learning_rate": 9.333333333333334e-06,
"loss": 0.0089,
"step": 800
},
{
"epoch": 9.7,
"learning_rate": 9.277777777777778e-06,
"loss": 0.0087,
"step": 825
},
{
"epoch": 9.99,
"learning_rate": 9.222222222222224e-06,
"loss": 0.0081,
"step": 850
},
{
"epoch": 10.29,
"learning_rate": 9.166666666666666e-06,
"loss": 0.0059,
"step": 875
},
{
"epoch": 10.58,
"learning_rate": 9.111111111111112e-06,
"loss": 0.0059,
"step": 900
},
{
"epoch": 10.88,
"learning_rate": 9.055555555555556e-06,
"loss": 0.0047,
"step": 925
},
{
"epoch": 11.18,
"learning_rate": 9e-06,
"loss": 0.0036,
"step": 950
},
{
"epoch": 11.47,
"learning_rate": 8.944444444444446e-06,
"loss": 0.0027,
"step": 975
},
{
"epoch": 11.76,
"learning_rate": 8.888888888888888e-06,
"loss": 0.0026,
"step": 1000
},
{
"epoch": 11.76,
"eval_loss": 0.3508181571960449,
"eval_runtime": 1146.9726,
"eval_samples_per_second": 0.515,
"eval_steps_per_second": 0.017,
"eval_wer": 15.372048121194117,
"step": 1000
},
{
"epoch": 12.06,
"learning_rate": 8.833333333333334e-06,
"loss": 0.0028,
"step": 1025
},
{
"epoch": 12.35,
"learning_rate": 8.777777777777778e-06,
"loss": 0.0015,
"step": 1050
},
{
"epoch": 12.64,
"learning_rate": 8.722222222222224e-06,
"loss": 0.0017,
"step": 1075
},
{
"epoch": 12.94,
"learning_rate": 8.666666666666668e-06,
"loss": 0.0019,
"step": 1100
},
{
"epoch": 13.23,
"learning_rate": 8.611111111111112e-06,
"loss": 0.0011,
"step": 1125
},
{
"epoch": 13.53,
"learning_rate": 8.555555555555556e-06,
"loss": 0.0011,
"step": 1150
},
{
"epoch": 13.82,
"learning_rate": 8.5e-06,
"loss": 0.0012,
"step": 1175
},
{
"epoch": 14.12,
"learning_rate": 8.444444444444446e-06,
"loss": 0.0012,
"step": 1200
},
{
"epoch": 14.41,
"learning_rate": 8.38888888888889e-06,
"loss": 0.0012,
"step": 1225
},
{
"epoch": 14.7,
"learning_rate": 8.333333333333334e-06,
"loss": 0.001,
"step": 1250
},
{
"epoch": 14.99,
"learning_rate": 8.277777777777778e-06,
"loss": 0.0017,
"step": 1275
},
{
"epoch": 15.29,
"learning_rate": 8.222222222222222e-06,
"loss": 0.002,
"step": 1300
},
{
"epoch": 15.58,
"learning_rate": 8.166666666666668e-06,
"loss": 0.0019,
"step": 1325
},
{
"epoch": 15.88,
"learning_rate": 8.111111111111112e-06,
"loss": 0.0015,
"step": 1350
},
{
"epoch": 16.18,
"learning_rate": 8.055555555555557e-06,
"loss": 0.0016,
"step": 1375
},
{
"epoch": 16.47,
"learning_rate": 8.000000000000001e-06,
"loss": 0.0013,
"step": 1400
},
{
"epoch": 16.76,
"learning_rate": 7.944444444444445e-06,
"loss": 0.0016,
"step": 1425
},
{
"epoch": 17.06,
"learning_rate": 7.88888888888889e-06,
"loss": 0.0013,
"step": 1450
},
{
"epoch": 17.35,
"learning_rate": 7.833333333333333e-06,
"loss": 0.0013,
"step": 1475
},
{
"epoch": 17.64,
"learning_rate": 7.77777777777778e-06,
"loss": 0.0012,
"step": 1500
},
{
"epoch": 17.64,
"eval_loss": 0.3920586407184601,
"eval_runtime": 1148.0874,
"eval_samples_per_second": 0.515,
"eval_steps_per_second": 0.017,
"eval_wer": 15.615624535868111,
"step": 1500
},
{
"epoch": 17.94,
"learning_rate": 7.722222222222223e-06,
"loss": 0.0015,
"step": 1525
},
{
"epoch": 18.23,
"learning_rate": 7.666666666666667e-06,
"loss": 0.0018,
"step": 1550
},
{
"epoch": 18.53,
"learning_rate": 7.611111111111111e-06,
"loss": 0.0018,
"step": 1575
},
{
"epoch": 18.82,
"learning_rate": 7.555555555555556e-06,
"loss": 0.0016,
"step": 1600
},
{
"epoch": 19.12,
"learning_rate": 7.500000000000001e-06,
"loss": 0.0015,
"step": 1625
},
{
"epoch": 19.41,
"learning_rate": 7.444444444444445e-06,
"loss": 0.0017,
"step": 1650
},
{
"epoch": 19.7,
"learning_rate": 7.38888888888889e-06,
"loss": 0.0014,
"step": 1675
},
{
"epoch": 19.99,
"learning_rate": 7.335555555555556e-06,
"loss": 0.0021,
"step": 1700
},
{
"epoch": 20.29,
"learning_rate": 7.280000000000001e-06,
"loss": 0.0024,
"step": 1725
},
{
"epoch": 20.58,
"learning_rate": 7.224444444444445e-06,
"loss": 0.0024,
"step": 1750
},
{
"epoch": 20.88,
"learning_rate": 7.1688888888888895e-06,
"loss": 0.0024,
"step": 1775
},
{
"epoch": 21.18,
"learning_rate": 7.113333333333334e-06,
"loss": 0.0023,
"step": 1800
},
{
"epoch": 21.47,
"learning_rate": 7.057777777777778e-06,
"loss": 0.002,
"step": 1825
},
{
"epoch": 21.76,
"learning_rate": 7.0022222222222225e-06,
"loss": 0.0027,
"step": 1850
},
{
"epoch": 22.06,
"learning_rate": 6.946666666666667e-06,
"loss": 0.0024,
"step": 1875
},
{
"epoch": 22.35,
"learning_rate": 6.891111111111111e-06,
"loss": 0.0015,
"step": 1900
},
{
"epoch": 22.64,
"learning_rate": 6.835555555555556e-06,
"loss": 0.002,
"step": 1925
},
{
"epoch": 22.94,
"learning_rate": 6.780000000000001e-06,
"loss": 0.0014,
"step": 1950
},
{
"epoch": 23.23,
"learning_rate": 6.724444444444444e-06,
"loss": 0.001,
"step": 1975
},
{
"epoch": 23.53,
"learning_rate": 6.668888888888889e-06,
"loss": 0.0009,
"step": 2000
},
{
"epoch": 23.53,
"eval_loss": 0.4076804518699646,
"eval_runtime": 1148.5839,
"eval_samples_per_second": 0.515,
"eval_steps_per_second": 0.017,
"eval_wer": 15.42848655874053,
"step": 2000
},
{
"epoch": 23.82,
"learning_rate": 6.613333333333334e-06,
"loss": 0.0008,
"step": 2025
},
{
"epoch": 24.12,
"learning_rate": 6.557777777777778e-06,
"loss": 0.001,
"step": 2050
},
{
"epoch": 24.41,
"learning_rate": 6.502222222222223e-06,
"loss": 0.0009,
"step": 2075
},
{
"epoch": 24.7,
"learning_rate": 6.446666666666668e-06,
"loss": 0.0005,
"step": 2100
},
{
"epoch": 24.99,
"learning_rate": 6.391111111111111e-06,
"loss": 0.0004,
"step": 2125
},
{
"epoch": 25.29,
"learning_rate": 6.335555555555556e-06,
"loss": 0.0003,
"step": 2150
},
{
"epoch": 25.58,
"learning_rate": 6.280000000000001e-06,
"loss": 0.0003,
"step": 2175
},
{
"epoch": 25.88,
"learning_rate": 6.224444444444445e-06,
"loss": 0.0003,
"step": 2200
},
{
"epoch": 26.18,
"learning_rate": 6.16888888888889e-06,
"loss": 0.0003,
"step": 2225
},
{
"epoch": 26.47,
"learning_rate": 6.113333333333333e-06,
"loss": 0.0002,
"step": 2250
},
{
"epoch": 26.76,
"learning_rate": 6.057777777777778e-06,
"loss": 0.0002,
"step": 2275
},
{
"epoch": 27.06,
"learning_rate": 6.002222222222223e-06,
"loss": 0.0002,
"step": 2300
},
{
"epoch": 27.35,
"learning_rate": 5.946666666666668e-06,
"loss": 0.0002,
"step": 2325
},
{
"epoch": 27.64,
"learning_rate": 5.891111111111112e-06,
"loss": 0.0002,
"step": 2350
},
{
"epoch": 27.94,
"learning_rate": 5.8355555555555565e-06,
"loss": 0.0002,
"step": 2375
},
{
"epoch": 28.23,
"learning_rate": 5.78e-06,
"loss": 0.0002,
"step": 2400
},
{
"epoch": 28.53,
"learning_rate": 5.724444444444445e-06,
"loss": 0.0002,
"step": 2425
},
{
"epoch": 28.82,
"learning_rate": 5.6688888888888895e-06,
"loss": 0.0002,
"step": 2450
},
{
"epoch": 29.12,
"learning_rate": 5.613333333333334e-06,
"loss": 0.0002,
"step": 2475
},
{
"epoch": 29.41,
"learning_rate": 5.557777777777778e-06,
"loss": 0.0002,
"step": 2500
},
{
"epoch": 29.41,
"eval_loss": 0.42684832215309143,
"eval_runtime": 1155.7652,
"eval_samples_per_second": 0.511,
"eval_steps_per_second": 0.016,
"eval_wer": 15.021535719590078,
"step": 2500
}
],
"max_steps": 5000,
"num_train_epochs": 59,
"total_flos": 2.31286769086464e+19,
"trial_name": null,
"trial_params": null
}
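
As a usage note, the log above is the standard trainer state written by the Hugging Face Trainer at each checkpoint. A minimal sketch of how it could be inspected (standard-library Python only; the relative path "trainer_state.json" is an assumption, adjust it to wherever the checkpoint directory lives):

import json

# Load the trainer state saved alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss entries and evaluation entries;
# split them by the keys each entry carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

print(f"best WER: {state['best_metric']:.2f} "
      f"(checkpoint: {state['best_model_checkpoint']})")

# Eval loss and WER at each logged checkpoint (every 500 steps here).
for e in eval_logs:
    print(f"step {e['step']:>5}  epoch {e['epoch']:>6.2f}  "
          f"eval_loss {e['eval_loss']:.4f}  WER {e['eval_wer']:.2f}")

Run against this file, the loop would list the five evaluation points (steps 500 to 2500), with the lowest WER of roughly 15.02 at step 2500, matching best_metric and best_model_checkpoint above.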