whisper-small-shona / trainer_state.json
{
"best_metric": 49.90958408679928,
"best_model_checkpoint": "./whisper-small-shona/checkpoint-1600",
"epoch": 121.21212121212122,
"eval_steps": 400,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.52,
"learning_rate": 4.800000000000001e-07,
"loss": 2.8865,
"step": 25
},
{
"epoch": 3.03,
"learning_rate": 9.600000000000001e-07,
"loss": 2.3463,
"step": 50
},
{
"epoch": 4.55,
"learning_rate": 1.46e-06,
"loss": 1.7622,
"step": 75
},
{
"epoch": 6.06,
"learning_rate": 1.9600000000000003e-06,
"loss": 1.2611,
"step": 100
},
{
"epoch": 7.58,
"learning_rate": 2.46e-06,
"loss": 0.9353,
"step": 125
},
{
"epoch": 9.09,
"learning_rate": 2.96e-06,
"loss": 0.7091,
"step": 150
},
{
"epoch": 10.61,
"learning_rate": 3.46e-06,
"loss": 0.5145,
"step": 175
},
{
"epoch": 12.12,
"learning_rate": 3.96e-06,
"loss": 0.3821,
"step": 200
},
{
"epoch": 13.64,
"learning_rate": 4.4600000000000005e-06,
"loss": 0.2477,
"step": 225
},
{
"epoch": 15.15,
"learning_rate": 4.960000000000001e-06,
"loss": 0.1587,
"step": 250
},
{
"epoch": 16.67,
"learning_rate": 5.460000000000001e-06,
"loss": 0.0896,
"step": 275
},
{
"epoch": 18.18,
"learning_rate": 5.9600000000000005e-06,
"loss": 0.0466,
"step": 300
},
{
"epoch": 19.7,
"learning_rate": 6.460000000000001e-06,
"loss": 0.0224,
"step": 325
},
{
"epoch": 21.21,
"learning_rate": 6.96e-06,
"loss": 0.0132,
"step": 350
},
{
"epoch": 22.73,
"learning_rate": 7.4600000000000006e-06,
"loss": 0.0084,
"step": 375
},
{
"epoch": 24.24,
"learning_rate": 7.960000000000002e-06,
"loss": 0.0064,
"step": 400
},
{
"epoch": 24.24,
"eval_loss": 0.9630287289619446,
"eval_runtime": 56.9771,
"eval_samples_per_second": 16.235,
"eval_steps_per_second": 0.351,
"eval_wer": 50.72332730560579,
"step": 400
},
{
"epoch": 25.76,
"learning_rate": 8.46e-06,
"loss": 0.0049,
"step": 425
},
{
"epoch": 27.27,
"learning_rate": 8.96e-06,
"loss": 0.0041,
"step": 450
},
{
"epoch": 28.79,
"learning_rate": 9.460000000000001e-06,
"loss": 0.0034,
"step": 475
},
{
"epoch": 30.3,
"learning_rate": 9.960000000000001e-06,
"loss": 0.0029,
"step": 500
},
{
"epoch": 31.82,
"learning_rate": 9.846666666666668e-06,
"loss": 0.0025,
"step": 525
},
{
"epoch": 33.33,
"learning_rate": 9.68e-06,
"loss": 0.0022,
"step": 550
},
{
"epoch": 34.85,
"learning_rate": 9.513333333333334e-06,
"loss": 0.0019,
"step": 575
},
{
"epoch": 36.36,
"learning_rate": 9.346666666666666e-06,
"loss": 0.0017,
"step": 600
},
{
"epoch": 37.88,
"learning_rate": 9.180000000000002e-06,
"loss": 0.0016,
"step": 625
},
{
"epoch": 39.39,
"learning_rate": 9.013333333333334e-06,
"loss": 0.0014,
"step": 650
},
{
"epoch": 40.91,
"learning_rate": 8.846666666666668e-06,
"loss": 0.0013,
"step": 675
},
{
"epoch": 42.42,
"learning_rate": 8.68e-06,
"loss": 0.0012,
"step": 700
},
{
"epoch": 43.94,
"learning_rate": 8.513333333333335e-06,
"loss": 0.0012,
"step": 725
},
{
"epoch": 45.45,
"learning_rate": 8.346666666666668e-06,
"loss": 0.0011,
"step": 750
},
{
"epoch": 46.97,
"learning_rate": 8.18e-06,
"loss": 0.001,
"step": 775
},
{
"epoch": 48.48,
"learning_rate": 8.013333333333333e-06,
"loss": 0.001,
"step": 800
},
{
"epoch": 48.48,
"eval_loss": 1.0617018938064575,
"eval_runtime": 56.831,
"eval_samples_per_second": 16.276,
"eval_steps_per_second": 0.352,
"eval_wer": 49.93972272453286,
"step": 800
},
{
"epoch": 50.0,
"learning_rate": 7.846666666666667e-06,
"loss": 0.0009,
"step": 825
},
{
"epoch": 51.52,
"learning_rate": 7.680000000000001e-06,
"loss": 0.0009,
"step": 850
},
{
"epoch": 53.03,
"learning_rate": 7.513333333333334e-06,
"loss": 0.0008,
"step": 875
},
{
"epoch": 54.55,
"learning_rate": 7.346666666666668e-06,
"loss": 0.0008,
"step": 900
},
{
"epoch": 56.06,
"learning_rate": 7.180000000000001e-06,
"loss": 0.0007,
"step": 925
},
{
"epoch": 57.58,
"learning_rate": 7.0133333333333345e-06,
"loss": 0.0007,
"step": 950
},
{
"epoch": 59.09,
"learning_rate": 6.846666666666667e-06,
"loss": 0.0007,
"step": 975
},
{
"epoch": 60.61,
"learning_rate": 6.680000000000001e-06,
"loss": 0.0007,
"step": 1000
},
{
"epoch": 62.12,
"learning_rate": 6.513333333333333e-06,
"loss": 0.0006,
"step": 1025
},
{
"epoch": 63.64,
"learning_rate": 6.346666666666668e-06,
"loss": 0.0006,
"step": 1050
},
{
"epoch": 65.15,
"learning_rate": 6.18e-06,
"loss": 0.0006,
"step": 1075
},
{
"epoch": 66.67,
"learning_rate": 6.013333333333335e-06,
"loss": 0.0006,
"step": 1100
},
{
"epoch": 68.18,
"learning_rate": 5.846666666666667e-06,
"loss": 0.0005,
"step": 1125
},
{
"epoch": 69.7,
"learning_rate": 5.68e-06,
"loss": 0.0005,
"step": 1150
},
{
"epoch": 71.21,
"learning_rate": 5.513333333333334e-06,
"loss": 0.0005,
"step": 1175
},
{
"epoch": 72.73,
"learning_rate": 5.346666666666667e-06,
"loss": 0.0005,
"step": 1200
},
{
"epoch": 72.73,
"eval_loss": 1.101595401763916,
"eval_runtime": 56.6617,
"eval_samples_per_second": 16.325,
"eval_steps_per_second": 0.353,
"eval_wer": 49.93972272453286,
"step": 1200
},
{
"epoch": 74.24,
"learning_rate": 5.18e-06,
"loss": 0.0005,
"step": 1225
},
{
"epoch": 75.76,
"learning_rate": 5.013333333333333e-06,
"loss": 0.0005,
"step": 1250
},
{
"epoch": 77.27,
"learning_rate": 4.846666666666667e-06,
"loss": 0.0005,
"step": 1275
},
{
"epoch": 78.79,
"learning_rate": 4.680000000000001e-06,
"loss": 0.0005,
"step": 1300
},
{
"epoch": 80.3,
"learning_rate": 4.513333333333333e-06,
"loss": 0.0004,
"step": 1325
},
{
"epoch": 81.82,
"learning_rate": 4.346666666666667e-06,
"loss": 0.0004,
"step": 1350
},
{
"epoch": 83.33,
"learning_rate": 4.18e-06,
"loss": 0.0004,
"step": 1375
},
{
"epoch": 84.85,
"learning_rate": 4.013333333333334e-06,
"loss": 0.0004,
"step": 1400
},
{
"epoch": 86.36,
"learning_rate": 3.8466666666666665e-06,
"loss": 0.0004,
"step": 1425
},
{
"epoch": 87.88,
"learning_rate": 3.6800000000000003e-06,
"loss": 0.0004,
"step": 1450
},
{
"epoch": 89.39,
"learning_rate": 3.5133333333333337e-06,
"loss": 0.0004,
"step": 1475
},
{
"epoch": 90.91,
"learning_rate": 3.346666666666667e-06,
"loss": 0.0004,
"step": 1500
},
{
"epoch": 92.42,
"learning_rate": 3.1800000000000005e-06,
"loss": 0.0004,
"step": 1525
},
{
"epoch": 93.94,
"learning_rate": 3.013333333333334e-06,
"loss": 0.0004,
"step": 1550
},
{
"epoch": 95.45,
"learning_rate": 2.8466666666666672e-06,
"loss": 0.0004,
"step": 1575
},
{
"epoch": 96.97,
"learning_rate": 2.68e-06,
"loss": 0.0004,
"step": 1600
},
{
"epoch": 96.97,
"eval_loss": 1.1220260858535767,
"eval_runtime": 56.5264,
"eval_samples_per_second": 16.364,
"eval_steps_per_second": 0.354,
"eval_wer": 49.90958408679928,
"step": 1600
},
{
"epoch": 98.48,
"learning_rate": 2.5133333333333336e-06,
"loss": 0.0004,
"step": 1625
},
{
"epoch": 100.0,
"learning_rate": 2.346666666666667e-06,
"loss": 0.0004,
"step": 1650
},
{
"epoch": 101.52,
"learning_rate": 2.1800000000000003e-06,
"loss": 0.0004,
"step": 1675
},
{
"epoch": 103.03,
"learning_rate": 2.0133333333333337e-06,
"loss": 0.0004,
"step": 1700
},
{
"epoch": 104.55,
"learning_rate": 1.8466666666666668e-06,
"loss": 0.0004,
"step": 1725
},
{
"epoch": 106.06,
"learning_rate": 1.6800000000000002e-06,
"loss": 0.0003,
"step": 1750
},
{
"epoch": 107.58,
"learning_rate": 1.5133333333333334e-06,
"loss": 0.0003,
"step": 1775
},
{
"epoch": 109.09,
"learning_rate": 1.3466666666666668e-06,
"loss": 0.0003,
"step": 1800
},
{
"epoch": 110.61,
"learning_rate": 1.1800000000000001e-06,
"loss": 0.0003,
"step": 1825
},
{
"epoch": 112.12,
"learning_rate": 1.0133333333333333e-06,
"loss": 0.0003,
"step": 1850
},
{
"epoch": 113.64,
"learning_rate": 8.466666666666668e-07,
"loss": 0.0003,
"step": 1875
},
{
"epoch": 115.15,
"learning_rate": 6.800000000000001e-07,
"loss": 0.0003,
"step": 1900
},
{
"epoch": 116.67,
"learning_rate": 5.133333333333334e-07,
"loss": 0.0003,
"step": 1925
},
{
"epoch": 118.18,
"learning_rate": 3.466666666666667e-07,
"loss": 0.0003,
"step": 1950
},
{
"epoch": 119.7,
"learning_rate": 1.8e-07,
"loss": 0.0003,
"step": 1975
},
{
"epoch": 121.21,
"learning_rate": 1.3333333333333334e-08,
"loss": 0.0003,
"step": 2000
},
{
"epoch": 121.21,
"eval_loss": 1.1297551393508911,
"eval_runtime": 56.6433,
"eval_samples_per_second": 16.33,
"eval_steps_per_second": 0.353,
"eval_wer": 50.04219409282701,
"step": 2000
},
{
"epoch": 121.21,
"step": 2000,
"total_flos": 2.770419932175794e+19,
"train_loss": 0.143078886593692,
"train_runtime": 4289.2081,
"train_samples_per_second": 22.382,
"train_steps_per_second": 0.466
}
],
"logging_steps": 25,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 125,
"save_steps": 400,
"total_flos": 2.770419932175794e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
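
The JSON above is the Trainer state that Hugging Face Transformers saves at the end of fine-tuning: "log_history" interleaves training-loss entries (every 25 steps) with evaluation entries (every 400 steps), and the best word error rate (about 49.91) was reached at checkpoint-1600 even though training ran to step 2000. A minimal sketch, assuming the file sits at whisper-small-shona/trainer_state.json, of how the log could be inspected with only the standard library (the path and printing format are illustrative, not part of the original file):

import json

# Load the saved Trainer state (path assumed for this example).
with open("whisper-small-shona/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_wer".
evals = [e for e in state["log_history"] if "eval_wer" in e]
for e in evals:
    print(f'step {e["step"]:>4}: eval_loss={e["eval_loss"]:.4f}  WER={e["eval_wer"]:.2f}')

# The best metric and the checkpoint that produced it are stored at the top level.
print("best WER:", state["best_metric"], "at", state["best_model_checkpoint"])

Run against this file, the loop would print the four evaluation rows (steps 400, 800, 1200, 1600, 2000) and the final line would report checkpoint-1600 as the best model.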