whisper-medium-lt / trainer_state.json
{
"best_metric": 20.446244243290458,
"best_model_checkpoint": "./whisper-medium-lt/checkpoint-2000",
"epoch": 18.685979142526072,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.23,
"learning_rate": 4.6000000000000004e-07,
"loss": 3.6989,
"step": 25
},
{
"epoch": 0.46,
"learning_rate": 9.600000000000001e-07,
"loss": 2.5535,
"step": 50
},
{
"epoch": 0.7,
"learning_rate": 1.46e-06,
"loss": 1.7181,
"step": 75
},
{
"epoch": 0.93,
"learning_rate": 1.9600000000000003e-06,
"loss": 1.2891,
"step": 100
},
{
"epoch": 1.17,
"learning_rate": 2.46e-06,
"loss": 0.7856,
"step": 125
},
{
"epoch": 1.4,
"learning_rate": 2.96e-06,
"loss": 0.3772,
"step": 150
},
{
"epoch": 1.63,
"learning_rate": 3.46e-06,
"loss": 0.3373,
"step": 175
},
{
"epoch": 1.86,
"learning_rate": 3.96e-06,
"loss": 0.3153,
"step": 200
},
{
"epoch": 2.1,
"learning_rate": 4.4600000000000005e-06,
"loss": 0.2667,
"step": 225
},
{
"epoch": 2.33,
"learning_rate": 4.960000000000001e-06,
"loss": 0.2029,
"step": 250
},
{
"epoch": 2.57,
"learning_rate": 5.460000000000001e-06,
"loss": 0.1957,
"step": 275
},
{
"epoch": 2.8,
"learning_rate": 5.9600000000000005e-06,
"loss": 0.1887,
"step": 300
},
{
"epoch": 3.04,
"learning_rate": 6.460000000000001e-06,
"loss": 0.1782,
"step": 325
},
{
"epoch": 3.27,
"learning_rate": 6.96e-06,
"loss": 0.1022,
"step": 350
},
{
"epoch": 3.5,
"learning_rate": 7.4600000000000006e-06,
"loss": 0.099,
"step": 375
},
{
"epoch": 3.73,
"learning_rate": 7.960000000000002e-06,
"loss": 0.1057,
"step": 400
},
{
"epoch": 3.96,
"learning_rate": 8.46e-06,
"loss": 0.1052,
"step": 425
},
{
"epoch": 4.2,
"learning_rate": 8.96e-06,
"loss": 0.0607,
"step": 450
},
{
"epoch": 4.44,
"learning_rate": 9.460000000000001e-06,
"loss": 0.0531,
"step": 475
},
{
"epoch": 4.67,
"learning_rate": 9.960000000000001e-06,
"loss": 0.0532,
"step": 500
},
{
"epoch": 4.9,
"learning_rate": 9.94888888888889e-06,
"loss": 0.0565,
"step": 525
},
{
"epoch": 5.14,
"learning_rate": 9.893333333333334e-06,
"loss": 0.0432,
"step": 550
},
{
"epoch": 5.37,
"learning_rate": 9.837777777777778e-06,
"loss": 0.0302,
"step": 575
},
{
"epoch": 5.6,
"learning_rate": 9.782222222222222e-06,
"loss": 0.0306,
"step": 600
},
{
"epoch": 5.83,
"learning_rate": 9.726666666666668e-06,
"loss": 0.0305,
"step": 625
},
{
"epoch": 6.07,
"learning_rate": 9.671111111111112e-06,
"loss": 0.0267,
"step": 650
},
{
"epoch": 6.31,
"learning_rate": 9.615555555555558e-06,
"loss": 0.0163,
"step": 675
},
{
"epoch": 6.54,
"learning_rate": 9.56e-06,
"loss": 0.0189,
"step": 700
},
{
"epoch": 6.77,
"learning_rate": 9.504444444444446e-06,
"loss": 0.0194,
"step": 725
},
{
"epoch": 7.01,
"learning_rate": 9.44888888888889e-06,
"loss": 0.0209,
"step": 750
},
{
"epoch": 7.24,
"learning_rate": 9.393333333333334e-06,
"loss": 0.0119,
"step": 775
},
{
"epoch": 7.47,
"learning_rate": 9.33777777777778e-06,
"loss": 0.0124,
"step": 800
},
{
"epoch": 7.7,
"learning_rate": 9.282222222222222e-06,
"loss": 0.0129,
"step": 825
},
{
"epoch": 7.94,
"learning_rate": 9.226666666666668e-06,
"loss": 0.0118,
"step": 850
},
{
"epoch": 8.18,
"learning_rate": 9.171111111111112e-06,
"loss": 0.009,
"step": 875
},
{
"epoch": 8.41,
"learning_rate": 9.115555555555556e-06,
"loss": 0.0069,
"step": 900
},
{
"epoch": 8.64,
"learning_rate": 9.060000000000001e-06,
"loss": 0.0076,
"step": 925
},
{
"epoch": 8.87,
"learning_rate": 9.004444444444445e-06,
"loss": 0.0076,
"step": 950
},
{
"epoch": 9.11,
"learning_rate": 8.94888888888889e-06,
"loss": 0.0071,
"step": 975
},
{
"epoch": 9.34,
"learning_rate": 8.893333333333333e-06,
"loss": 0.0056,
"step": 1000
},
{
"epoch": 9.34,
"eval_loss": 0.3252415060997009,
"eval_runtime": 2671.1564,
"eval_samples_per_second": 1.404,
"eval_steps_per_second": 0.176,
"eval_wer": 20.553438145148483,
"step": 1000
},
{
"epoch": 9.57,
"learning_rate": 8.83777777777778e-06,
"loss": 0.0058,
"step": 1025
},
{
"epoch": 9.81,
"learning_rate": 8.782222222222223e-06,
"loss": 0.0058,
"step": 1050
},
{
"epoch": 10.05,
"learning_rate": 8.726666666666667e-06,
"loss": 0.0057,
"step": 1075
},
{
"epoch": 10.28,
"learning_rate": 8.671111111111113e-06,
"loss": 0.0054,
"step": 1100
},
{
"epoch": 10.51,
"learning_rate": 8.615555555555555e-06,
"loss": 0.0042,
"step": 1125
},
{
"epoch": 10.74,
"learning_rate": 8.560000000000001e-06,
"loss": 0.0035,
"step": 1150
},
{
"epoch": 10.97,
"learning_rate": 8.504444444444445e-06,
"loss": 0.0048,
"step": 1175
},
{
"epoch": 11.21,
"learning_rate": 8.448888888888889e-06,
"loss": 0.0033,
"step": 1200
},
{
"epoch": 11.44,
"learning_rate": 8.393333333333335e-06,
"loss": 0.0033,
"step": 1225
},
{
"epoch": 11.68,
"learning_rate": 8.337777777777777e-06,
"loss": 0.003,
"step": 1250
},
{
"epoch": 11.91,
"learning_rate": 8.282222222222223e-06,
"loss": 0.0033,
"step": 1275
},
{
"epoch": 12.15,
"learning_rate": 8.226666666666667e-06,
"loss": 0.0031,
"step": 1300
},
{
"epoch": 12.38,
"learning_rate": 8.171111111111113e-06,
"loss": 0.0027,
"step": 1325
},
{
"epoch": 12.61,
"learning_rate": 8.115555555555557e-06,
"loss": 0.0026,
"step": 1350
},
{
"epoch": 12.84,
"learning_rate": 8.06e-06,
"loss": 0.0026,
"step": 1375
},
{
"epoch": 13.08,
"learning_rate": 8.004444444444445e-06,
"loss": 0.0021,
"step": 1400
},
{
"epoch": 13.32,
"learning_rate": 7.948888888888889e-06,
"loss": 0.0014,
"step": 1425
},
{
"epoch": 13.55,
"learning_rate": 7.893333333333335e-06,
"loss": 0.0013,
"step": 1450
},
{
"epoch": 13.78,
"learning_rate": 7.837777777777779e-06,
"loss": 0.0018,
"step": 1475
},
{
"epoch": 14.02,
"learning_rate": 7.782222222222223e-06,
"loss": 0.0019,
"step": 1500
},
{
"epoch": 14.25,
"learning_rate": 7.726666666666667e-06,
"loss": 0.0014,
"step": 1525
},
{
"epoch": 14.48,
"learning_rate": 7.67111111111111e-06,
"loss": 0.0013,
"step": 1550
},
{
"epoch": 14.71,
"learning_rate": 7.6155555555555564e-06,
"loss": 0.0012,
"step": 1575
},
{
"epoch": 14.95,
"learning_rate": 7.5600000000000005e-06,
"loss": 0.0017,
"step": 1600
},
{
"epoch": 15.19,
"learning_rate": 7.504444444444445e-06,
"loss": 0.0014,
"step": 1625
},
{
"epoch": 15.42,
"learning_rate": 7.44888888888889e-06,
"loss": 0.0014,
"step": 1650
},
{
"epoch": 15.65,
"learning_rate": 7.393333333333333e-06,
"loss": 0.0014,
"step": 1675
},
{
"epoch": 15.88,
"learning_rate": 7.337777777777778e-06,
"loss": 0.0012,
"step": 1700
},
{
"epoch": 16.12,
"learning_rate": 7.282222222222222e-06,
"loss": 0.0012,
"step": 1725
},
{
"epoch": 16.35,
"learning_rate": 7.226666666666667e-06,
"loss": 0.001,
"step": 1750
},
{
"epoch": 16.58,
"learning_rate": 7.171111111111112e-06,
"loss": 0.0011,
"step": 1775
},
{
"epoch": 16.82,
"learning_rate": 7.115555555555557e-06,
"loss": 0.0012,
"step": 1800
},
{
"epoch": 17.06,
"learning_rate": 7.06e-06,
"loss": 0.0011,
"step": 1825
},
{
"epoch": 17.29,
"learning_rate": 7.004444444444445e-06,
"loss": 0.001,
"step": 1850
},
{
"epoch": 17.52,
"learning_rate": 6.948888888888889e-06,
"loss": 0.001,
"step": 1875
},
{
"epoch": 17.75,
"learning_rate": 6.893333333333334e-06,
"loss": 0.0014,
"step": 1900
},
{
"epoch": 17.98,
"learning_rate": 6.837777777777779e-06,
"loss": 0.0015,
"step": 1925
},
{
"epoch": 18.22,
"learning_rate": 6.782222222222222e-06,
"loss": 0.0016,
"step": 1950
},
{
"epoch": 18.45,
"learning_rate": 6.726666666666667e-06,
"loss": 0.0018,
"step": 1975
},
{
"epoch": 18.69,
"learning_rate": 6.671111111111112e-06,
"loss": 0.0023,
"step": 2000
},
{
"epoch": 18.69,
"eval_loss": 0.3549511432647705,
"eval_runtime": 2667.5472,
"eval_samples_per_second": 1.405,
"eval_steps_per_second": 0.176,
"eval_wer": 20.446244243290458,
"step": 2000
}
],
"max_steps": 5000,
"num_train_epochs": 47,
"total_flos": 2.6324054137700352e+20,
"trial_name": null,
"trial_params": null
}