wav2vec2-300m-mls-german-ft / trainer_state.json
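This is the Hugging Face `Trainer` state for the fine-tuning run: `log_history` interleaves training entries (logged every 10 steps, carrying `learning_rate` and `loss`) with evaluation entries (every 500 steps, carrying `eval_loss` and `eval_wer`). As a minimal sketch of how one might summarize the evaluation checkpoints from this file, assuming it has been downloaded locally (the local filename used below is illustrative, not part of the repository layout):

```python
# Minimal sketch: list the evaluation checkpoints recorded in log_history.
# Assumption: this trainer_state.json has been saved locally under that name.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries containing "eval_wer" are the periodic evaluations (every 500 steps here);
# the remaining entries are the per-10-step training-loss logs.
eval_logs = [entry for entry in state["log_history"] if "eval_wer" in entry]

for entry in eval_logs:
    print(f'step {entry["step"]:>5}  '
          f'eval_loss {entry["eval_loss"]:.4f}  '
          f'eval_wer {entry["eval_wer"]:.4f}')
```

On the entries shown in this file, the evaluation WER stays at 1.0 for the first two checkpoints (steps 500 and 1000) and then falls steadily, reaching roughly 0.156 by step 6500.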
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"global_step": 6900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 9e-07,
"loss": 11.2286,
"step": 10
},
{
"epoch": 0.29,
"learning_rate": 1.8e-06,
"loss": 12.1103,
"step": 20
},
{
"epoch": 0.43,
"learning_rate": 2.8000000000000003e-06,
"loss": 11.9593,
"step": 30
},
{
"epoch": 0.58,
"learning_rate": 3.8e-06,
"loss": 11.6711,
"step": 40
},
{
"epoch": 0.72,
"learning_rate": 4.800000000000001e-06,
"loss": 11.9783,
"step": 50
},
{
"epoch": 0.87,
"learning_rate": 5.8e-06,
"loss": 10.9667,
"step": 60
},
{
"epoch": 1.01,
"learning_rate": 6.800000000000001e-06,
"loss": 12.1446,
"step": 70
},
{
"epoch": 1.16,
"learning_rate": 7.8e-06,
"loss": 10.2963,
"step": 80
},
{
"epoch": 1.3,
"learning_rate": 8.7e-06,
"loss": 10.8899,
"step": 90
},
{
"epoch": 1.45,
"learning_rate": 9.7e-06,
"loss": 9.5968,
"step": 100
},
{
"epoch": 1.59,
"learning_rate": 1.0700000000000001e-05,
"loss": 7.6591,
"step": 110
},
{
"epoch": 1.74,
"learning_rate": 1.1700000000000001e-05,
"loss": 7.5159,
"step": 120
},
{
"epoch": 1.88,
"learning_rate": 1.27e-05,
"loss": 6.2925,
"step": 130
},
{
"epoch": 2.03,
"learning_rate": 1.3700000000000001e-05,
"loss": 5.7639,
"step": 140
},
{
"epoch": 2.17,
"learning_rate": 1.47e-05,
"loss": 5.6696,
"step": 150
},
{
"epoch": 2.32,
"learning_rate": 1.5700000000000002e-05,
"loss": 4.7696,
"step": 160
},
{
"epoch": 2.46,
"learning_rate": 1.6700000000000003e-05,
"loss": 5.1383,
"step": 170
},
{
"epoch": 2.61,
"learning_rate": 1.77e-05,
"loss": 4.5714,
"step": 180
},
{
"epoch": 2.75,
"learning_rate": 1.87e-05,
"loss": 4.5116,
"step": 190
},
{
"epoch": 2.9,
"learning_rate": 1.97e-05,
"loss": 4.3733,
"step": 200
},
{
"epoch": 3.04,
"learning_rate": 2.07e-05,
"loss": 3.8443,
"step": 210
},
{
"epoch": 3.19,
"learning_rate": 2.1700000000000002e-05,
"loss": 3.9388,
"step": 220
},
{
"epoch": 3.33,
"learning_rate": 2.2700000000000003e-05,
"loss": 3.8416,
"step": 230
},
{
"epoch": 3.48,
"learning_rate": 2.37e-05,
"loss": 3.7135,
"step": 240
},
{
"epoch": 3.62,
"learning_rate": 2.47e-05,
"loss": 3.5271,
"step": 250
},
{
"epoch": 3.77,
"learning_rate": 2.57e-05,
"loss": 3.5189,
"step": 260
},
{
"epoch": 3.91,
"learning_rate": 2.6700000000000002e-05,
"loss": 3.5816,
"step": 270
},
{
"epoch": 4.06,
"learning_rate": 2.7700000000000002e-05,
"loss": 3.3417,
"step": 280
},
{
"epoch": 4.2,
"learning_rate": 2.87e-05,
"loss": 3.5864,
"step": 290
},
{
"epoch": 4.35,
"learning_rate": 2.97e-05,
"loss": 3.327,
"step": 300
},
{
"epoch": 4.49,
"learning_rate": 3.07e-05,
"loss": 3.205,
"step": 310
},
{
"epoch": 4.64,
"learning_rate": 3.1700000000000005e-05,
"loss": 3.1087,
"step": 320
},
{
"epoch": 4.78,
"learning_rate": 3.27e-05,
"loss": 3.1126,
"step": 330
},
{
"epoch": 4.93,
"learning_rate": 3.3700000000000006e-05,
"loss": 3.1934,
"step": 340
},
{
"epoch": 5.07,
"learning_rate": 3.4699999999999996e-05,
"loss": 3.0881,
"step": 350
},
{
"epoch": 5.22,
"learning_rate": 3.57e-05,
"loss": 3.086,
"step": 360
},
{
"epoch": 5.36,
"learning_rate": 3.6700000000000004e-05,
"loss": 3.0304,
"step": 370
},
{
"epoch": 5.51,
"learning_rate": 3.77e-05,
"loss": 3.0262,
"step": 380
},
{
"epoch": 5.65,
"learning_rate": 3.8700000000000006e-05,
"loss": 3.0492,
"step": 390
},
{
"epoch": 5.8,
"learning_rate": 3.97e-05,
"loss": 3.0657,
"step": 400
},
{
"epoch": 5.94,
"learning_rate": 4.07e-05,
"loss": 2.9912,
"step": 410
},
{
"epoch": 6.09,
"learning_rate": 4.17e-05,
"loss": 2.9993,
"step": 420
},
{
"epoch": 6.23,
"learning_rate": 4.27e-05,
"loss": 3.0465,
"step": 430
},
{
"epoch": 6.38,
"learning_rate": 4.3700000000000005e-05,
"loss": 2.9501,
"step": 440
},
{
"epoch": 6.52,
"learning_rate": 4.47e-05,
"loss": 3.0918,
"step": 450
},
{
"epoch": 6.67,
"learning_rate": 4.5700000000000006e-05,
"loss": 3.0306,
"step": 460
},
{
"epoch": 6.81,
"learning_rate": 4.6700000000000003e-05,
"loss": 2.9349,
"step": 470
},
{
"epoch": 6.96,
"learning_rate": 4.77e-05,
"loss": 3.0044,
"step": 480
},
{
"epoch": 7.1,
"learning_rate": 4.87e-05,
"loss": 2.9541,
"step": 490
},
{
"epoch": 7.25,
"learning_rate": 4.97e-05,
"loss": 3.0132,
"step": 500
},
{
"epoch": 7.25,
"eval_loss": 2.9393208026885986,
"eval_runtime": 580.9987,
"eval_samples_per_second": 5.842,
"eval_steps_per_second": 0.731,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 7.39,
"learning_rate": 5.0700000000000006e-05,
"loss": 2.9162,
"step": 510
},
{
"epoch": 7.54,
"learning_rate": 5.17e-05,
"loss": 2.9309,
"step": 520
},
{
"epoch": 7.68,
"learning_rate": 5.270000000000001e-05,
"loss": 3.0513,
"step": 530
},
{
"epoch": 7.83,
"learning_rate": 5.3700000000000004e-05,
"loss": 2.948,
"step": 540
},
{
"epoch": 7.97,
"learning_rate": 5.470000000000001e-05,
"loss": 2.9518,
"step": 550
},
{
"epoch": 8.12,
"learning_rate": 5.5700000000000005e-05,
"loss": 2.9229,
"step": 560
},
{
"epoch": 8.26,
"learning_rate": 5.6699999999999996e-05,
"loss": 2.9511,
"step": 570
},
{
"epoch": 8.41,
"learning_rate": 5.77e-05,
"loss": 2.9427,
"step": 580
},
{
"epoch": 8.55,
"learning_rate": 5.87e-05,
"loss": 2.9603,
"step": 590
},
{
"epoch": 8.7,
"learning_rate": 5.97e-05,
"loss": 2.9397,
"step": 600
},
{
"epoch": 8.84,
"learning_rate": 6.07e-05,
"loss": 2.9267,
"step": 610
},
{
"epoch": 8.99,
"learning_rate": 6.170000000000001e-05,
"loss": 3.0164,
"step": 620
},
{
"epoch": 9.13,
"learning_rate": 6.27e-05,
"loss": 2.913,
"step": 630
},
{
"epoch": 9.28,
"learning_rate": 6.37e-05,
"loss": 2.9382,
"step": 640
},
{
"epoch": 9.42,
"learning_rate": 6.47e-05,
"loss": 2.9905,
"step": 650
},
{
"epoch": 9.57,
"learning_rate": 6.570000000000001e-05,
"loss": 2.8993,
"step": 660
},
{
"epoch": 9.71,
"learning_rate": 6.670000000000001e-05,
"loss": 2.9829,
"step": 670
},
{
"epoch": 9.86,
"learning_rate": 6.77e-05,
"loss": 2.9161,
"step": 680
},
{
"epoch": 10.0,
"learning_rate": 6.87e-05,
"loss": 3.0016,
"step": 690
},
{
"epoch": 10.14,
"learning_rate": 6.97e-05,
"loss": 2.9328,
"step": 700
},
{
"epoch": 10.29,
"learning_rate": 7.07e-05,
"loss": 3.0272,
"step": 710
},
{
"epoch": 10.43,
"learning_rate": 7.17e-05,
"loss": 2.9243,
"step": 720
},
{
"epoch": 10.58,
"learning_rate": 7.27e-05,
"loss": 2.9345,
"step": 730
},
{
"epoch": 10.72,
"learning_rate": 7.37e-05,
"loss": 2.9191,
"step": 740
},
{
"epoch": 10.87,
"learning_rate": 7.47e-05,
"loss": 2.8938,
"step": 750
},
{
"epoch": 11.01,
"learning_rate": 7.570000000000001e-05,
"loss": 2.9083,
"step": 760
},
{
"epoch": 11.16,
"learning_rate": 7.670000000000001e-05,
"loss": 2.9408,
"step": 770
},
{
"epoch": 11.3,
"learning_rate": 7.77e-05,
"loss": 2.9428,
"step": 780
},
{
"epoch": 11.45,
"learning_rate": 7.87e-05,
"loss": 2.9443,
"step": 790
},
{
"epoch": 11.59,
"learning_rate": 7.970000000000001e-05,
"loss": 2.9159,
"step": 800
},
{
"epoch": 11.74,
"learning_rate": 8.070000000000001e-05,
"loss": 2.9294,
"step": 810
},
{
"epoch": 11.88,
"learning_rate": 8.17e-05,
"loss": 2.8927,
"step": 820
},
{
"epoch": 12.03,
"learning_rate": 8.27e-05,
"loss": 2.9385,
"step": 830
},
{
"epoch": 12.17,
"learning_rate": 8.37e-05,
"loss": 2.9407,
"step": 840
},
{
"epoch": 12.32,
"learning_rate": 8.47e-05,
"loss": 2.8872,
"step": 850
},
{
"epoch": 12.46,
"learning_rate": 8.57e-05,
"loss": 2.9336,
"step": 860
},
{
"epoch": 12.61,
"learning_rate": 8.67e-05,
"loss": 2.8768,
"step": 870
},
{
"epoch": 12.75,
"learning_rate": 8.77e-05,
"loss": 2.9426,
"step": 880
},
{
"epoch": 12.9,
"learning_rate": 8.87e-05,
"loss": 2.8981,
"step": 890
},
{
"epoch": 13.04,
"learning_rate": 8.970000000000001e-05,
"loss": 2.8928,
"step": 900
},
{
"epoch": 13.19,
"learning_rate": 9.070000000000001e-05,
"loss": 2.9346,
"step": 910
},
{
"epoch": 13.33,
"learning_rate": 9.17e-05,
"loss": 2.89,
"step": 920
},
{
"epoch": 13.48,
"learning_rate": 9.27e-05,
"loss": 2.9107,
"step": 930
},
{
"epoch": 13.62,
"learning_rate": 9.370000000000001e-05,
"loss": 2.8732,
"step": 940
},
{
"epoch": 13.77,
"learning_rate": 9.47e-05,
"loss": 2.9441,
"step": 950
},
{
"epoch": 13.91,
"learning_rate": 9.57e-05,
"loss": 2.9136,
"step": 960
},
{
"epoch": 14.06,
"learning_rate": 9.67e-05,
"loss": 2.8773,
"step": 970
},
{
"epoch": 14.2,
"learning_rate": 9.77e-05,
"loss": 2.9015,
"step": 980
},
{
"epoch": 14.35,
"learning_rate": 9.87e-05,
"loss": 2.9168,
"step": 990
},
{
"epoch": 14.49,
"learning_rate": 9.970000000000001e-05,
"loss": 2.9241,
"step": 1000
},
{
"epoch": 14.49,
"eval_loss": 2.8734302520751953,
"eval_runtime": 570.6581,
"eval_samples_per_second": 5.948,
"eval_steps_per_second": 0.745,
"eval_wer": 1.0,
"step": 1000
},
{
"epoch": 14.64,
"learning_rate": 9.988135593220339e-05,
"loss": 2.8693,
"step": 1010
},
{
"epoch": 14.78,
"learning_rate": 9.971186440677967e-05,
"loss": 2.9199,
"step": 1020
},
{
"epoch": 14.93,
"learning_rate": 9.954237288135594e-05,
"loss": 2.8831,
"step": 1030
},
{
"epoch": 15.07,
"learning_rate": 9.937288135593222e-05,
"loss": 2.8716,
"step": 1040
},
{
"epoch": 15.22,
"learning_rate": 9.920338983050847e-05,
"loss": 2.9457,
"step": 1050
},
{
"epoch": 15.36,
"learning_rate": 9.903389830508475e-05,
"loss": 2.8708,
"step": 1060
},
{
"epoch": 15.51,
"learning_rate": 9.886440677966103e-05,
"loss": 2.8595,
"step": 1070
},
{
"epoch": 15.65,
"learning_rate": 9.86949152542373e-05,
"loss": 2.8718,
"step": 1080
},
{
"epoch": 15.8,
"learning_rate": 9.852542372881356e-05,
"loss": 2.8383,
"step": 1090
},
{
"epoch": 15.94,
"learning_rate": 9.835593220338983e-05,
"loss": 2.8016,
"step": 1100
},
{
"epoch": 16.09,
"learning_rate": 9.818644067796611e-05,
"loss": 2.7608,
"step": 1110
},
{
"epoch": 16.23,
"learning_rate": 9.801694915254239e-05,
"loss": 2.7417,
"step": 1120
},
{
"epoch": 16.38,
"learning_rate": 9.784745762711864e-05,
"loss": 2.6906,
"step": 1130
},
{
"epoch": 16.52,
"learning_rate": 9.767796610169492e-05,
"loss": 2.5662,
"step": 1140
},
{
"epoch": 16.67,
"learning_rate": 9.750847457627119e-05,
"loss": 2.4202,
"step": 1150
},
{
"epoch": 16.81,
"learning_rate": 9.733898305084747e-05,
"loss": 2.4102,
"step": 1160
},
{
"epoch": 16.96,
"learning_rate": 9.716949152542373e-05,
"loss": 2.185,
"step": 1170
},
{
"epoch": 17.1,
"learning_rate": 9.7e-05,
"loss": 1.9315,
"step": 1180
},
{
"epoch": 17.25,
"learning_rate": 9.683050847457628e-05,
"loss": 2.0025,
"step": 1190
},
{
"epoch": 17.39,
"learning_rate": 9.666101694915255e-05,
"loss": 1.8797,
"step": 1200
},
{
"epoch": 17.54,
"learning_rate": 9.649152542372883e-05,
"loss": 1.7524,
"step": 1210
},
{
"epoch": 17.68,
"learning_rate": 9.632203389830509e-05,
"loss": 1.7243,
"step": 1220
},
{
"epoch": 17.83,
"learning_rate": 9.615254237288136e-05,
"loss": 1.5859,
"step": 1230
},
{
"epoch": 17.97,
"learning_rate": 9.598305084745764e-05,
"loss": 1.5059,
"step": 1240
},
{
"epoch": 18.12,
"learning_rate": 9.58135593220339e-05,
"loss": 1.456,
"step": 1250
},
{
"epoch": 18.26,
"learning_rate": 9.564406779661017e-05,
"loss": 1.535,
"step": 1260
},
{
"epoch": 18.41,
"learning_rate": 9.547457627118644e-05,
"loss": 1.4808,
"step": 1270
},
{
"epoch": 18.55,
"learning_rate": 9.530508474576272e-05,
"loss": 1.4568,
"step": 1280
},
{
"epoch": 18.7,
"learning_rate": 9.5135593220339e-05,
"loss": 1.3898,
"step": 1290
},
{
"epoch": 18.84,
"learning_rate": 9.496610169491525e-05,
"loss": 1.4865,
"step": 1300
},
{
"epoch": 18.99,
"learning_rate": 9.479661016949153e-05,
"loss": 1.391,
"step": 1310
},
{
"epoch": 19.13,
"learning_rate": 9.46271186440678e-05,
"loss": 1.4236,
"step": 1320
},
{
"epoch": 19.28,
"learning_rate": 9.445762711864408e-05,
"loss": 1.303,
"step": 1330
},
{
"epoch": 19.42,
"learning_rate": 9.428813559322034e-05,
"loss": 1.2304,
"step": 1340
},
{
"epoch": 19.57,
"learning_rate": 9.411864406779661e-05,
"loss": 1.2014,
"step": 1350
},
{
"epoch": 19.71,
"learning_rate": 9.394915254237289e-05,
"loss": 1.2398,
"step": 1360
},
{
"epoch": 19.86,
"learning_rate": 9.377966101694916e-05,
"loss": 1.1895,
"step": 1370
},
{
"epoch": 20.0,
"learning_rate": 9.361016949152542e-05,
"loss": 1.2296,
"step": 1380
},
{
"epoch": 20.14,
"learning_rate": 9.34406779661017e-05,
"loss": 1.3624,
"step": 1390
},
{
"epoch": 20.29,
"learning_rate": 9.327118644067797e-05,
"loss": 1.1805,
"step": 1400
},
{
"epoch": 20.43,
"learning_rate": 9.310169491525425e-05,
"loss": 1.1705,
"step": 1410
},
{
"epoch": 20.58,
"learning_rate": 9.29322033898305e-05,
"loss": 1.1446,
"step": 1420
},
{
"epoch": 20.72,
"learning_rate": 9.276271186440678e-05,
"loss": 1.2016,
"step": 1430
},
{
"epoch": 20.87,
"learning_rate": 9.259322033898306e-05,
"loss": 1.1053,
"step": 1440
},
{
"epoch": 21.01,
"learning_rate": 9.242372881355933e-05,
"loss": 1.1159,
"step": 1450
},
{
"epoch": 21.16,
"learning_rate": 9.225423728813561e-05,
"loss": 1.0629,
"step": 1460
},
{
"epoch": 21.3,
"learning_rate": 9.208474576271186e-05,
"loss": 1.1606,
"step": 1470
},
{
"epoch": 21.45,
"learning_rate": 9.191525423728814e-05,
"loss": 1.051,
"step": 1480
},
{
"epoch": 21.59,
"learning_rate": 9.174576271186442e-05,
"loss": 1.0948,
"step": 1490
},
{
"epoch": 21.74,
"learning_rate": 9.157627118644069e-05,
"loss": 1.0766,
"step": 1500
},
{
"epoch": 21.74,
"eval_loss": 0.27734482288360596,
"eval_runtime": 571.274,
"eval_samples_per_second": 5.941,
"eval_steps_per_second": 0.744,
"eval_wer": 0.2488228188250376,
"step": 1500
},
{
"epoch": 21.88,
"learning_rate": 9.140677966101695e-05,
"loss": 0.9907,
"step": 1510
},
{
"epoch": 22.03,
"learning_rate": 9.123728813559322e-05,
"loss": 1.0756,
"step": 1520
},
{
"epoch": 22.17,
"learning_rate": 9.10677966101695e-05,
"loss": 1.0947,
"step": 1530
},
{
"epoch": 22.32,
"learning_rate": 9.089830508474577e-05,
"loss": 1.1119,
"step": 1540
},
{
"epoch": 22.46,
"learning_rate": 9.072881355932203e-05,
"loss": 1.039,
"step": 1550
},
{
"epoch": 22.61,
"learning_rate": 9.055932203389831e-05,
"loss": 0.9631,
"step": 1560
},
{
"epoch": 22.75,
"learning_rate": 9.038983050847458e-05,
"loss": 0.9765,
"step": 1570
},
{
"epoch": 22.9,
"learning_rate": 9.022033898305086e-05,
"loss": 0.9932,
"step": 1580
},
{
"epoch": 23.04,
"learning_rate": 9.005084745762712e-05,
"loss": 0.9384,
"step": 1590
},
{
"epoch": 23.19,
"learning_rate": 8.988135593220339e-05,
"loss": 0.9942,
"step": 1600
},
{
"epoch": 23.33,
"learning_rate": 8.971186440677967e-05,
"loss": 0.9546,
"step": 1610
},
{
"epoch": 23.48,
"learning_rate": 8.954237288135594e-05,
"loss": 1.0051,
"step": 1620
},
{
"epoch": 23.62,
"learning_rate": 8.93728813559322e-05,
"loss": 0.9648,
"step": 1630
},
{
"epoch": 23.77,
"learning_rate": 8.920338983050848e-05,
"loss": 1.0198,
"step": 1640
},
{
"epoch": 23.91,
"learning_rate": 8.903389830508475e-05,
"loss": 0.9773,
"step": 1650
},
{
"epoch": 24.06,
"learning_rate": 8.886440677966103e-05,
"loss": 0.9714,
"step": 1660
},
{
"epoch": 24.2,
"learning_rate": 8.869491525423728e-05,
"loss": 0.987,
"step": 1670
},
{
"epoch": 24.35,
"learning_rate": 8.852542372881356e-05,
"loss": 1.0105,
"step": 1680
},
{
"epoch": 24.49,
"learning_rate": 8.835593220338983e-05,
"loss": 0.9965,
"step": 1690
},
{
"epoch": 24.64,
"learning_rate": 8.818644067796611e-05,
"loss": 0.9293,
"step": 1700
},
{
"epoch": 24.78,
"learning_rate": 8.801694915254238e-05,
"loss": 0.8939,
"step": 1710
},
{
"epoch": 24.93,
"learning_rate": 8.784745762711864e-05,
"loss": 0.9721,
"step": 1720
},
{
"epoch": 25.07,
"learning_rate": 8.767796610169492e-05,
"loss": 1.0158,
"step": 1730
},
{
"epoch": 25.22,
"learning_rate": 8.750847457627119e-05,
"loss": 1.08,
"step": 1740
},
{
"epoch": 25.36,
"learning_rate": 8.733898305084747e-05,
"loss": 1.3222,
"step": 1750
},
{
"epoch": 25.51,
"learning_rate": 8.716949152542373e-05,
"loss": 0.9776,
"step": 1760
},
{
"epoch": 25.65,
"learning_rate": 8.7e-05,
"loss": 0.9089,
"step": 1770
},
{
"epoch": 25.8,
"learning_rate": 8.683050847457628e-05,
"loss": 0.9171,
"step": 1780
},
{
"epoch": 25.94,
"learning_rate": 8.666101694915255e-05,
"loss": 0.9366,
"step": 1790
},
{
"epoch": 26.09,
"learning_rate": 8.649152542372881e-05,
"loss": 0.8907,
"step": 1800
},
{
"epoch": 26.23,
"learning_rate": 8.632203389830509e-05,
"loss": 0.8427,
"step": 1810
},
{
"epoch": 26.38,
"learning_rate": 8.615254237288136e-05,
"loss": 0.8692,
"step": 1820
},
{
"epoch": 26.52,
"learning_rate": 8.598305084745764e-05,
"loss": 0.8933,
"step": 1830
},
{
"epoch": 26.67,
"learning_rate": 8.581355932203389e-05,
"loss": 0.9524,
"step": 1840
},
{
"epoch": 26.81,
"learning_rate": 8.564406779661017e-05,
"loss": 0.9623,
"step": 1850
},
{
"epoch": 26.96,
"learning_rate": 8.547457627118645e-05,
"loss": 0.9458,
"step": 1860
},
{
"epoch": 27.1,
"learning_rate": 8.530508474576272e-05,
"loss": 0.9299,
"step": 1870
},
{
"epoch": 27.25,
"learning_rate": 8.513559322033898e-05,
"loss": 0.8612,
"step": 1880
},
{
"epoch": 27.39,
"learning_rate": 8.496610169491525e-05,
"loss": 0.9352,
"step": 1890
},
{
"epoch": 27.54,
"learning_rate": 8.479661016949153e-05,
"loss": 0.8692,
"step": 1900
},
{
"epoch": 27.68,
"learning_rate": 8.46271186440678e-05,
"loss": 0.8834,
"step": 1910
},
{
"epoch": 27.83,
"learning_rate": 8.445762711864406e-05,
"loss": 0.8448,
"step": 1920
},
{
"epoch": 27.97,
"learning_rate": 8.428813559322034e-05,
"loss": 0.9122,
"step": 1930
},
{
"epoch": 28.12,
"learning_rate": 8.411864406779661e-05,
"loss": 0.8632,
"step": 1940
},
{
"epoch": 28.26,
"learning_rate": 8.394915254237289e-05,
"loss": 0.8578,
"step": 1950
},
{
"epoch": 28.41,
"learning_rate": 8.377966101694916e-05,
"loss": 0.8758,
"step": 1960
},
{
"epoch": 28.55,
"learning_rate": 8.361016949152542e-05,
"loss": 0.9024,
"step": 1970
},
{
"epoch": 28.7,
"learning_rate": 8.34406779661017e-05,
"loss": 1.1392,
"step": 1980
},
{
"epoch": 28.84,
"learning_rate": 8.327118644067797e-05,
"loss": 0.8794,
"step": 1990
},
{
"epoch": 28.99,
"learning_rate": 8.310169491525425e-05,
"loss": 0.8416,
"step": 2000
},
{
"epoch": 28.99,
"eval_loss": 0.222377210855484,
"eval_runtime": 567.7022,
"eval_samples_per_second": 5.978,
"eval_steps_per_second": 0.749,
"eval_wer": 0.19897443482977098,
"step": 2000
},
{
"epoch": 29.13,
"learning_rate": 8.293220338983052e-05,
"loss": 0.8217,
"step": 2010
},
{
"epoch": 29.28,
"learning_rate": 8.276271186440678e-05,
"loss": 0.9189,
"step": 2020
},
{
"epoch": 29.42,
"learning_rate": 8.259322033898306e-05,
"loss": 0.8251,
"step": 2030
},
{
"epoch": 29.57,
"learning_rate": 8.242372881355933e-05,
"loss": 0.9254,
"step": 2040
},
{
"epoch": 29.71,
"learning_rate": 8.22542372881356e-05,
"loss": 0.9276,
"step": 2050
},
{
"epoch": 29.86,
"learning_rate": 8.208474576271186e-05,
"loss": 0.8893,
"step": 2060
},
{
"epoch": 30.0,
"learning_rate": 8.191525423728814e-05,
"loss": 0.7608,
"step": 2070
},
{
"epoch": 30.14,
"learning_rate": 8.174576271186442e-05,
"loss": 0.7857,
"step": 2080
},
{
"epoch": 30.29,
"learning_rate": 8.157627118644067e-05,
"loss": 0.9132,
"step": 2090
},
{
"epoch": 30.43,
"learning_rate": 8.140677966101695e-05,
"loss": 0.8814,
"step": 2100
},
{
"epoch": 30.58,
"learning_rate": 8.123728813559322e-05,
"loss": 0.901,
"step": 2110
},
{
"epoch": 30.72,
"learning_rate": 8.10677966101695e-05,
"loss": 0.8746,
"step": 2120
},
{
"epoch": 30.87,
"learning_rate": 8.089830508474577e-05,
"loss": 0.9291,
"step": 2130
},
{
"epoch": 31.01,
"learning_rate": 8.072881355932203e-05,
"loss": 0.8357,
"step": 2140
},
{
"epoch": 31.16,
"learning_rate": 8.055932203389831e-05,
"loss": 0.8305,
"step": 2150
},
{
"epoch": 31.3,
"learning_rate": 8.038983050847458e-05,
"loss": 0.8275,
"step": 2160
},
{
"epoch": 31.45,
"learning_rate": 8.022033898305085e-05,
"loss": 0.8111,
"step": 2170
},
{
"epoch": 31.59,
"learning_rate": 8.005084745762713e-05,
"loss": 0.8109,
"step": 2180
},
{
"epoch": 31.74,
"learning_rate": 7.988135593220339e-05,
"loss": 0.8761,
"step": 2190
},
{
"epoch": 31.88,
"learning_rate": 7.971186440677967e-05,
"loss": 0.8179,
"step": 2200
},
{
"epoch": 32.03,
"learning_rate": 7.954237288135592e-05,
"loss": 0.8078,
"step": 2210
},
{
"epoch": 32.17,
"learning_rate": 7.93728813559322e-05,
"loss": 0.8174,
"step": 2220
},
{
"epoch": 32.32,
"learning_rate": 7.920338983050848e-05,
"loss": 0.8176,
"step": 2230
},
{
"epoch": 32.46,
"learning_rate": 7.903389830508475e-05,
"loss": 0.729,
"step": 2240
},
{
"epoch": 32.61,
"learning_rate": 7.886440677966102e-05,
"loss": 0.7884,
"step": 2250
},
{
"epoch": 32.75,
"learning_rate": 7.869491525423728e-05,
"loss": 0.7896,
"step": 2260
},
{
"epoch": 32.9,
"learning_rate": 7.852542372881356e-05,
"loss": 0.8016,
"step": 2270
},
{
"epoch": 33.04,
"learning_rate": 7.835593220338984e-05,
"loss": 0.7635,
"step": 2280
},
{
"epoch": 33.19,
"learning_rate": 7.818644067796611e-05,
"loss": 0.8308,
"step": 2290
},
{
"epoch": 33.33,
"learning_rate": 7.801694915254238e-05,
"loss": 0.7543,
"step": 2300
},
{
"epoch": 33.48,
"learning_rate": 7.784745762711864e-05,
"loss": 0.8617,
"step": 2310
},
{
"epoch": 33.62,
"learning_rate": 7.767796610169492e-05,
"loss": 0.8434,
"step": 2320
},
{
"epoch": 33.77,
"learning_rate": 7.750847457627119e-05,
"loss": 0.948,
"step": 2330
},
{
"epoch": 33.91,
"learning_rate": 7.733898305084746e-05,
"loss": 1.0185,
"step": 2340
},
{
"epoch": 34.06,
"learning_rate": 7.716949152542374e-05,
"loss": 0.827,
"step": 2350
},
{
"epoch": 34.2,
"learning_rate": 7.7e-05,
"loss": 0.7916,
"step": 2360
},
{
"epoch": 34.35,
"learning_rate": 7.683050847457628e-05,
"loss": 0.7858,
"step": 2370
},
{
"epoch": 34.49,
"learning_rate": 7.666101694915255e-05,
"loss": 0.8583,
"step": 2380
},
{
"epoch": 34.64,
"learning_rate": 7.649152542372881e-05,
"loss": 0.7672,
"step": 2390
},
{
"epoch": 34.78,
"learning_rate": 7.63220338983051e-05,
"loss": 0.7745,
"step": 2400
},
{
"epoch": 34.93,
"learning_rate": 7.615254237288136e-05,
"loss": 0.8104,
"step": 2410
},
{
"epoch": 35.07,
"learning_rate": 7.598305084745763e-05,
"loss": 0.7519,
"step": 2420
},
{
"epoch": 35.22,
"learning_rate": 7.58135593220339e-05,
"loss": 0.7867,
"step": 2430
},
{
"epoch": 35.36,
"learning_rate": 7.564406779661017e-05,
"loss": 0.7614,
"step": 2440
},
{
"epoch": 35.51,
"learning_rate": 7.547457627118645e-05,
"loss": 0.8043,
"step": 2450
},
{
"epoch": 35.65,
"learning_rate": 7.53050847457627e-05,
"loss": 0.7866,
"step": 2460
},
{
"epoch": 35.8,
"learning_rate": 7.513559322033899e-05,
"loss": 0.8356,
"step": 2470
},
{
"epoch": 35.94,
"learning_rate": 7.496610169491525e-05,
"loss": 0.761,
"step": 2480
},
{
"epoch": 36.09,
"learning_rate": 7.479661016949153e-05,
"loss": 0.9027,
"step": 2490
},
{
"epoch": 36.23,
"learning_rate": 7.46271186440678e-05,
"loss": 0.8048,
"step": 2500
},
{
"epoch": 36.23,
"eval_loss": 0.20626655220985413,
"eval_runtime": 571.29,
"eval_samples_per_second": 5.941,
"eval_steps_per_second": 0.744,
"eval_wer": 0.17916985101364954,
"step": 2500
},
{
"epoch": 36.38,
"learning_rate": 7.445762711864406e-05,
"loss": 0.7866,
"step": 2510
},
{
"epoch": 36.52,
"learning_rate": 7.428813559322034e-05,
"loss": 0.7869,
"step": 2520
},
{
"epoch": 36.67,
"learning_rate": 7.411864406779661e-05,
"loss": 0.8445,
"step": 2530
},
{
"epoch": 36.81,
"learning_rate": 7.394915254237289e-05,
"loss": 0.7745,
"step": 2540
},
{
"epoch": 36.96,
"learning_rate": 7.377966101694916e-05,
"loss": 0.7888,
"step": 2550
},
{
"epoch": 37.1,
"learning_rate": 7.361016949152542e-05,
"loss": 0.776,
"step": 2560
},
{
"epoch": 37.25,
"learning_rate": 7.34406779661017e-05,
"loss": 0.8096,
"step": 2570
},
{
"epoch": 37.39,
"learning_rate": 7.327118644067797e-05,
"loss": 0.7561,
"step": 2580
},
{
"epoch": 37.54,
"learning_rate": 7.310169491525424e-05,
"loss": 0.7251,
"step": 2590
},
{
"epoch": 37.68,
"learning_rate": 7.293220338983052e-05,
"loss": 0.8062,
"step": 2600
},
{
"epoch": 37.83,
"learning_rate": 7.276271186440678e-05,
"loss": 0.7388,
"step": 2610
},
{
"epoch": 37.97,
"learning_rate": 7.259322033898306e-05,
"loss": 0.7616,
"step": 2620
},
{
"epoch": 38.12,
"learning_rate": 7.242372881355932e-05,
"loss": 0.7593,
"step": 2630
},
{
"epoch": 38.26,
"learning_rate": 7.22542372881356e-05,
"loss": 0.7399,
"step": 2640
},
{
"epoch": 38.41,
"learning_rate": 7.208474576271188e-05,
"loss": 0.7559,
"step": 2650
},
{
"epoch": 38.55,
"learning_rate": 7.191525423728814e-05,
"loss": 0.7559,
"step": 2660
},
{
"epoch": 38.7,
"learning_rate": 7.174576271186441e-05,
"loss": 0.7218,
"step": 2670
},
{
"epoch": 38.84,
"learning_rate": 7.157627118644067e-05,
"loss": 0.7991,
"step": 2680
},
{
"epoch": 38.99,
"learning_rate": 7.140677966101695e-05,
"loss": 0.7493,
"step": 2690
},
{
"epoch": 39.13,
"learning_rate": 7.123728813559322e-05,
"loss": 0.7502,
"step": 2700
},
{
"epoch": 39.28,
"learning_rate": 7.106779661016949e-05,
"loss": 0.7883,
"step": 2710
},
{
"epoch": 39.42,
"learning_rate": 7.089830508474577e-05,
"loss": 0.8236,
"step": 2720
},
{
"epoch": 39.57,
"learning_rate": 7.072881355932203e-05,
"loss": 0.8141,
"step": 2730
},
{
"epoch": 39.71,
"learning_rate": 7.055932203389831e-05,
"loss": 0.7956,
"step": 2740
},
{
"epoch": 39.86,
"learning_rate": 7.038983050847458e-05,
"loss": 0.7714,
"step": 2750
},
{
"epoch": 40.0,
"learning_rate": 7.022033898305085e-05,
"loss": 0.6689,
"step": 2760
},
{
"epoch": 40.14,
"learning_rate": 7.005084745762713e-05,
"loss": 0.7202,
"step": 2770
},
{
"epoch": 40.29,
"learning_rate": 6.988135593220339e-05,
"loss": 0.7278,
"step": 2780
},
{
"epoch": 40.43,
"learning_rate": 6.971186440677966e-05,
"loss": 0.801,
"step": 2790
},
{
"epoch": 40.58,
"learning_rate": 6.954237288135594e-05,
"loss": 0.7865,
"step": 2800
},
{
"epoch": 40.72,
"learning_rate": 6.93728813559322e-05,
"loss": 0.6811,
"step": 2810
},
{
"epoch": 40.87,
"learning_rate": 6.920338983050849e-05,
"loss": 0.7933,
"step": 2820
},
{
"epoch": 41.01,
"learning_rate": 6.903389830508475e-05,
"loss": 0.7826,
"step": 2830
},
{
"epoch": 41.16,
"learning_rate": 6.886440677966102e-05,
"loss": 0.7142,
"step": 2840
},
{
"epoch": 41.3,
"learning_rate": 6.869491525423728e-05,
"loss": 0.8347,
"step": 2850
},
{
"epoch": 41.45,
"learning_rate": 6.852542372881356e-05,
"loss": 0.7712,
"step": 2860
},
{
"epoch": 41.59,
"learning_rate": 6.835593220338984e-05,
"loss": 0.7458,
"step": 2870
},
{
"epoch": 41.74,
"learning_rate": 6.81864406779661e-05,
"loss": 0.7528,
"step": 2880
},
{
"epoch": 41.88,
"learning_rate": 6.801694915254238e-05,
"loss": 0.7699,
"step": 2890
},
{
"epoch": 42.03,
"learning_rate": 6.784745762711864e-05,
"loss": 0.8577,
"step": 2900
},
{
"epoch": 42.17,
"learning_rate": 6.767796610169492e-05,
"loss": 0.7259,
"step": 2910
},
{
"epoch": 42.32,
"learning_rate": 6.750847457627119e-05,
"loss": 0.7039,
"step": 2920
},
{
"epoch": 42.46,
"learning_rate": 6.733898305084746e-05,
"loss": 0.8307,
"step": 2930
},
{
"epoch": 42.61,
"learning_rate": 6.716949152542374e-05,
"loss": 0.7952,
"step": 2940
},
{
"epoch": 42.75,
"learning_rate": 6.7e-05,
"loss": 0.6789,
"step": 2950
},
{
"epoch": 42.9,
"learning_rate": 6.683050847457627e-05,
"loss": 0.7708,
"step": 2960
},
{
"epoch": 43.04,
"learning_rate": 6.666101694915255e-05,
"loss": 0.6823,
"step": 2970
},
{
"epoch": 43.19,
"learning_rate": 6.649152542372881e-05,
"loss": 0.6989,
"step": 2980
},
{
"epoch": 43.33,
"learning_rate": 6.63220338983051e-05,
"loss": 0.7944,
"step": 2990
},
{
"epoch": 43.48,
"learning_rate": 6.615254237288135e-05,
"loss": 0.7664,
"step": 3000
},
{
"epoch": 43.48,
"eval_loss": 0.2088436335325241,
"eval_runtime": 572.961,
"eval_samples_per_second": 5.924,
"eval_steps_per_second": 0.742,
"eval_wer": 0.17475696242059677,
"step": 3000
},
{
"epoch": 43.62,
"learning_rate": 6.598305084745763e-05,
"loss": 0.7256,
"step": 3010
},
{
"epoch": 43.77,
"learning_rate": 6.581355932203391e-05,
"loss": 0.7146,
"step": 3020
},
{
"epoch": 43.91,
"learning_rate": 6.564406779661017e-05,
"loss": 0.7347,
"step": 3030
},
{
"epoch": 44.06,
"learning_rate": 6.547457627118644e-05,
"loss": 0.7302,
"step": 3040
},
{
"epoch": 44.2,
"learning_rate": 6.53050847457627e-05,
"loss": 0.7772,
"step": 3050
},
{
"epoch": 44.35,
"learning_rate": 6.513559322033899e-05,
"loss": 0.7214,
"step": 3060
},
{
"epoch": 44.49,
"learning_rate": 6.496610169491527e-05,
"loss": 0.6899,
"step": 3070
},
{
"epoch": 44.64,
"learning_rate": 6.479661016949153e-05,
"loss": 0.7488,
"step": 3080
},
{
"epoch": 44.78,
"learning_rate": 6.46271186440678e-05,
"loss": 0.831,
"step": 3090
},
{
"epoch": 44.93,
"learning_rate": 6.445762711864407e-05,
"loss": 0.799,
"step": 3100
},
{
"epoch": 45.07,
"learning_rate": 6.428813559322035e-05,
"loss": 0.7195,
"step": 3110
},
{
"epoch": 45.22,
"learning_rate": 6.411864406779661e-05,
"loss": 0.7476,
"step": 3120
},
{
"epoch": 45.36,
"learning_rate": 6.394915254237288e-05,
"loss": 0.7475,
"step": 3130
},
{
"epoch": 45.51,
"learning_rate": 6.377966101694916e-05,
"loss": 0.767,
"step": 3140
},
{
"epoch": 45.65,
"learning_rate": 6.361016949152542e-05,
"loss": 0.7263,
"step": 3150
},
{
"epoch": 45.8,
"learning_rate": 6.34406779661017e-05,
"loss": 0.7423,
"step": 3160
},
{
"epoch": 45.94,
"learning_rate": 6.327118644067797e-05,
"loss": 0.7693,
"step": 3170
},
{
"epoch": 46.09,
"learning_rate": 6.310169491525424e-05,
"loss": 0.8136,
"step": 3180
},
{
"epoch": 46.23,
"learning_rate": 6.293220338983052e-05,
"loss": 0.6705,
"step": 3190
},
{
"epoch": 46.38,
"learning_rate": 6.276271186440678e-05,
"loss": 0.7041,
"step": 3200
},
{
"epoch": 46.52,
"learning_rate": 6.259322033898305e-05,
"loss": 0.7478,
"step": 3210
},
{
"epoch": 46.67,
"learning_rate": 6.242372881355933e-05,
"loss": 0.6909,
"step": 3220
},
{
"epoch": 46.81,
"learning_rate": 6.22542372881356e-05,
"loss": 0.7702,
"step": 3230
},
{
"epoch": 46.96,
"learning_rate": 6.208474576271188e-05,
"loss": 0.7302,
"step": 3240
},
{
"epoch": 47.1,
"learning_rate": 6.191525423728813e-05,
"loss": 0.7812,
"step": 3250
},
{
"epoch": 47.25,
"learning_rate": 6.174576271186441e-05,
"loss": 0.7043,
"step": 3260
},
{
"epoch": 47.39,
"learning_rate": 6.157627118644068e-05,
"loss": 0.7645,
"step": 3270
},
{
"epoch": 47.54,
"learning_rate": 6.140677966101696e-05,
"loss": 0.8539,
"step": 3280
},
{
"epoch": 47.68,
"learning_rate": 6.123728813559322e-05,
"loss": 0.7747,
"step": 3290
},
{
"epoch": 47.83,
"learning_rate": 6.106779661016949e-05,
"loss": 0.6613,
"step": 3300
},
{
"epoch": 47.97,
"learning_rate": 6.089830508474577e-05,
"loss": 0.7399,
"step": 3310
},
{
"epoch": 48.12,
"learning_rate": 6.072881355932204e-05,
"loss": 0.6675,
"step": 3320
},
{
"epoch": 48.26,
"learning_rate": 6.05593220338983e-05,
"loss": 0.6699,
"step": 3330
},
{
"epoch": 48.41,
"learning_rate": 6.0389830508474574e-05,
"loss": 0.7534,
"step": 3340
},
{
"epoch": 48.55,
"learning_rate": 6.022033898305085e-05,
"loss": 0.7837,
"step": 3350
},
{
"epoch": 48.7,
"learning_rate": 6.005084745762713e-05,
"loss": 0.7066,
"step": 3360
},
{
"epoch": 48.84,
"learning_rate": 5.98813559322034e-05,
"loss": 0.7931,
"step": 3370
},
{
"epoch": 48.99,
"learning_rate": 5.971186440677966e-05,
"loss": 0.7575,
"step": 3380
},
{
"epoch": 49.13,
"learning_rate": 5.954237288135593e-05,
"loss": 0.6951,
"step": 3390
},
{
"epoch": 49.28,
"learning_rate": 5.9372881355932206e-05,
"loss": 0.788,
"step": 3400
},
{
"epoch": 49.42,
"learning_rate": 5.920338983050848e-05,
"loss": 0.7727,
"step": 3410
},
{
"epoch": 49.57,
"learning_rate": 5.9033898305084746e-05,
"loss": 0.7102,
"step": 3420
},
{
"epoch": 49.71,
"learning_rate": 5.886440677966102e-05,
"loss": 0.6733,
"step": 3430
},
{
"epoch": 49.86,
"learning_rate": 5.869491525423729e-05,
"loss": 0.6866,
"step": 3440
},
{
"epoch": 50.0,
"learning_rate": 5.8525423728813565e-05,
"loss": 0.7414,
"step": 3450
},
{
"epoch": 50.14,
"learning_rate": 5.835593220338983e-05,
"loss": 0.684,
"step": 3460
},
{
"epoch": 50.29,
"learning_rate": 5.8186440677966105e-05,
"loss": 0.7363,
"step": 3470
},
{
"epoch": 50.43,
"learning_rate": 5.801694915254238e-05,
"loss": 0.7421,
"step": 3480
},
{
"epoch": 50.58,
"learning_rate": 5.784745762711865e-05,
"loss": 0.7038,
"step": 3490
},
{
"epoch": 50.72,
"learning_rate": 5.767796610169491e-05,
"loss": 0.6571,
"step": 3500
},
{
"epoch": 50.72,
"eval_loss": 0.20422525703907013,
"eval_runtime": 570.0136,
"eval_samples_per_second": 5.954,
"eval_steps_per_second": 0.746,
"eval_wer": 0.166761169867449,
"step": 3500
},
{
"epoch": 50.87,
"learning_rate": 5.750847457627119e-05,
"loss": 0.7286,
"step": 3510
},
{
"epoch": 51.01,
"learning_rate": 5.7338983050847464e-05,
"loss": 0.6604,
"step": 3520
},
{
"epoch": 51.16,
"learning_rate": 5.716949152542374e-05,
"loss": 0.7662,
"step": 3530
},
{
"epoch": 51.3,
"learning_rate": 5.6999999999999996e-05,
"loss": 0.7688,
"step": 3540
},
{
"epoch": 51.45,
"learning_rate": 5.683050847457627e-05,
"loss": 0.6903,
"step": 3550
},
{
"epoch": 51.59,
"learning_rate": 5.666101694915254e-05,
"loss": 0.6939,
"step": 3560
},
{
"epoch": 51.74,
"learning_rate": 5.649152542372882e-05,
"loss": 0.729,
"step": 3570
},
{
"epoch": 51.88,
"learning_rate": 5.632203389830508e-05,
"loss": 0.7307,
"step": 3580
},
{
"epoch": 52.03,
"learning_rate": 5.6152542372881355e-05,
"loss": 0.6863,
"step": 3590
},
{
"epoch": 52.17,
"learning_rate": 5.598305084745763e-05,
"loss": 0.6843,
"step": 3600
},
{
"epoch": 52.32,
"learning_rate": 5.58135593220339e-05,
"loss": 0.7298,
"step": 3610
},
{
"epoch": 52.46,
"learning_rate": 5.5644067796610175e-05,
"loss": 0.7545,
"step": 3620
},
{
"epoch": 52.61,
"learning_rate": 5.547457627118644e-05,
"loss": 0.6663,
"step": 3630
},
{
"epoch": 52.75,
"learning_rate": 5.5305084745762714e-05,
"loss": 0.6822,
"step": 3640
},
{
"epoch": 52.9,
"learning_rate": 5.513559322033899e-05,
"loss": 0.806,
"step": 3650
},
{
"epoch": 53.04,
"learning_rate": 5.496610169491526e-05,
"loss": 0.6597,
"step": 3660
},
{
"epoch": 53.19,
"learning_rate": 5.479661016949153e-05,
"loss": 0.7131,
"step": 3670
},
{
"epoch": 53.33,
"learning_rate": 5.46271186440678e-05,
"loss": 0.7221,
"step": 3680
},
{
"epoch": 53.48,
"learning_rate": 5.445762711864407e-05,
"loss": 0.6235,
"step": 3690
},
{
"epoch": 53.62,
"learning_rate": 5.4288135593220346e-05,
"loss": 0.6547,
"step": 3700
},
{
"epoch": 53.77,
"learning_rate": 5.4118644067796606e-05,
"loss": 0.7014,
"step": 3710
},
{
"epoch": 53.91,
"learning_rate": 5.3949152542372886e-05,
"loss": 0.6826,
"step": 3720
},
{
"epoch": 54.06,
"learning_rate": 5.377966101694916e-05,
"loss": 0.664,
"step": 3730
},
{
"epoch": 54.2,
"learning_rate": 5.361016949152543e-05,
"loss": 0.7201,
"step": 3740
},
{
"epoch": 54.35,
"learning_rate": 5.344067796610169e-05,
"loss": 0.6765,
"step": 3750
},
{
"epoch": 54.49,
"learning_rate": 5.3271186440677965e-05,
"loss": 0.6541,
"step": 3760
},
{
"epoch": 54.64,
"learning_rate": 5.310169491525424e-05,
"loss": 0.7299,
"step": 3770
},
{
"epoch": 54.78,
"learning_rate": 5.293220338983051e-05,
"loss": 0.7139,
"step": 3780
},
{
"epoch": 54.93,
"learning_rate": 5.276271186440678e-05,
"loss": 0.6925,
"step": 3790
},
{
"epoch": 55.07,
"learning_rate": 5.259322033898305e-05,
"loss": 0.6712,
"step": 3800
},
{
"epoch": 55.22,
"learning_rate": 5.2423728813559324e-05,
"loss": 0.6999,
"step": 3810
},
{
"epoch": 55.36,
"learning_rate": 5.22542372881356e-05,
"loss": 0.6577,
"step": 3820
},
{
"epoch": 55.51,
"learning_rate": 5.2084745762711864e-05,
"loss": 0.733,
"step": 3830
},
{
"epoch": 55.65,
"learning_rate": 5.191525423728814e-05,
"loss": 0.666,
"step": 3840
},
{
"epoch": 55.8,
"learning_rate": 5.174576271186441e-05,
"loss": 0.6638,
"step": 3850
},
{
"epoch": 55.94,
"learning_rate": 5.157627118644068e-05,
"loss": 0.7019,
"step": 3860
},
{
"epoch": 56.09,
"learning_rate": 5.140677966101695e-05,
"loss": 0.7018,
"step": 3870
},
{
"epoch": 56.23,
"learning_rate": 5.123728813559322e-05,
"loss": 0.6911,
"step": 3880
},
{
"epoch": 56.38,
"learning_rate": 5.1067796610169496e-05,
"loss": 0.7035,
"step": 3890
},
{
"epoch": 56.52,
"learning_rate": 5.089830508474577e-05,
"loss": 0.7204,
"step": 3900
},
{
"epoch": 56.67,
"learning_rate": 5.072881355932204e-05,
"loss": 0.6888,
"step": 3910
},
{
"epoch": 56.81,
"learning_rate": 5.05593220338983e-05,
"loss": 0.7502,
"step": 3920
},
{
"epoch": 56.96,
"learning_rate": 5.0389830508474575e-05,
"loss": 0.6246,
"step": 3930
},
{
"epoch": 57.1,
"learning_rate": 5.0220338983050855e-05,
"loss": 0.6079,
"step": 3940
},
{
"epoch": 57.25,
"learning_rate": 5.005084745762713e-05,
"loss": 0.6618,
"step": 3950
},
{
"epoch": 57.39,
"learning_rate": 4.9881355932203394e-05,
"loss": 0.6935,
"step": 3960
},
{
"epoch": 57.54,
"learning_rate": 4.971186440677966e-05,
"loss": 0.6872,
"step": 3970
},
{
"epoch": 57.68,
"learning_rate": 4.9542372881355934e-05,
"loss": 0.6546,
"step": 3980
},
{
"epoch": 57.83,
"learning_rate": 4.937288135593221e-05,
"loss": 0.7091,
"step": 3990
},
{
"epoch": 57.97,
"learning_rate": 4.920338983050848e-05,
"loss": 0.7014,
"step": 4000
},
{
"epoch": 57.97,
"eval_loss": 0.21360060572624207,
"eval_runtime": 570.979,
"eval_samples_per_second": 5.944,
"eval_steps_per_second": 0.744,
"eval_wer": 0.16485467051253605,
"step": 4000
},
{
"epoch": 58.12,
"learning_rate": 4.9033898305084746e-05,
"loss": 0.6837,
"step": 4010
},
{
"epoch": 58.26,
"learning_rate": 4.886440677966102e-05,
"loss": 0.6707,
"step": 4020
},
{
"epoch": 58.41,
"learning_rate": 4.8694915254237286e-05,
"loss": 0.6715,
"step": 4030
},
{
"epoch": 58.55,
"learning_rate": 4.8525423728813566e-05,
"loss": 0.64,
"step": 4040
},
{
"epoch": 58.7,
"learning_rate": 4.835593220338983e-05,
"loss": 0.6904,
"step": 4050
},
{
"epoch": 58.84,
"learning_rate": 4.8186440677966105e-05,
"loss": 0.6809,
"step": 4060
},
{
"epoch": 58.99,
"learning_rate": 4.801694915254237e-05,
"loss": 0.6187,
"step": 4070
},
{
"epoch": 59.13,
"learning_rate": 4.7847457627118645e-05,
"loss": 0.7028,
"step": 4080
},
{
"epoch": 59.28,
"learning_rate": 4.767796610169492e-05,
"loss": 0.7025,
"step": 4090
},
{
"epoch": 59.42,
"learning_rate": 4.750847457627119e-05,
"loss": 0.6471,
"step": 4100
},
{
"epoch": 59.57,
"learning_rate": 4.733898305084746e-05,
"loss": 0.6968,
"step": 4110
},
{
"epoch": 59.71,
"learning_rate": 4.716949152542373e-05,
"loss": 0.6268,
"step": 4120
},
{
"epoch": 59.86,
"learning_rate": 4.7e-05,
"loss": 0.6731,
"step": 4130
},
{
"epoch": 60.0,
"learning_rate": 4.683050847457627e-05,
"loss": 0.6614,
"step": 4140
},
{
"epoch": 60.14,
"learning_rate": 4.666101694915255e-05,
"loss": 0.6138,
"step": 4150
},
{
"epoch": 60.29,
"learning_rate": 4.649152542372882e-05,
"loss": 0.6875,
"step": 4160
},
{
"epoch": 60.43,
"learning_rate": 4.632203389830509e-05,
"loss": 0.7305,
"step": 4170
},
{
"epoch": 60.58,
"learning_rate": 4.6152542372881356e-05,
"loss": 0.6367,
"step": 4180
},
{
"epoch": 60.72,
"learning_rate": 4.598305084745763e-05,
"loss": 0.7106,
"step": 4190
},
{
"epoch": 60.87,
"learning_rate": 4.58135593220339e-05,
"loss": 0.7531,
"step": 4200
},
{
"epoch": 61.01,
"learning_rate": 4.5644067796610176e-05,
"loss": 0.6545,
"step": 4210
},
{
"epoch": 61.16,
"learning_rate": 4.547457627118644e-05,
"loss": 0.7458,
"step": 4220
},
{
"epoch": 61.3,
"learning_rate": 4.5305084745762715e-05,
"loss": 0.6348,
"step": 4230
},
{
"epoch": 61.45,
"learning_rate": 4.513559322033898e-05,
"loss": 0.6631,
"step": 4240
},
{
"epoch": 61.59,
"learning_rate": 4.4966101694915255e-05,
"loss": 0.8233,
"step": 4250
},
{
"epoch": 61.74,
"learning_rate": 4.479661016949153e-05,
"loss": 0.6475,
"step": 4260
},
{
"epoch": 61.88,
"learning_rate": 4.46271186440678e-05,
"loss": 0.659,
"step": 4270
},
{
"epoch": 62.03,
"learning_rate": 4.445762711864407e-05,
"loss": 0.7019,
"step": 4280
},
{
"epoch": 62.17,
"learning_rate": 4.428813559322034e-05,
"loss": 0.6716,
"step": 4290
},
{
"epoch": 62.32,
"learning_rate": 4.4118644067796614e-05,
"loss": 0.6016,
"step": 4300
},
{
"epoch": 62.46,
"learning_rate": 4.394915254237289e-05,
"loss": 0.6532,
"step": 4310
},
{
"epoch": 62.61,
"learning_rate": 4.377966101694915e-05,
"loss": 0.6426,
"step": 4320
},
{
"epoch": 62.75,
"learning_rate": 4.3610169491525426e-05,
"loss": 0.6543,
"step": 4330
},
{
"epoch": 62.9,
"learning_rate": 4.344067796610169e-05,
"loss": 0.7405,
"step": 4340
},
{
"epoch": 63.04,
"learning_rate": 4.3271186440677966e-05,
"loss": 0.6795,
"step": 4350
},
{
"epoch": 63.19,
"learning_rate": 4.310169491525424e-05,
"loss": 0.726,
"step": 4360
},
{
"epoch": 63.33,
"learning_rate": 4.293220338983051e-05,
"loss": 0.8443,
"step": 4370
},
{
"epoch": 63.48,
"learning_rate": 4.276271186440678e-05,
"loss": 0.7359,
"step": 4380
},
{
"epoch": 63.62,
"learning_rate": 4.259322033898305e-05,
"loss": 0.6781,
"step": 4390
},
{
"epoch": 63.77,
"learning_rate": 4.242372881355932e-05,
"loss": 0.6366,
"step": 4400
},
{
"epoch": 63.91,
"learning_rate": 4.22542372881356e-05,
"loss": 0.6598,
"step": 4410
},
{
"epoch": 64.06,
"learning_rate": 4.208474576271187e-05,
"loss": 0.6668,
"step": 4420
},
{
"epoch": 64.2,
"learning_rate": 4.191525423728814e-05,
"loss": 0.7418,
"step": 4430
},
{
"epoch": 64.35,
"learning_rate": 4.174576271186441e-05,
"loss": 0.6788,
"step": 4440
},
{
"epoch": 64.49,
"learning_rate": 4.157627118644068e-05,
"loss": 0.6549,
"step": 4450
},
{
"epoch": 64.64,
"learning_rate": 4.140677966101695e-05,
"loss": 0.6636,
"step": 4460
},
{
"epoch": 64.78,
"learning_rate": 4.1237288135593223e-05,
"loss": 0.6163,
"step": 4470
},
{
"epoch": 64.93,
"learning_rate": 4.10677966101695e-05,
"loss": 0.7296,
"step": 4480
},
{
"epoch": 65.07,
"learning_rate": 4.089830508474576e-05,
"loss": 0.6633,
"step": 4490
},
{
"epoch": 65.22,
"learning_rate": 4.0728813559322036e-05,
"loss": 0.6171,
"step": 4500
},
{
"epoch": 65.22,
"eval_loss": 0.21388114988803864,
"eval_runtime": 574.1167,
"eval_samples_per_second": 5.912,
"eval_steps_per_second": 0.74,
"eval_wer": 0.16409864490627749,
"step": 4500
},
{
"epoch": 65.36,
"learning_rate": 4.055932203389831e-05,
"loss": 0.647,
"step": 4510
},
{
"epoch": 65.51,
"learning_rate": 4.038983050847458e-05,
"loss": 0.5946,
"step": 4520
},
{
"epoch": 65.65,
"learning_rate": 4.022033898305085e-05,
"loss": 0.6789,
"step": 4530
},
{
"epoch": 65.8,
"learning_rate": 4.005084745762712e-05,
"loss": 0.6854,
"step": 4540
},
{
"epoch": 65.94,
"learning_rate": 3.988135593220339e-05,
"loss": 0.6547,
"step": 4550
},
{
"epoch": 66.09,
"learning_rate": 3.971186440677966e-05,
"loss": 0.7462,
"step": 4560
},
{
"epoch": 66.23,
"learning_rate": 3.9542372881355935e-05,
"loss": 0.6724,
"step": 4570
},
{
"epoch": 66.38,
"learning_rate": 3.937288135593221e-05,
"loss": 0.6623,
"step": 4580
},
{
"epoch": 66.52,
"learning_rate": 3.9203389830508474e-05,
"loss": 0.6438,
"step": 4590
},
{
"epoch": 66.67,
"learning_rate": 3.903389830508475e-05,
"loss": 0.6983,
"step": 4600
},
{
"epoch": 66.81,
"learning_rate": 3.8864406779661014e-05,
"loss": 0.6905,
"step": 4610
},
{
"epoch": 66.96,
"learning_rate": 3.8694915254237294e-05,
"loss": 0.7062,
"step": 4620
},
{
"epoch": 67.1,
"learning_rate": 3.852542372881356e-05,
"loss": 0.6685,
"step": 4630
},
{
"epoch": 67.25,
"learning_rate": 3.835593220338983e-05,
"loss": 0.6545,
"step": 4640
},
{
"epoch": 67.39,
"learning_rate": 3.81864406779661e-05,
"loss": 0.6953,
"step": 4650
},
{
"epoch": 67.54,
"learning_rate": 3.801694915254237e-05,
"loss": 0.6793,
"step": 4660
},
{
"epoch": 67.68,
"learning_rate": 3.7847457627118646e-05,
"loss": 0.6059,
"step": 4670
},
{
"epoch": 67.83,
"learning_rate": 3.767796610169492e-05,
"loss": 0.6555,
"step": 4680
},
{
"epoch": 67.97,
"learning_rate": 3.750847457627119e-05,
"loss": 0.6261,
"step": 4690
},
{
"epoch": 68.12,
"learning_rate": 3.733898305084746e-05,
"loss": 0.7417,
"step": 4700
},
{
"epoch": 68.26,
"learning_rate": 3.716949152542373e-05,
"loss": 0.6732,
"step": 4710
},
{
"epoch": 68.41,
"learning_rate": 3.7e-05,
"loss": 0.7073,
"step": 4720
},
{
"epoch": 68.55,
"learning_rate": 3.683050847457628e-05,
"loss": 0.6299,
"step": 4730
},
{
"epoch": 68.7,
"learning_rate": 3.6661016949152544e-05,
"loss": 0.6789,
"step": 4740
},
{
"epoch": 68.84,
"learning_rate": 3.649152542372882e-05,
"loss": 0.6946,
"step": 4750
},
{
"epoch": 68.99,
"learning_rate": 3.6322033898305084e-05,
"loss": 0.6954,
"step": 4760
},
{
"epoch": 69.13,
"learning_rate": 3.615254237288136e-05,
"loss": 0.6609,
"step": 4770
},
{
"epoch": 69.28,
"learning_rate": 3.598305084745763e-05,
"loss": 0.7685,
"step": 4780
},
{
"epoch": 69.42,
"learning_rate": 3.5813559322033903e-05,
"loss": 0.6556,
"step": 4790
},
{
"epoch": 69.57,
"learning_rate": 3.564406779661017e-05,
"loss": 0.6377,
"step": 4800
},
{
"epoch": 69.71,
"learning_rate": 3.547457627118644e-05,
"loss": 0.667,
"step": 4810
},
{
"epoch": 69.86,
"learning_rate": 3.530508474576271e-05,
"loss": 0.6439,
"step": 4820
},
{
"epoch": 70.0,
"learning_rate": 3.513559322033899e-05,
"loss": 0.6266,
"step": 4830
},
{
"epoch": 70.14,
"learning_rate": 3.4966101694915256e-05,
"loss": 0.6563,
"step": 4840
},
{
"epoch": 70.29,
"learning_rate": 3.479661016949153e-05,
"loss": 0.606,
"step": 4850
},
{
"epoch": 70.43,
"learning_rate": 3.4627118644067795e-05,
"loss": 0.6662,
"step": 4860
},
{
"epoch": 70.58,
"learning_rate": 3.445762711864407e-05,
"loss": 0.6453,
"step": 4870
},
{
"epoch": 70.72,
"learning_rate": 3.428813559322034e-05,
"loss": 0.6812,
"step": 4880
},
{
"epoch": 70.87,
"learning_rate": 3.4118644067796615e-05,
"loss": 0.6655,
"step": 4890
},
{
"epoch": 71.01,
"learning_rate": 3.394915254237288e-05,
"loss": 0.6687,
"step": 4900
},
{
"epoch": 71.16,
"learning_rate": 3.3779661016949154e-05,
"loss": 0.6349,
"step": 4910
},
{
"epoch": 71.3,
"learning_rate": 3.361016949152542e-05,
"loss": 0.5908,
"step": 4920
},
{
"epoch": 71.45,
"learning_rate": 3.3440677966101694e-05,
"loss": 0.6648,
"step": 4930
},
{
"epoch": 71.59,
"learning_rate": 3.327118644067797e-05,
"loss": 0.6964,
"step": 4940
},
{
"epoch": 71.74,
"learning_rate": 3.310169491525424e-05,
"loss": 0.6482,
"step": 4950
},
{
"epoch": 71.88,
"learning_rate": 3.293220338983051e-05,
"loss": 0.7086,
"step": 4960
},
{
"epoch": 72.03,
"learning_rate": 3.276271186440678e-05,
"loss": 0.6146,
"step": 4970
},
{
"epoch": 72.17,
"learning_rate": 3.259322033898305e-05,
"loss": 0.6546,
"step": 4980
},
{
"epoch": 72.32,
"learning_rate": 3.2423728813559326e-05,
"loss": 0.608,
"step": 4990
},
{
"epoch": 72.46,
"learning_rate": 3.22542372881356e-05,
"loss": 0.6609,
"step": 5000
},
{
"epoch": 72.46,
"eval_loss": 0.2143816202878952,
"eval_runtime": 575.4482,
"eval_samples_per_second": 5.898,
"eval_steps_per_second": 0.739,
"eval_wer": 0.16206888050686585,
"step": 5000
},
{
"epoch": 72.61,
"learning_rate": 3.2084745762711865e-05,
"loss": 0.6483,
"step": 5010
},
{
"epoch": 72.75,
"learning_rate": 3.191525423728814e-05,
"loss": 0.5998,
"step": 5020
},
{
"epoch": 72.9,
"learning_rate": 3.1745762711864405e-05,
"loss": 0.6161,
"step": 5030
},
{
"epoch": 73.04,
"learning_rate": 3.157627118644068e-05,
"loss": 0.6856,
"step": 5040
},
{
"epoch": 73.19,
"learning_rate": 3.140677966101695e-05,
"loss": 0.6095,
"step": 5050
},
{
"epoch": 73.33,
"learning_rate": 3.1237288135593224e-05,
"loss": 0.6235,
"step": 5060
},
{
"epoch": 73.48,
"learning_rate": 3.106779661016949e-05,
"loss": 0.6827,
"step": 5070
},
{
"epoch": 73.62,
"learning_rate": 3.0898305084745764e-05,
"loss": 0.5903,
"step": 5080
},
{
"epoch": 73.77,
"learning_rate": 3.072881355932204e-05,
"loss": 0.7406,
"step": 5090
},
{
"epoch": 73.91,
"learning_rate": 3.055932203389831e-05,
"loss": 0.6621,
"step": 5100
},
{
"epoch": 74.06,
"learning_rate": 3.0389830508474577e-05,
"loss": 0.6619,
"step": 5110
},
{
"epoch": 74.2,
"learning_rate": 3.022033898305085e-05,
"loss": 0.6755,
"step": 5120
},
{
"epoch": 74.35,
"learning_rate": 3.005084745762712e-05,
"loss": 0.6568,
"step": 5130
},
{
"epoch": 74.49,
"learning_rate": 2.9881355932203393e-05,
"loss": 0.6244,
"step": 5140
},
{
"epoch": 74.64,
"learning_rate": 2.971186440677966e-05,
"loss": 0.6166,
"step": 5150
},
{
"epoch": 74.78,
"learning_rate": 2.9542372881355936e-05,
"loss": 0.6238,
"step": 5160
},
{
"epoch": 74.93,
"learning_rate": 2.9372881355932202e-05,
"loss": 0.7148,
"step": 5170
},
{
"epoch": 75.07,
"learning_rate": 2.9203389830508475e-05,
"loss": 0.6449,
"step": 5180
},
{
"epoch": 75.22,
"learning_rate": 2.9033898305084745e-05,
"loss": 0.6567,
"step": 5190
},
{
"epoch": 75.36,
"learning_rate": 2.8864406779661018e-05,
"loss": 0.6167,
"step": 5200
},
{
"epoch": 75.51,
"learning_rate": 2.8694915254237288e-05,
"loss": 0.6728,
"step": 5210
},
{
"epoch": 75.65,
"learning_rate": 2.852542372881356e-05,
"loss": 0.6267,
"step": 5220
},
{
"epoch": 75.8,
"learning_rate": 2.8355932203389834e-05,
"loss": 0.6707,
"step": 5230
},
{
"epoch": 75.94,
"learning_rate": 2.8186440677966104e-05,
"loss": 0.6005,
"step": 5240
},
{
"epoch": 76.09,
"learning_rate": 2.8016949152542377e-05,
"loss": 0.5838,
"step": 5250
},
{
"epoch": 76.23,
"learning_rate": 2.7847457627118643e-05,
"loss": 0.6954,
"step": 5260
},
{
"epoch": 76.38,
"learning_rate": 2.767796610169492e-05,
"loss": 0.6658,
"step": 5270
},
{
"epoch": 76.52,
"learning_rate": 2.7508474576271186e-05,
"loss": 0.6252,
"step": 5280
},
{
"epoch": 76.67,
"learning_rate": 2.733898305084746e-05,
"loss": 0.6967,
"step": 5290
},
{
"epoch": 76.81,
"learning_rate": 2.716949152542373e-05,
"loss": 0.6216,
"step": 5300
},
{
"epoch": 76.96,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.6392,
"step": 5310
},
{
"epoch": 77.1,
"learning_rate": 2.6830508474576272e-05,
"loss": 0.6266,
"step": 5320
},
{
"epoch": 77.25,
"learning_rate": 2.6661016949152545e-05,
"loss": 0.5908,
"step": 5330
},
{
"epoch": 77.39,
"learning_rate": 2.6491525423728815e-05,
"loss": 0.6788,
"step": 5340
},
{
"epoch": 77.54,
"learning_rate": 2.6322033898305088e-05,
"loss": 0.6592,
"step": 5350
},
{
"epoch": 77.68,
"learning_rate": 2.6152542372881355e-05,
"loss": 0.6038,
"step": 5360
},
{
"epoch": 77.83,
"learning_rate": 2.598305084745763e-05,
"loss": 0.5884,
"step": 5370
},
{
"epoch": 77.97,
"learning_rate": 2.5813559322033898e-05,
"loss": 0.6433,
"step": 5380
},
{
"epoch": 78.12,
"learning_rate": 2.564406779661017e-05,
"loss": 0.6976,
"step": 5390
},
{
"epoch": 78.26,
"learning_rate": 2.547457627118644e-05,
"loss": 0.6922,
"step": 5400
},
{
"epoch": 78.41,
"learning_rate": 2.5305084745762714e-05,
"loss": 0.6621,
"step": 5410
},
{
"epoch": 78.55,
"learning_rate": 2.5135593220338983e-05,
"loss": 0.6388,
"step": 5420
},
{
"epoch": 78.7,
"learning_rate": 2.4966101694915257e-05,
"loss": 0.628,
"step": 5430
},
{
"epoch": 78.84,
"learning_rate": 2.4796610169491526e-05,
"loss": 0.5507,
"step": 5440
},
{
"epoch": 78.99,
"learning_rate": 2.46271186440678e-05,
"loss": 0.6358,
"step": 5450
},
{
"epoch": 79.13,
"learning_rate": 2.445762711864407e-05,
"loss": 0.8279,
"step": 5460
},
{
"epoch": 79.28,
"learning_rate": 2.428813559322034e-05,
"loss": 0.6108,
"step": 5470
},
{
"epoch": 79.42,
"learning_rate": 2.4118644067796612e-05,
"loss": 0.6535,
"step": 5480
},
{
"epoch": 79.57,
"learning_rate": 2.3949152542372882e-05,
"loss": 0.5962,
"step": 5490
},
{
"epoch": 79.71,
"learning_rate": 2.3779661016949155e-05,
"loss": 0.6318,
"step": 5500
},
{
"epoch": 79.71,
"eval_loss": 0.21294616162776947,
"eval_runtime": 575.7769,
"eval_samples_per_second": 5.895,
"eval_steps_per_second": 0.738,
"eval_wer": 0.16001446309855452,
"step": 5500
},
{
"epoch": 79.86,
"learning_rate": 2.3610169491525425e-05,
"loss": 0.5991,
"step": 5510
},
{
"epoch": 80.0,
"learning_rate": 2.3440677966101695e-05,
"loss": 0.6389,
"step": 5520
},
{
"epoch": 80.14,
"learning_rate": 2.3271186440677968e-05,
"loss": 0.5969,
"step": 5530
},
{
"epoch": 80.29,
"learning_rate": 2.3101694915254237e-05,
"loss": 0.626,
"step": 5540
},
{
"epoch": 80.43,
"learning_rate": 2.2932203389830507e-05,
"loss": 0.6123,
"step": 5550
},
{
"epoch": 80.58,
"learning_rate": 2.276271186440678e-05,
"loss": 0.6056,
"step": 5560
},
{
"epoch": 80.72,
"learning_rate": 2.259322033898305e-05,
"loss": 0.6004,
"step": 5570
},
{
"epoch": 80.87,
"learning_rate": 2.2423728813559323e-05,
"loss": 0.5924,
"step": 5580
},
{
"epoch": 81.01,
"learning_rate": 2.2254237288135593e-05,
"loss": 0.6622,
"step": 5590
},
{
"epoch": 81.16,
"learning_rate": 2.2084745762711863e-05,
"loss": 0.6285,
"step": 5600
},
{
"epoch": 81.3,
"learning_rate": 2.1915254237288136e-05,
"loss": 0.6199,
"step": 5610
},
{
"epoch": 81.45,
"learning_rate": 2.174576271186441e-05,
"loss": 0.6496,
"step": 5620
},
{
"epoch": 81.59,
"learning_rate": 2.157627118644068e-05,
"loss": 0.632,
"step": 5630
},
{
"epoch": 81.74,
"learning_rate": 2.1406779661016952e-05,
"loss": 0.5825,
"step": 5640
},
{
"epoch": 81.88,
"learning_rate": 2.1237288135593222e-05,
"loss": 0.7035,
"step": 5650
},
{
"epoch": 82.03,
"learning_rate": 2.1067796610169495e-05,
"loss": 0.5958,
"step": 5660
},
{
"epoch": 82.17,
"learning_rate": 2.0898305084745765e-05,
"loss": 0.6213,
"step": 5670
},
{
"epoch": 82.32,
"learning_rate": 2.0728813559322035e-05,
"loss": 0.621,
"step": 5680
},
{
"epoch": 82.46,
"learning_rate": 2.0559322033898308e-05,
"loss": 0.5801,
"step": 5690
},
{
"epoch": 82.61,
"learning_rate": 2.0389830508474577e-05,
"loss": 0.6511,
"step": 5700
},
{
"epoch": 82.75,
"learning_rate": 2.0220338983050847e-05,
"loss": 0.6268,
"step": 5710
},
{
"epoch": 82.9,
"learning_rate": 2.005084745762712e-05,
"loss": 0.642,
"step": 5720
},
{
"epoch": 83.04,
"learning_rate": 1.988135593220339e-05,
"loss": 0.6158,
"step": 5730
},
{
"epoch": 83.19,
"learning_rate": 1.9711864406779663e-05,
"loss": 0.6171,
"step": 5740
},
{
"epoch": 83.33,
"learning_rate": 1.9542372881355933e-05,
"loss": 0.5943,
"step": 5750
},
{
"epoch": 83.48,
"learning_rate": 1.9372881355932203e-05,
"loss": 0.6312,
"step": 5760
},
{
"epoch": 83.62,
"learning_rate": 1.9203389830508476e-05,
"loss": 0.6128,
"step": 5770
},
{
"epoch": 83.77,
"learning_rate": 1.9033898305084746e-05,
"loss": 0.6094,
"step": 5780
},
{
"epoch": 83.91,
"learning_rate": 1.886440677966102e-05,
"loss": 0.6403,
"step": 5790
},
{
"epoch": 84.06,
"learning_rate": 1.869491525423729e-05,
"loss": 0.5933,
"step": 5800
},
{
"epoch": 84.2,
"learning_rate": 1.852542372881356e-05,
"loss": 0.5684,
"step": 5810
},
{
"epoch": 84.35,
"learning_rate": 1.835593220338983e-05,
"loss": 0.6025,
"step": 5820
},
{
"epoch": 84.49,
"learning_rate": 1.81864406779661e-05,
"loss": 0.6225,
"step": 5830
},
{
"epoch": 84.64,
"learning_rate": 1.8016949152542374e-05,
"loss": 0.6246,
"step": 5840
},
{
"epoch": 84.78,
"learning_rate": 1.7847457627118644e-05,
"loss": 0.6186,
"step": 5850
},
{
"epoch": 84.93,
"learning_rate": 1.7677966101694914e-05,
"loss": 0.6706,
"step": 5860
},
{
"epoch": 85.07,
"learning_rate": 1.7508474576271187e-05,
"loss": 0.5923,
"step": 5870
},
{
"epoch": 85.22,
"learning_rate": 1.7338983050847457e-05,
"loss": 0.5892,
"step": 5880
},
{
"epoch": 85.36,
"learning_rate": 1.716949152542373e-05,
"loss": 0.6155,
"step": 5890
},
{
"epoch": 85.51,
"learning_rate": 1.7000000000000003e-05,
"loss": 0.545,
"step": 5900
},
{
"epoch": 85.65,
"learning_rate": 1.6830508474576273e-05,
"loss": 0.6541,
"step": 5910
},
{
"epoch": 85.8,
"learning_rate": 1.6661016949152543e-05,
"loss": 0.7079,
"step": 5920
},
{
"epoch": 85.94,
"learning_rate": 1.6491525423728816e-05,
"loss": 0.58,
"step": 5930
},
{
"epoch": 86.09,
"learning_rate": 1.6322033898305086e-05,
"loss": 0.631,
"step": 5940
},
{
"epoch": 86.23,
"learning_rate": 1.615254237288136e-05,
"loss": 0.6232,
"step": 5950
},
{
"epoch": 86.38,
"learning_rate": 1.598305084745763e-05,
"loss": 0.5722,
"step": 5960
},
{
"epoch": 86.52,
"learning_rate": 1.58135593220339e-05,
"loss": 0.6361,
"step": 5970
},
{
"epoch": 86.67,
"learning_rate": 1.564406779661017e-05,
"loss": 0.6603,
"step": 5980
},
{
"epoch": 86.81,
"learning_rate": 1.547457627118644e-05,
"loss": 0.6191,
"step": 5990
},
{
"epoch": 86.96,
"learning_rate": 1.5305084745762714e-05,
"loss": 0.6222,
"step": 6000
},
{
"epoch": 86.96,
"eval_loss": 0.2124408483505249,
"eval_runtime": 572.9918,
"eval_samples_per_second": 5.923,
"eval_steps_per_second": 0.742,
"eval_wer": 0.158223011118507,
"step": 6000
},
{
"epoch": 87.1,
"learning_rate": 1.5135593220338984e-05,
"loss": 0.5942,
"step": 6010
},
{
"epoch": 87.25,
"learning_rate": 1.4966101694915256e-05,
"loss": 0.6446,
"step": 6020
},
{
"epoch": 87.39,
"learning_rate": 1.4796610169491525e-05,
"loss": 0.5991,
"step": 6030
},
{
"epoch": 87.54,
"learning_rate": 1.4627118644067797e-05,
"loss": 0.6077,
"step": 6040
},
{
"epoch": 87.68,
"learning_rate": 1.4457627118644068e-05,
"loss": 0.5856,
"step": 6050
},
{
"epoch": 87.83,
"learning_rate": 1.428813559322034e-05,
"loss": 0.6133,
"step": 6060
},
{
"epoch": 87.97,
"learning_rate": 1.411864406779661e-05,
"loss": 0.5992,
"step": 6070
},
{
"epoch": 88.12,
"learning_rate": 1.3949152542372881e-05,
"loss": 0.6122,
"step": 6080
},
{
"epoch": 88.26,
"learning_rate": 1.3779661016949153e-05,
"loss": 0.6178,
"step": 6090
},
{
"epoch": 88.41,
"learning_rate": 1.3610169491525424e-05,
"loss": 0.6505,
"step": 6100
},
{
"epoch": 88.55,
"learning_rate": 1.3440677966101695e-05,
"loss": 0.5641,
"step": 6110
},
{
"epoch": 88.7,
"learning_rate": 1.3271186440677965e-05,
"loss": 0.6329,
"step": 6120
},
{
"epoch": 88.84,
"learning_rate": 1.3101694915254237e-05,
"loss": 0.6119,
"step": 6130
},
{
"epoch": 88.99,
"learning_rate": 1.2932203389830508e-05,
"loss": 0.6043,
"step": 6140
},
{
"epoch": 89.13,
"learning_rate": 1.276271186440678e-05,
"loss": 0.6661,
"step": 6150
},
{
"epoch": 89.28,
"learning_rate": 1.2593220338983053e-05,
"loss": 0.5938,
"step": 6160
},
{
"epoch": 89.42,
"learning_rate": 1.2423728813559323e-05,
"loss": 0.6135,
"step": 6170
},
{
"epoch": 89.57,
"learning_rate": 1.2254237288135594e-05,
"loss": 0.6354,
"step": 6180
},
{
"epoch": 89.71,
"learning_rate": 1.2084745762711865e-05,
"loss": 0.5597,
"step": 6190
},
{
"epoch": 89.86,
"learning_rate": 1.1915254237288135e-05,
"loss": 0.6065,
"step": 6200
},
{
"epoch": 90.0,
"learning_rate": 1.1745762711864407e-05,
"loss": 0.5579,
"step": 6210
},
{
"epoch": 90.14,
"learning_rate": 1.157627118644068e-05,
"loss": 0.5914,
"step": 6220
},
{
"epoch": 90.29,
"learning_rate": 1.140677966101695e-05,
"loss": 0.6552,
"step": 6230
},
{
"epoch": 90.43,
"learning_rate": 1.1237288135593221e-05,
"loss": 0.5762,
"step": 6240
},
{
"epoch": 90.58,
"learning_rate": 1.1067796610169492e-05,
"loss": 0.5902,
"step": 6250
},
{
"epoch": 90.72,
"learning_rate": 1.0898305084745764e-05,
"loss": 0.5821,
"step": 6260
},
{
"epoch": 90.87,
"learning_rate": 1.0728813559322035e-05,
"loss": 0.6163,
"step": 6270
},
{
"epoch": 91.01,
"learning_rate": 1.0559322033898305e-05,
"loss": 0.6451,
"step": 6280
},
{
"epoch": 91.16,
"learning_rate": 1.0389830508474577e-05,
"loss": 0.6399,
"step": 6290
},
{
"epoch": 91.3,
"learning_rate": 1.0220338983050848e-05,
"loss": 0.6132,
"step": 6300
},
{
"epoch": 91.45,
"learning_rate": 1.005084745762712e-05,
"loss": 0.612,
"step": 6310
},
{
"epoch": 91.59,
"learning_rate": 9.88135593220339e-06,
"loss": 0.5939,
"step": 6320
},
{
"epoch": 91.74,
"learning_rate": 9.71186440677966e-06,
"loss": 0.6379,
"step": 6330
},
{
"epoch": 91.88,
"learning_rate": 9.542372881355932e-06,
"loss": 0.6063,
"step": 6340
},
{
"epoch": 92.03,
"learning_rate": 9.372881355932204e-06,
"loss": 0.6005,
"step": 6350
},
{
"epoch": 92.17,
"learning_rate": 9.203389830508475e-06,
"loss": 0.6076,
"step": 6360
},
{
"epoch": 92.32,
"learning_rate": 9.033898305084747e-06,
"loss": 0.6082,
"step": 6370
},
{
"epoch": 92.46,
"learning_rate": 8.864406779661018e-06,
"loss": 0.6571,
"step": 6380
},
{
"epoch": 92.61,
"learning_rate": 8.69491525423729e-06,
"loss": 0.5666,
"step": 6390
},
{
"epoch": 92.75,
"learning_rate": 8.52542372881356e-06,
"loss": 0.5932,
"step": 6400
},
{
"epoch": 92.9,
"learning_rate": 8.35593220338983e-06,
"loss": 0.6317,
"step": 6410
},
{
"epoch": 93.04,
"learning_rate": 8.186440677966102e-06,
"loss": 0.6665,
"step": 6420
},
{
"epoch": 93.19,
"learning_rate": 8.016949152542374e-06,
"loss": 0.5687,
"step": 6430
},
{
"epoch": 93.33,
"learning_rate": 7.847457627118643e-06,
"loss": 0.5904,
"step": 6440
},
{
"epoch": 93.48,
"learning_rate": 7.677966101694915e-06,
"loss": 0.7073,
"step": 6450
},
{
"epoch": 93.62,
"learning_rate": 7.508474576271186e-06,
"loss": 0.619,
"step": 6460
},
{
"epoch": 93.77,
"learning_rate": 7.338983050847458e-06,
"loss": 0.6203,
"step": 6470
},
{
"epoch": 93.91,
"learning_rate": 7.1694915254237284e-06,
"loss": 0.6576,
"step": 6480
},
{
"epoch": 94.06,
"learning_rate": 7.000000000000001e-06,
"loss": 0.6249,
"step": 6490
},
{
"epoch": 94.2,
"learning_rate": 6.830508474576272e-06,
"loss": 0.588,
"step": 6500
},
{
"epoch": 94.2,
"eval_loss": 0.2143363356590271,
"eval_runtime": 573.5555,
"eval_samples_per_second": 5.917,
"eval_steps_per_second": 0.741,
"eval_wer": 0.15602067565679725,
"step": 6500
},
{
"epoch": 94.35,
"learning_rate": 6.661016949152543e-06,
"loss": 0.669,
"step": 6510
},
{
"epoch": 94.49,
"learning_rate": 6.491525423728814e-06,
"loss": 0.5974,
"step": 6520
},
{
"epoch": 94.64,
"learning_rate": 6.322033898305085e-06,
"loss": 0.6671,
"step": 6530
},
{
"epoch": 94.78,
"learning_rate": 6.152542372881356e-06,
"loss": 0.6735,
"step": 6540
},
{
"epoch": 94.93,
"learning_rate": 5.983050847457628e-06,
"loss": 0.6356,
"step": 6550
},
{
"epoch": 95.07,
"learning_rate": 5.813559322033898e-06,
"loss": 0.6135,
"step": 6560
},
{
"epoch": 95.22,
"learning_rate": 5.64406779661017e-06,
"loss": 0.6403,
"step": 6570
},
{
"epoch": 95.36,
"learning_rate": 5.4745762711864405e-06,
"loss": 0.5574,
"step": 6580
},
{
"epoch": 95.51,
"learning_rate": 5.305084745762713e-06,
"loss": 0.552,
"step": 6590
},
{
"epoch": 95.65,
"learning_rate": 5.135593220338983e-06,
"loss": 0.5979,
"step": 6600
},
{
"epoch": 95.8,
"learning_rate": 4.966101694915255e-06,
"loss": 0.6342,
"step": 6610
},
{
"epoch": 95.94,
"learning_rate": 4.7966101694915255e-06,
"loss": 0.6397,
"step": 6620
},
{
"epoch": 96.09,
"learning_rate": 4.627118644067797e-06,
"loss": 0.5847,
"step": 6630
},
{
"epoch": 96.23,
"learning_rate": 4.4576271186440676e-06,
"loss": 0.5736,
"step": 6640
},
{
"epoch": 96.38,
"learning_rate": 4.288135593220339e-06,
"loss": 0.6148,
"step": 6650
},
{
"epoch": 96.52,
"learning_rate": 4.1186440677966105e-06,
"loss": 0.6084,
"step": 6660
},
{
"epoch": 96.67,
"learning_rate": 3.949152542372882e-06,
"loss": 0.5835,
"step": 6670
},
{
"epoch": 96.81,
"learning_rate": 3.779661016949153e-06,
"loss": 0.5791,
"step": 6680
},
{
"epoch": 96.96,
"learning_rate": 3.610169491525424e-06,
"loss": 0.7132,
"step": 6690
},
{
"epoch": 97.1,
"learning_rate": 3.440677966101695e-06,
"loss": 0.5572,
"step": 6700
},
{
"epoch": 97.25,
"learning_rate": 3.271186440677966e-06,
"loss": 0.6607,
"step": 6710
},
{
"epoch": 97.39,
"learning_rate": 3.1016949152542375e-06,
"loss": 0.6343,
"step": 6720
},
{
"epoch": 97.54,
"learning_rate": 2.9322033898305086e-06,
"loss": 0.6126,
"step": 6730
},
{
"epoch": 97.68,
"learning_rate": 2.76271186440678e-06,
"loss": 0.6536,
"step": 6740
},
{
"epoch": 97.83,
"learning_rate": 2.593220338983051e-06,
"loss": 0.5843,
"step": 6750
},
{
"epoch": 97.97,
"learning_rate": 2.423728813559322e-06,
"loss": 0.5874,
"step": 6760
},
{
"epoch": 98.12,
"learning_rate": 2.254237288135593e-06,
"loss": 0.6067,
"step": 6770
},
{
"epoch": 98.26,
"learning_rate": 2.0847457627118646e-06,
"loss": 0.5626,
"step": 6780
},
{
"epoch": 98.41,
"learning_rate": 1.9152542372881356e-06,
"loss": 0.6659,
"step": 6790
},
{
"epoch": 98.55,
"learning_rate": 1.7457627118644067e-06,
"loss": 0.6462,
"step": 6800
},
{
"epoch": 98.7,
"learning_rate": 1.5762711864406781e-06,
"loss": 0.67,
"step": 6810
},
{
"epoch": 98.84,
"learning_rate": 1.4067796610169492e-06,
"loss": 0.5734,
"step": 6820
},
{
"epoch": 98.99,
"learning_rate": 1.2372881355932204e-06,
"loss": 0.6133,
"step": 6830
},
{
"epoch": 99.13,
"learning_rate": 1.0677966101694917e-06,
"loss": 0.596,
"step": 6840
},
{
"epoch": 99.28,
"learning_rate": 9.152542372881356e-07,
"loss": 0.6763,
"step": 6850
},
{
"epoch": 99.42,
"learning_rate": 7.457627118644068e-07,
"loss": 0.6368,
"step": 6860
},
{
"epoch": 99.57,
"learning_rate": 5.76271186440678e-07,
"loss": 0.6463,
"step": 6870
},
{
"epoch": 99.71,
"learning_rate": 4.0677966101694916e-07,
"loss": 0.62,
"step": 6880
},
{
"epoch": 99.86,
"learning_rate": 2.3728813559322033e-07,
"loss": 0.6346,
"step": 6890
},
{
"epoch": 100.0,
"learning_rate": 6.779661016949153e-08,
"loss": 0.5672,
"step": 6900
},
{
"epoch": 100.0,
"step": 6900,
"total_flos": 1.0025325448199992e+20,
"train_loss": 1.2894020353538402,
"train_runtime": 52505.7639,
"train_samples_per_second": 4.179,
"train_steps_per_second": 0.131
}
],
"max_steps": 6900,
"num_train_epochs": 100,
"total_flos": 1.0025325448199992e+20,
"trial_name": null,
"trial_params": null
}