wav2vec2-300m-mls-german-ft / trainer_state.json
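This file is the Trainer's saved state for the fine-tuning run: `log_history` records the training loss and learning rate every 10 optimization steps, plus a full evaluation pass (eval_loss, eval_wer, runtime) every 500 steps. A minimal sketch of how the log could be inspected, assuming the file has been downloaded locally as `trainer_state.json` (the filename and paths here are illustrative, not part of the state format itself):

```python
import json

# Split the Trainer's log_history into training-loss entries and eval entries,
# then report the lowest word error rate (eval_wer) recorded so far.
with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

best = min(eval_logs, key=lambda e: e["eval_wer"])
print(f"{len(train_logs)} training log entries, {len(eval_logs)} eval entries")
print(f"best eval_wer so far: {best['eval_wer']:.4f} at step {best['step']}")
```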
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 200.0,
"global_step": 13800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 9e-07,
"loss": 11.2286,
"step": 10
},
{
"epoch": 0.29,
"learning_rate": 1.8e-06,
"loss": 12.1103,
"step": 20
},
{
"epoch": 0.43,
"learning_rate": 2.8000000000000003e-06,
"loss": 11.9593,
"step": 30
},
{
"epoch": 0.58,
"learning_rate": 3.8e-06,
"loss": 11.6711,
"step": 40
},
{
"epoch": 0.72,
"learning_rate": 4.800000000000001e-06,
"loss": 11.9783,
"step": 50
},
{
"epoch": 0.87,
"learning_rate": 5.8e-06,
"loss": 10.9667,
"step": 60
},
{
"epoch": 1.01,
"learning_rate": 6.800000000000001e-06,
"loss": 12.1446,
"step": 70
},
{
"epoch": 1.16,
"learning_rate": 7.8e-06,
"loss": 10.2963,
"step": 80
},
{
"epoch": 1.3,
"learning_rate": 8.7e-06,
"loss": 10.8899,
"step": 90
},
{
"epoch": 1.45,
"learning_rate": 9.7e-06,
"loss": 9.5968,
"step": 100
},
{
"epoch": 1.59,
"learning_rate": 1.0700000000000001e-05,
"loss": 7.6591,
"step": 110
},
{
"epoch": 1.74,
"learning_rate": 1.1700000000000001e-05,
"loss": 7.5159,
"step": 120
},
{
"epoch": 1.88,
"learning_rate": 1.27e-05,
"loss": 6.2925,
"step": 130
},
{
"epoch": 2.03,
"learning_rate": 1.3700000000000001e-05,
"loss": 5.7639,
"step": 140
},
{
"epoch": 2.17,
"learning_rate": 1.47e-05,
"loss": 5.6696,
"step": 150
},
{
"epoch": 2.32,
"learning_rate": 1.5700000000000002e-05,
"loss": 4.7696,
"step": 160
},
{
"epoch": 2.46,
"learning_rate": 1.6700000000000003e-05,
"loss": 5.1383,
"step": 170
},
{
"epoch": 2.61,
"learning_rate": 1.77e-05,
"loss": 4.5714,
"step": 180
},
{
"epoch": 2.75,
"learning_rate": 1.87e-05,
"loss": 4.5116,
"step": 190
},
{
"epoch": 2.9,
"learning_rate": 1.97e-05,
"loss": 4.3733,
"step": 200
},
{
"epoch": 3.04,
"learning_rate": 2.07e-05,
"loss": 3.8443,
"step": 210
},
{
"epoch": 3.19,
"learning_rate": 2.1700000000000002e-05,
"loss": 3.9388,
"step": 220
},
{
"epoch": 3.33,
"learning_rate": 2.2700000000000003e-05,
"loss": 3.8416,
"step": 230
},
{
"epoch": 3.48,
"learning_rate": 2.37e-05,
"loss": 3.7135,
"step": 240
},
{
"epoch": 3.62,
"learning_rate": 2.47e-05,
"loss": 3.5271,
"step": 250
},
{
"epoch": 3.77,
"learning_rate": 2.57e-05,
"loss": 3.5189,
"step": 260
},
{
"epoch": 3.91,
"learning_rate": 2.6700000000000002e-05,
"loss": 3.5816,
"step": 270
},
{
"epoch": 4.06,
"learning_rate": 2.7700000000000002e-05,
"loss": 3.3417,
"step": 280
},
{
"epoch": 4.2,
"learning_rate": 2.87e-05,
"loss": 3.5864,
"step": 290
},
{
"epoch": 4.35,
"learning_rate": 2.97e-05,
"loss": 3.327,
"step": 300
},
{
"epoch": 4.49,
"learning_rate": 3.07e-05,
"loss": 3.205,
"step": 310
},
{
"epoch": 4.64,
"learning_rate": 3.1700000000000005e-05,
"loss": 3.1087,
"step": 320
},
{
"epoch": 4.78,
"learning_rate": 3.27e-05,
"loss": 3.1126,
"step": 330
},
{
"epoch": 4.93,
"learning_rate": 3.3700000000000006e-05,
"loss": 3.1934,
"step": 340
},
{
"epoch": 5.07,
"learning_rate": 3.4699999999999996e-05,
"loss": 3.0881,
"step": 350
},
{
"epoch": 5.22,
"learning_rate": 3.57e-05,
"loss": 3.086,
"step": 360
},
{
"epoch": 5.36,
"learning_rate": 3.6700000000000004e-05,
"loss": 3.0304,
"step": 370
},
{
"epoch": 5.51,
"learning_rate": 3.77e-05,
"loss": 3.0262,
"step": 380
},
{
"epoch": 5.65,
"learning_rate": 3.8700000000000006e-05,
"loss": 3.0492,
"step": 390
},
{
"epoch": 5.8,
"learning_rate": 3.97e-05,
"loss": 3.0657,
"step": 400
},
{
"epoch": 5.94,
"learning_rate": 4.07e-05,
"loss": 2.9912,
"step": 410
},
{
"epoch": 6.09,
"learning_rate": 4.17e-05,
"loss": 2.9993,
"step": 420
},
{
"epoch": 6.23,
"learning_rate": 4.27e-05,
"loss": 3.0465,
"step": 430
},
{
"epoch": 6.38,
"learning_rate": 4.3700000000000005e-05,
"loss": 2.9501,
"step": 440
},
{
"epoch": 6.52,
"learning_rate": 4.47e-05,
"loss": 3.0918,
"step": 450
},
{
"epoch": 6.67,
"learning_rate": 4.5700000000000006e-05,
"loss": 3.0306,
"step": 460
},
{
"epoch": 6.81,
"learning_rate": 4.6700000000000003e-05,
"loss": 2.9349,
"step": 470
},
{
"epoch": 6.96,
"learning_rate": 4.77e-05,
"loss": 3.0044,
"step": 480
},
{
"epoch": 7.1,
"learning_rate": 4.87e-05,
"loss": 2.9541,
"step": 490
},
{
"epoch": 7.25,
"learning_rate": 4.97e-05,
"loss": 3.0132,
"step": 500
},
{
"epoch": 7.25,
"eval_loss": 2.9393208026885986,
"eval_runtime": 580.9987,
"eval_samples_per_second": 5.842,
"eval_steps_per_second": 0.731,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 7.39,
"learning_rate": 5.0700000000000006e-05,
"loss": 2.9162,
"step": 510
},
{
"epoch": 7.54,
"learning_rate": 5.17e-05,
"loss": 2.9309,
"step": 520
},
{
"epoch": 7.68,
"learning_rate": 5.270000000000001e-05,
"loss": 3.0513,
"step": 530
},
{
"epoch": 7.83,
"learning_rate": 5.3700000000000004e-05,
"loss": 2.948,
"step": 540
},
{
"epoch": 7.97,
"learning_rate": 5.470000000000001e-05,
"loss": 2.9518,
"step": 550
},
{
"epoch": 8.12,
"learning_rate": 5.5700000000000005e-05,
"loss": 2.9229,
"step": 560
},
{
"epoch": 8.26,
"learning_rate": 5.6699999999999996e-05,
"loss": 2.9511,
"step": 570
},
{
"epoch": 8.41,
"learning_rate": 5.77e-05,
"loss": 2.9427,
"step": 580
},
{
"epoch": 8.55,
"learning_rate": 5.87e-05,
"loss": 2.9603,
"step": 590
},
{
"epoch": 8.7,
"learning_rate": 5.97e-05,
"loss": 2.9397,
"step": 600
},
{
"epoch": 8.84,
"learning_rate": 6.07e-05,
"loss": 2.9267,
"step": 610
},
{
"epoch": 8.99,
"learning_rate": 6.170000000000001e-05,
"loss": 3.0164,
"step": 620
},
{
"epoch": 9.13,
"learning_rate": 6.27e-05,
"loss": 2.913,
"step": 630
},
{
"epoch": 9.28,
"learning_rate": 6.37e-05,
"loss": 2.9382,
"step": 640
},
{
"epoch": 9.42,
"learning_rate": 6.47e-05,
"loss": 2.9905,
"step": 650
},
{
"epoch": 9.57,
"learning_rate": 6.570000000000001e-05,
"loss": 2.8993,
"step": 660
},
{
"epoch": 9.71,
"learning_rate": 6.670000000000001e-05,
"loss": 2.9829,
"step": 670
},
{
"epoch": 9.86,
"learning_rate": 6.77e-05,
"loss": 2.9161,
"step": 680
},
{
"epoch": 10.0,
"learning_rate": 6.87e-05,
"loss": 3.0016,
"step": 690
},
{
"epoch": 10.14,
"learning_rate": 6.97e-05,
"loss": 2.9328,
"step": 700
},
{
"epoch": 10.29,
"learning_rate": 7.07e-05,
"loss": 3.0272,
"step": 710
},
{
"epoch": 10.43,
"learning_rate": 7.17e-05,
"loss": 2.9243,
"step": 720
},
{
"epoch": 10.58,
"learning_rate": 7.27e-05,
"loss": 2.9345,
"step": 730
},
{
"epoch": 10.72,
"learning_rate": 7.37e-05,
"loss": 2.9191,
"step": 740
},
{
"epoch": 10.87,
"learning_rate": 7.47e-05,
"loss": 2.8938,
"step": 750
},
{
"epoch": 11.01,
"learning_rate": 7.570000000000001e-05,
"loss": 2.9083,
"step": 760
},
{
"epoch": 11.16,
"learning_rate": 7.670000000000001e-05,
"loss": 2.9408,
"step": 770
},
{
"epoch": 11.3,
"learning_rate": 7.77e-05,
"loss": 2.9428,
"step": 780
},
{
"epoch": 11.45,
"learning_rate": 7.87e-05,
"loss": 2.9443,
"step": 790
},
{
"epoch": 11.59,
"learning_rate": 7.970000000000001e-05,
"loss": 2.9159,
"step": 800
},
{
"epoch": 11.74,
"learning_rate": 8.070000000000001e-05,
"loss": 2.9294,
"step": 810
},
{
"epoch": 11.88,
"learning_rate": 8.17e-05,
"loss": 2.8927,
"step": 820
},
{
"epoch": 12.03,
"learning_rate": 8.27e-05,
"loss": 2.9385,
"step": 830
},
{
"epoch": 12.17,
"learning_rate": 8.37e-05,
"loss": 2.9407,
"step": 840
},
{
"epoch": 12.32,
"learning_rate": 8.47e-05,
"loss": 2.8872,
"step": 850
},
{
"epoch": 12.46,
"learning_rate": 8.57e-05,
"loss": 2.9336,
"step": 860
},
{
"epoch": 12.61,
"learning_rate": 8.67e-05,
"loss": 2.8768,
"step": 870
},
{
"epoch": 12.75,
"learning_rate": 8.77e-05,
"loss": 2.9426,
"step": 880
},
{
"epoch": 12.9,
"learning_rate": 8.87e-05,
"loss": 2.8981,
"step": 890
},
{
"epoch": 13.04,
"learning_rate": 8.970000000000001e-05,
"loss": 2.8928,
"step": 900
},
{
"epoch": 13.19,
"learning_rate": 9.070000000000001e-05,
"loss": 2.9346,
"step": 910
},
{
"epoch": 13.33,
"learning_rate": 9.17e-05,
"loss": 2.89,
"step": 920
},
{
"epoch": 13.48,
"learning_rate": 9.27e-05,
"loss": 2.9107,
"step": 930
},
{
"epoch": 13.62,
"learning_rate": 9.370000000000001e-05,
"loss": 2.8732,
"step": 940
},
{
"epoch": 13.77,
"learning_rate": 9.47e-05,
"loss": 2.9441,
"step": 950
},
{
"epoch": 13.91,
"learning_rate": 9.57e-05,
"loss": 2.9136,
"step": 960
},
{
"epoch": 14.06,
"learning_rate": 9.67e-05,
"loss": 2.8773,
"step": 970
},
{
"epoch": 14.2,
"learning_rate": 9.77e-05,
"loss": 2.9015,
"step": 980
},
{
"epoch": 14.35,
"learning_rate": 9.87e-05,
"loss": 2.9168,
"step": 990
},
{
"epoch": 14.49,
"learning_rate": 9.970000000000001e-05,
"loss": 2.9241,
"step": 1000
},
{
"epoch": 14.49,
"eval_loss": 2.8734302520751953,
"eval_runtime": 570.6581,
"eval_samples_per_second": 5.948,
"eval_steps_per_second": 0.745,
"eval_wer": 1.0,
"step": 1000
},
{
"epoch": 14.64,
"learning_rate": 9.988135593220339e-05,
"loss": 2.8693,
"step": 1010
},
{
"epoch": 14.78,
"learning_rate": 9.971186440677967e-05,
"loss": 2.9199,
"step": 1020
},
{
"epoch": 14.93,
"learning_rate": 9.954237288135594e-05,
"loss": 2.8831,
"step": 1030
},
{
"epoch": 15.07,
"learning_rate": 9.937288135593222e-05,
"loss": 2.8716,
"step": 1040
},
{
"epoch": 15.22,
"learning_rate": 9.920338983050847e-05,
"loss": 2.9457,
"step": 1050
},
{
"epoch": 15.36,
"learning_rate": 9.903389830508475e-05,
"loss": 2.8708,
"step": 1060
},
{
"epoch": 15.51,
"learning_rate": 9.886440677966103e-05,
"loss": 2.8595,
"step": 1070
},
{
"epoch": 15.65,
"learning_rate": 9.86949152542373e-05,
"loss": 2.8718,
"step": 1080
},
{
"epoch": 15.8,
"learning_rate": 9.852542372881356e-05,
"loss": 2.8383,
"step": 1090
},
{
"epoch": 15.94,
"learning_rate": 9.835593220338983e-05,
"loss": 2.8016,
"step": 1100
},
{
"epoch": 16.09,
"learning_rate": 9.818644067796611e-05,
"loss": 2.7608,
"step": 1110
},
{
"epoch": 16.23,
"learning_rate": 9.801694915254239e-05,
"loss": 2.7417,
"step": 1120
},
{
"epoch": 16.38,
"learning_rate": 9.784745762711864e-05,
"loss": 2.6906,
"step": 1130
},
{
"epoch": 16.52,
"learning_rate": 9.767796610169492e-05,
"loss": 2.5662,
"step": 1140
},
{
"epoch": 16.67,
"learning_rate": 9.750847457627119e-05,
"loss": 2.4202,
"step": 1150
},
{
"epoch": 16.81,
"learning_rate": 9.733898305084747e-05,
"loss": 2.4102,
"step": 1160
},
{
"epoch": 16.96,
"learning_rate": 9.716949152542373e-05,
"loss": 2.185,
"step": 1170
},
{
"epoch": 17.1,
"learning_rate": 9.7e-05,
"loss": 1.9315,
"step": 1180
},
{
"epoch": 17.25,
"learning_rate": 9.683050847457628e-05,
"loss": 2.0025,
"step": 1190
},
{
"epoch": 17.39,
"learning_rate": 9.666101694915255e-05,
"loss": 1.8797,
"step": 1200
},
{
"epoch": 17.54,
"learning_rate": 9.649152542372883e-05,
"loss": 1.7524,
"step": 1210
},
{
"epoch": 17.68,
"learning_rate": 9.632203389830509e-05,
"loss": 1.7243,
"step": 1220
},
{
"epoch": 17.83,
"learning_rate": 9.615254237288136e-05,
"loss": 1.5859,
"step": 1230
},
{
"epoch": 17.97,
"learning_rate": 9.598305084745764e-05,
"loss": 1.5059,
"step": 1240
},
{
"epoch": 18.12,
"learning_rate": 9.58135593220339e-05,
"loss": 1.456,
"step": 1250
},
{
"epoch": 18.26,
"learning_rate": 9.564406779661017e-05,
"loss": 1.535,
"step": 1260
},
{
"epoch": 18.41,
"learning_rate": 9.547457627118644e-05,
"loss": 1.4808,
"step": 1270
},
{
"epoch": 18.55,
"learning_rate": 9.530508474576272e-05,
"loss": 1.4568,
"step": 1280
},
{
"epoch": 18.7,
"learning_rate": 9.5135593220339e-05,
"loss": 1.3898,
"step": 1290
},
{
"epoch": 18.84,
"learning_rate": 9.496610169491525e-05,
"loss": 1.4865,
"step": 1300
},
{
"epoch": 18.99,
"learning_rate": 9.479661016949153e-05,
"loss": 1.391,
"step": 1310
},
{
"epoch": 19.13,
"learning_rate": 9.46271186440678e-05,
"loss": 1.4236,
"step": 1320
},
{
"epoch": 19.28,
"learning_rate": 9.445762711864408e-05,
"loss": 1.303,
"step": 1330
},
{
"epoch": 19.42,
"learning_rate": 9.428813559322034e-05,
"loss": 1.2304,
"step": 1340
},
{
"epoch": 19.57,
"learning_rate": 9.411864406779661e-05,
"loss": 1.2014,
"step": 1350
},
{
"epoch": 19.71,
"learning_rate": 9.394915254237289e-05,
"loss": 1.2398,
"step": 1360
},
{
"epoch": 19.86,
"learning_rate": 9.377966101694916e-05,
"loss": 1.1895,
"step": 1370
},
{
"epoch": 20.0,
"learning_rate": 9.361016949152542e-05,
"loss": 1.2296,
"step": 1380
},
{
"epoch": 20.14,
"learning_rate": 9.34406779661017e-05,
"loss": 1.3624,
"step": 1390
},
{
"epoch": 20.29,
"learning_rate": 9.327118644067797e-05,
"loss": 1.1805,
"step": 1400
},
{
"epoch": 20.43,
"learning_rate": 9.310169491525425e-05,
"loss": 1.1705,
"step": 1410
},
{
"epoch": 20.58,
"learning_rate": 9.29322033898305e-05,
"loss": 1.1446,
"step": 1420
},
{
"epoch": 20.72,
"learning_rate": 9.276271186440678e-05,
"loss": 1.2016,
"step": 1430
},
{
"epoch": 20.87,
"learning_rate": 9.259322033898306e-05,
"loss": 1.1053,
"step": 1440
},
{
"epoch": 21.01,
"learning_rate": 9.242372881355933e-05,
"loss": 1.1159,
"step": 1450
},
{
"epoch": 21.16,
"learning_rate": 9.225423728813561e-05,
"loss": 1.0629,
"step": 1460
},
{
"epoch": 21.3,
"learning_rate": 9.208474576271186e-05,
"loss": 1.1606,
"step": 1470
},
{
"epoch": 21.45,
"learning_rate": 9.191525423728814e-05,
"loss": 1.051,
"step": 1480
},
{
"epoch": 21.59,
"learning_rate": 9.174576271186442e-05,
"loss": 1.0948,
"step": 1490
},
{
"epoch": 21.74,
"learning_rate": 9.157627118644069e-05,
"loss": 1.0766,
"step": 1500
},
{
"epoch": 21.74,
"eval_loss": 0.27734482288360596,
"eval_runtime": 571.274,
"eval_samples_per_second": 5.941,
"eval_steps_per_second": 0.744,
"eval_wer": 0.2488228188250376,
"step": 1500
},
{
"epoch": 21.88,
"learning_rate": 9.140677966101695e-05,
"loss": 0.9907,
"step": 1510
},
{
"epoch": 22.03,
"learning_rate": 9.123728813559322e-05,
"loss": 1.0756,
"step": 1520
},
{
"epoch": 22.17,
"learning_rate": 9.10677966101695e-05,
"loss": 1.0947,
"step": 1530
},
{
"epoch": 22.32,
"learning_rate": 9.089830508474577e-05,
"loss": 1.1119,
"step": 1540
},
{
"epoch": 22.46,
"learning_rate": 9.072881355932203e-05,
"loss": 1.039,
"step": 1550
},
{
"epoch": 22.61,
"learning_rate": 9.055932203389831e-05,
"loss": 0.9631,
"step": 1560
},
{
"epoch": 22.75,
"learning_rate": 9.038983050847458e-05,
"loss": 0.9765,
"step": 1570
},
{
"epoch": 22.9,
"learning_rate": 9.022033898305086e-05,
"loss": 0.9932,
"step": 1580
},
{
"epoch": 23.04,
"learning_rate": 9.005084745762712e-05,
"loss": 0.9384,
"step": 1590
},
{
"epoch": 23.19,
"learning_rate": 8.988135593220339e-05,
"loss": 0.9942,
"step": 1600
},
{
"epoch": 23.33,
"learning_rate": 8.971186440677967e-05,
"loss": 0.9546,
"step": 1610
},
{
"epoch": 23.48,
"learning_rate": 8.954237288135594e-05,
"loss": 1.0051,
"step": 1620
},
{
"epoch": 23.62,
"learning_rate": 8.93728813559322e-05,
"loss": 0.9648,
"step": 1630
},
{
"epoch": 23.77,
"learning_rate": 8.920338983050848e-05,
"loss": 1.0198,
"step": 1640
},
{
"epoch": 23.91,
"learning_rate": 8.903389830508475e-05,
"loss": 0.9773,
"step": 1650
},
{
"epoch": 24.06,
"learning_rate": 8.886440677966103e-05,
"loss": 0.9714,
"step": 1660
},
{
"epoch": 24.2,
"learning_rate": 8.869491525423728e-05,
"loss": 0.987,
"step": 1670
},
{
"epoch": 24.35,
"learning_rate": 8.852542372881356e-05,
"loss": 1.0105,
"step": 1680
},
{
"epoch": 24.49,
"learning_rate": 8.835593220338983e-05,
"loss": 0.9965,
"step": 1690
},
{
"epoch": 24.64,
"learning_rate": 8.818644067796611e-05,
"loss": 0.9293,
"step": 1700
},
{
"epoch": 24.78,
"learning_rate": 8.801694915254238e-05,
"loss": 0.8939,
"step": 1710
},
{
"epoch": 24.93,
"learning_rate": 8.784745762711864e-05,
"loss": 0.9721,
"step": 1720
},
{
"epoch": 25.07,
"learning_rate": 8.767796610169492e-05,
"loss": 1.0158,
"step": 1730
},
{
"epoch": 25.22,
"learning_rate": 8.750847457627119e-05,
"loss": 1.08,
"step": 1740
},
{
"epoch": 25.36,
"learning_rate": 8.733898305084747e-05,
"loss": 1.3222,
"step": 1750
},
{
"epoch": 25.51,
"learning_rate": 8.716949152542373e-05,
"loss": 0.9776,
"step": 1760
},
{
"epoch": 25.65,
"learning_rate": 8.7e-05,
"loss": 0.9089,
"step": 1770
},
{
"epoch": 25.8,
"learning_rate": 8.683050847457628e-05,
"loss": 0.9171,
"step": 1780
},
{
"epoch": 25.94,
"learning_rate": 8.666101694915255e-05,
"loss": 0.9366,
"step": 1790
},
{
"epoch": 26.09,
"learning_rate": 8.649152542372881e-05,
"loss": 0.8907,
"step": 1800
},
{
"epoch": 26.23,
"learning_rate": 8.632203389830509e-05,
"loss": 0.8427,
"step": 1810
},
{
"epoch": 26.38,
"learning_rate": 8.615254237288136e-05,
"loss": 0.8692,
"step": 1820
},
{
"epoch": 26.52,
"learning_rate": 8.598305084745764e-05,
"loss": 0.8933,
"step": 1830
},
{
"epoch": 26.67,
"learning_rate": 8.581355932203389e-05,
"loss": 0.9524,
"step": 1840
},
{
"epoch": 26.81,
"learning_rate": 8.564406779661017e-05,
"loss": 0.9623,
"step": 1850
},
{
"epoch": 26.96,
"learning_rate": 8.547457627118645e-05,
"loss": 0.9458,
"step": 1860
},
{
"epoch": 27.1,
"learning_rate": 8.530508474576272e-05,
"loss": 0.9299,
"step": 1870
},
{
"epoch": 27.25,
"learning_rate": 8.513559322033898e-05,
"loss": 0.8612,
"step": 1880
},
{
"epoch": 27.39,
"learning_rate": 8.496610169491525e-05,
"loss": 0.9352,
"step": 1890
},
{
"epoch": 27.54,
"learning_rate": 8.479661016949153e-05,
"loss": 0.8692,
"step": 1900
},
{
"epoch": 27.68,
"learning_rate": 8.46271186440678e-05,
"loss": 0.8834,
"step": 1910
},
{
"epoch": 27.83,
"learning_rate": 8.445762711864406e-05,
"loss": 0.8448,
"step": 1920
},
{
"epoch": 27.97,
"learning_rate": 8.428813559322034e-05,
"loss": 0.9122,
"step": 1930
},
{
"epoch": 28.12,
"learning_rate": 8.411864406779661e-05,
"loss": 0.8632,
"step": 1940
},
{
"epoch": 28.26,
"learning_rate": 8.394915254237289e-05,
"loss": 0.8578,
"step": 1950
},
{
"epoch": 28.41,
"learning_rate": 8.377966101694916e-05,
"loss": 0.8758,
"step": 1960
},
{
"epoch": 28.55,
"learning_rate": 8.361016949152542e-05,
"loss": 0.9024,
"step": 1970
},
{
"epoch": 28.7,
"learning_rate": 8.34406779661017e-05,
"loss": 1.1392,
"step": 1980
},
{
"epoch": 28.84,
"learning_rate": 8.327118644067797e-05,
"loss": 0.8794,
"step": 1990
},
{
"epoch": 28.99,
"learning_rate": 8.310169491525425e-05,
"loss": 0.8416,
"step": 2000
},
{
"epoch": 28.99,
"eval_loss": 0.222377210855484,
"eval_runtime": 567.7022,
"eval_samples_per_second": 5.978,
"eval_steps_per_second": 0.749,
"eval_wer": 0.19897443482977098,
"step": 2000
},
{
"epoch": 29.13,
"learning_rate": 8.293220338983052e-05,
"loss": 0.8217,
"step": 2010
},
{
"epoch": 29.28,
"learning_rate": 8.276271186440678e-05,
"loss": 0.9189,
"step": 2020
},
{
"epoch": 29.42,
"learning_rate": 8.259322033898306e-05,
"loss": 0.8251,
"step": 2030
},
{
"epoch": 29.57,
"learning_rate": 8.242372881355933e-05,
"loss": 0.9254,
"step": 2040
},
{
"epoch": 29.71,
"learning_rate": 8.22542372881356e-05,
"loss": 0.9276,
"step": 2050
},
{
"epoch": 29.86,
"learning_rate": 8.208474576271186e-05,
"loss": 0.8893,
"step": 2060
},
{
"epoch": 30.0,
"learning_rate": 8.191525423728814e-05,
"loss": 0.7608,
"step": 2070
},
{
"epoch": 30.14,
"learning_rate": 8.174576271186442e-05,
"loss": 0.7857,
"step": 2080
},
{
"epoch": 30.29,
"learning_rate": 8.157627118644067e-05,
"loss": 0.9132,
"step": 2090
},
{
"epoch": 30.43,
"learning_rate": 8.140677966101695e-05,
"loss": 0.8814,
"step": 2100
},
{
"epoch": 30.58,
"learning_rate": 8.123728813559322e-05,
"loss": 0.901,
"step": 2110
},
{
"epoch": 30.72,
"learning_rate": 8.10677966101695e-05,
"loss": 0.8746,
"step": 2120
},
{
"epoch": 30.87,
"learning_rate": 8.089830508474577e-05,
"loss": 0.9291,
"step": 2130
},
{
"epoch": 31.01,
"learning_rate": 8.072881355932203e-05,
"loss": 0.8357,
"step": 2140
},
{
"epoch": 31.16,
"learning_rate": 8.055932203389831e-05,
"loss": 0.8305,
"step": 2150
},
{
"epoch": 31.3,
"learning_rate": 8.038983050847458e-05,
"loss": 0.8275,
"step": 2160
},
{
"epoch": 31.45,
"learning_rate": 8.022033898305085e-05,
"loss": 0.8111,
"step": 2170
},
{
"epoch": 31.59,
"learning_rate": 8.005084745762713e-05,
"loss": 0.8109,
"step": 2180
},
{
"epoch": 31.74,
"learning_rate": 7.988135593220339e-05,
"loss": 0.8761,
"step": 2190
},
{
"epoch": 31.88,
"learning_rate": 7.971186440677967e-05,
"loss": 0.8179,
"step": 2200
},
{
"epoch": 32.03,
"learning_rate": 7.954237288135592e-05,
"loss": 0.8078,
"step": 2210
},
{
"epoch": 32.17,
"learning_rate": 7.93728813559322e-05,
"loss": 0.8174,
"step": 2220
},
{
"epoch": 32.32,
"learning_rate": 7.920338983050848e-05,
"loss": 0.8176,
"step": 2230
},
{
"epoch": 32.46,
"learning_rate": 7.903389830508475e-05,
"loss": 0.729,
"step": 2240
},
{
"epoch": 32.61,
"learning_rate": 7.886440677966102e-05,
"loss": 0.7884,
"step": 2250
},
{
"epoch": 32.75,
"learning_rate": 7.869491525423728e-05,
"loss": 0.7896,
"step": 2260
},
{
"epoch": 32.9,
"learning_rate": 7.852542372881356e-05,
"loss": 0.8016,
"step": 2270
},
{
"epoch": 33.04,
"learning_rate": 7.835593220338984e-05,
"loss": 0.7635,
"step": 2280
},
{
"epoch": 33.19,
"learning_rate": 7.818644067796611e-05,
"loss": 0.8308,
"step": 2290
},
{
"epoch": 33.33,
"learning_rate": 7.801694915254238e-05,
"loss": 0.7543,
"step": 2300
},
{
"epoch": 33.48,
"learning_rate": 7.784745762711864e-05,
"loss": 0.8617,
"step": 2310
},
{
"epoch": 33.62,
"learning_rate": 7.767796610169492e-05,
"loss": 0.8434,
"step": 2320
},
{
"epoch": 33.77,
"learning_rate": 7.750847457627119e-05,
"loss": 0.948,
"step": 2330
},
{
"epoch": 33.91,
"learning_rate": 7.733898305084746e-05,
"loss": 1.0185,
"step": 2340
},
{
"epoch": 34.06,
"learning_rate": 7.716949152542374e-05,
"loss": 0.827,
"step": 2350
},
{
"epoch": 34.2,
"learning_rate": 7.7e-05,
"loss": 0.7916,
"step": 2360
},
{
"epoch": 34.35,
"learning_rate": 7.683050847457628e-05,
"loss": 0.7858,
"step": 2370
},
{
"epoch": 34.49,
"learning_rate": 7.666101694915255e-05,
"loss": 0.8583,
"step": 2380
},
{
"epoch": 34.64,
"learning_rate": 7.649152542372881e-05,
"loss": 0.7672,
"step": 2390
},
{
"epoch": 34.78,
"learning_rate": 7.63220338983051e-05,
"loss": 0.7745,
"step": 2400
},
{
"epoch": 34.93,
"learning_rate": 7.615254237288136e-05,
"loss": 0.8104,
"step": 2410
},
{
"epoch": 35.07,
"learning_rate": 7.598305084745763e-05,
"loss": 0.7519,
"step": 2420
},
{
"epoch": 35.22,
"learning_rate": 7.58135593220339e-05,
"loss": 0.7867,
"step": 2430
},
{
"epoch": 35.36,
"learning_rate": 7.564406779661017e-05,
"loss": 0.7614,
"step": 2440
},
{
"epoch": 35.51,
"learning_rate": 7.547457627118645e-05,
"loss": 0.8043,
"step": 2450
},
{
"epoch": 35.65,
"learning_rate": 7.53050847457627e-05,
"loss": 0.7866,
"step": 2460
},
{
"epoch": 35.8,
"learning_rate": 7.513559322033899e-05,
"loss": 0.8356,
"step": 2470
},
{
"epoch": 35.94,
"learning_rate": 7.496610169491525e-05,
"loss": 0.761,
"step": 2480
},
{
"epoch": 36.09,
"learning_rate": 7.479661016949153e-05,
"loss": 0.9027,
"step": 2490
},
{
"epoch": 36.23,
"learning_rate": 7.46271186440678e-05,
"loss": 0.8048,
"step": 2500
},
{
"epoch": 36.23,
"eval_loss": 0.20626655220985413,
"eval_runtime": 571.29,
"eval_samples_per_second": 5.941,
"eval_steps_per_second": 0.744,
"eval_wer": 0.17916985101364954,
"step": 2500
},
{
"epoch": 36.38,
"learning_rate": 7.445762711864406e-05,
"loss": 0.7866,
"step": 2510
},
{
"epoch": 36.52,
"learning_rate": 7.428813559322034e-05,
"loss": 0.7869,
"step": 2520
},
{
"epoch": 36.67,
"learning_rate": 7.411864406779661e-05,
"loss": 0.8445,
"step": 2530
},
{
"epoch": 36.81,
"learning_rate": 7.394915254237289e-05,
"loss": 0.7745,
"step": 2540
},
{
"epoch": 36.96,
"learning_rate": 7.377966101694916e-05,
"loss": 0.7888,
"step": 2550
},
{
"epoch": 37.1,
"learning_rate": 7.361016949152542e-05,
"loss": 0.776,
"step": 2560
},
{
"epoch": 37.25,
"learning_rate": 7.34406779661017e-05,
"loss": 0.8096,
"step": 2570
},
{
"epoch": 37.39,
"learning_rate": 7.327118644067797e-05,
"loss": 0.7561,
"step": 2580
},
{
"epoch": 37.54,
"learning_rate": 7.310169491525424e-05,
"loss": 0.7251,
"step": 2590
},
{
"epoch": 37.68,
"learning_rate": 7.293220338983052e-05,
"loss": 0.8062,
"step": 2600
},
{
"epoch": 37.83,
"learning_rate": 7.276271186440678e-05,
"loss": 0.7388,
"step": 2610
},
{
"epoch": 37.97,
"learning_rate": 7.259322033898306e-05,
"loss": 0.7616,
"step": 2620
},
{
"epoch": 38.12,
"learning_rate": 7.242372881355932e-05,
"loss": 0.7593,
"step": 2630
},
{
"epoch": 38.26,
"learning_rate": 7.22542372881356e-05,
"loss": 0.7399,
"step": 2640
},
{
"epoch": 38.41,
"learning_rate": 7.208474576271188e-05,
"loss": 0.7559,
"step": 2650
},
{
"epoch": 38.55,
"learning_rate": 7.191525423728814e-05,
"loss": 0.7559,
"step": 2660
},
{
"epoch": 38.7,
"learning_rate": 7.174576271186441e-05,
"loss": 0.7218,
"step": 2670
},
{
"epoch": 38.84,
"learning_rate": 7.157627118644067e-05,
"loss": 0.7991,
"step": 2680
},
{
"epoch": 38.99,
"learning_rate": 7.140677966101695e-05,
"loss": 0.7493,
"step": 2690
},
{
"epoch": 39.13,
"learning_rate": 7.123728813559322e-05,
"loss": 0.7502,
"step": 2700
},
{
"epoch": 39.28,
"learning_rate": 7.106779661016949e-05,
"loss": 0.7883,
"step": 2710
},
{
"epoch": 39.42,
"learning_rate": 7.089830508474577e-05,
"loss": 0.8236,
"step": 2720
},
{
"epoch": 39.57,
"learning_rate": 7.072881355932203e-05,
"loss": 0.8141,
"step": 2730
},
{
"epoch": 39.71,
"learning_rate": 7.055932203389831e-05,
"loss": 0.7956,
"step": 2740
},
{
"epoch": 39.86,
"learning_rate": 7.038983050847458e-05,
"loss": 0.7714,
"step": 2750
},
{
"epoch": 40.0,
"learning_rate": 7.022033898305085e-05,
"loss": 0.6689,
"step": 2760
},
{
"epoch": 40.14,
"learning_rate": 7.005084745762713e-05,
"loss": 0.7202,
"step": 2770
},
{
"epoch": 40.29,
"learning_rate": 6.988135593220339e-05,
"loss": 0.7278,
"step": 2780
},
{
"epoch": 40.43,
"learning_rate": 6.971186440677966e-05,
"loss": 0.801,
"step": 2790
},
{
"epoch": 40.58,
"learning_rate": 6.954237288135594e-05,
"loss": 0.7865,
"step": 2800
},
{
"epoch": 40.72,
"learning_rate": 6.93728813559322e-05,
"loss": 0.6811,
"step": 2810
},
{
"epoch": 40.87,
"learning_rate": 6.920338983050849e-05,
"loss": 0.7933,
"step": 2820
},
{
"epoch": 41.01,
"learning_rate": 6.903389830508475e-05,
"loss": 0.7826,
"step": 2830
},
{
"epoch": 41.16,
"learning_rate": 6.886440677966102e-05,
"loss": 0.7142,
"step": 2840
},
{
"epoch": 41.3,
"learning_rate": 6.869491525423728e-05,
"loss": 0.8347,
"step": 2850
},
{
"epoch": 41.45,
"learning_rate": 6.852542372881356e-05,
"loss": 0.7712,
"step": 2860
},
{
"epoch": 41.59,
"learning_rate": 6.835593220338984e-05,
"loss": 0.7458,
"step": 2870
},
{
"epoch": 41.74,
"learning_rate": 6.81864406779661e-05,
"loss": 0.7528,
"step": 2880
},
{
"epoch": 41.88,
"learning_rate": 6.801694915254238e-05,
"loss": 0.7699,
"step": 2890
},
{
"epoch": 42.03,
"learning_rate": 6.784745762711864e-05,
"loss": 0.8577,
"step": 2900
},
{
"epoch": 42.17,
"learning_rate": 6.767796610169492e-05,
"loss": 0.7259,
"step": 2910
},
{
"epoch": 42.32,
"learning_rate": 6.750847457627119e-05,
"loss": 0.7039,
"step": 2920
},
{
"epoch": 42.46,
"learning_rate": 6.733898305084746e-05,
"loss": 0.8307,
"step": 2930
},
{
"epoch": 42.61,
"learning_rate": 6.716949152542374e-05,
"loss": 0.7952,
"step": 2940
},
{
"epoch": 42.75,
"learning_rate": 6.7e-05,
"loss": 0.6789,
"step": 2950
},
{
"epoch": 42.9,
"learning_rate": 6.683050847457627e-05,
"loss": 0.7708,
"step": 2960
},
{
"epoch": 43.04,
"learning_rate": 6.666101694915255e-05,
"loss": 0.6823,
"step": 2970
},
{
"epoch": 43.19,
"learning_rate": 6.649152542372881e-05,
"loss": 0.6989,
"step": 2980
},
{
"epoch": 43.33,
"learning_rate": 6.63220338983051e-05,
"loss": 0.7944,
"step": 2990
},
{
"epoch": 43.48,
"learning_rate": 6.615254237288135e-05,
"loss": 0.7664,
"step": 3000
},
{
"epoch": 43.48,
"eval_loss": 0.2088436335325241,
"eval_runtime": 572.961,
"eval_samples_per_second": 5.924,
"eval_steps_per_second": 0.742,
"eval_wer": 0.17475696242059677,
"step": 3000
},
{
"epoch": 43.62,
"learning_rate": 6.598305084745763e-05,
"loss": 0.7256,
"step": 3010
},
{
"epoch": 43.77,
"learning_rate": 6.581355932203391e-05,
"loss": 0.7146,
"step": 3020
},
{
"epoch": 43.91,
"learning_rate": 6.564406779661017e-05,
"loss": 0.7347,
"step": 3030
},
{
"epoch": 44.06,
"learning_rate": 6.547457627118644e-05,
"loss": 0.7302,
"step": 3040
},
{
"epoch": 44.2,
"learning_rate": 6.53050847457627e-05,
"loss": 0.7772,
"step": 3050
},
{
"epoch": 44.35,
"learning_rate": 6.513559322033899e-05,
"loss": 0.7214,
"step": 3060
},
{
"epoch": 44.49,
"learning_rate": 6.496610169491527e-05,
"loss": 0.6899,
"step": 3070
},
{
"epoch": 44.64,
"learning_rate": 6.479661016949153e-05,
"loss": 0.7488,
"step": 3080
},
{
"epoch": 44.78,
"learning_rate": 6.46271186440678e-05,
"loss": 0.831,
"step": 3090
},
{
"epoch": 44.93,
"learning_rate": 6.445762711864407e-05,
"loss": 0.799,
"step": 3100
},
{
"epoch": 45.07,
"learning_rate": 6.428813559322035e-05,
"loss": 0.7195,
"step": 3110
},
{
"epoch": 45.22,
"learning_rate": 6.411864406779661e-05,
"loss": 0.7476,
"step": 3120
},
{
"epoch": 45.36,
"learning_rate": 6.394915254237288e-05,
"loss": 0.7475,
"step": 3130
},
{
"epoch": 45.51,
"learning_rate": 6.377966101694916e-05,
"loss": 0.767,
"step": 3140
},
{
"epoch": 45.65,
"learning_rate": 6.361016949152542e-05,
"loss": 0.7263,
"step": 3150
},
{
"epoch": 45.8,
"learning_rate": 6.34406779661017e-05,
"loss": 0.7423,
"step": 3160
},
{
"epoch": 45.94,
"learning_rate": 6.327118644067797e-05,
"loss": 0.7693,
"step": 3170
},
{
"epoch": 46.09,
"learning_rate": 6.310169491525424e-05,
"loss": 0.8136,
"step": 3180
},
{
"epoch": 46.23,
"learning_rate": 6.293220338983052e-05,
"loss": 0.6705,
"step": 3190
},
{
"epoch": 46.38,
"learning_rate": 6.276271186440678e-05,
"loss": 0.7041,
"step": 3200
},
{
"epoch": 46.52,
"learning_rate": 6.259322033898305e-05,
"loss": 0.7478,
"step": 3210
},
{
"epoch": 46.67,
"learning_rate": 6.242372881355933e-05,
"loss": 0.6909,
"step": 3220
},
{
"epoch": 46.81,
"learning_rate": 6.22542372881356e-05,
"loss": 0.7702,
"step": 3230
},
{
"epoch": 46.96,
"learning_rate": 6.208474576271188e-05,
"loss": 0.7302,
"step": 3240
},
{
"epoch": 47.1,
"learning_rate": 6.191525423728813e-05,
"loss": 0.7812,
"step": 3250
},
{
"epoch": 47.25,
"learning_rate": 6.174576271186441e-05,
"loss": 0.7043,
"step": 3260
},
{
"epoch": 47.39,
"learning_rate": 6.157627118644068e-05,
"loss": 0.7645,
"step": 3270
},
{
"epoch": 47.54,
"learning_rate": 6.140677966101696e-05,
"loss": 0.8539,
"step": 3280
},
{
"epoch": 47.68,
"learning_rate": 6.123728813559322e-05,
"loss": 0.7747,
"step": 3290
},
{
"epoch": 47.83,
"learning_rate": 6.106779661016949e-05,
"loss": 0.6613,
"step": 3300
},
{
"epoch": 47.97,
"learning_rate": 6.089830508474577e-05,
"loss": 0.7399,
"step": 3310
},
{
"epoch": 48.12,
"learning_rate": 6.072881355932204e-05,
"loss": 0.6675,
"step": 3320
},
{
"epoch": 48.26,
"learning_rate": 6.05593220338983e-05,
"loss": 0.6699,
"step": 3330
},
{
"epoch": 48.41,
"learning_rate": 6.0389830508474574e-05,
"loss": 0.7534,
"step": 3340
},
{
"epoch": 48.55,
"learning_rate": 6.022033898305085e-05,
"loss": 0.7837,
"step": 3350
},
{
"epoch": 48.7,
"learning_rate": 6.005084745762713e-05,
"loss": 0.7066,
"step": 3360
},
{
"epoch": 48.84,
"learning_rate": 5.98813559322034e-05,
"loss": 0.7931,
"step": 3370
},
{
"epoch": 48.99,
"learning_rate": 5.971186440677966e-05,
"loss": 0.7575,
"step": 3380
},
{
"epoch": 49.13,
"learning_rate": 5.954237288135593e-05,
"loss": 0.6951,
"step": 3390
},
{
"epoch": 49.28,
"learning_rate": 5.9372881355932206e-05,
"loss": 0.788,
"step": 3400
},
{
"epoch": 49.42,
"learning_rate": 5.920338983050848e-05,
"loss": 0.7727,
"step": 3410
},
{
"epoch": 49.57,
"learning_rate": 5.9033898305084746e-05,
"loss": 0.7102,
"step": 3420
},
{
"epoch": 49.71,
"learning_rate": 5.886440677966102e-05,
"loss": 0.6733,
"step": 3430
},
{
"epoch": 49.86,
"learning_rate": 5.869491525423729e-05,
"loss": 0.6866,
"step": 3440
},
{
"epoch": 50.0,
"learning_rate": 5.8525423728813565e-05,
"loss": 0.7414,
"step": 3450
},
{
"epoch": 50.14,
"learning_rate": 5.835593220338983e-05,
"loss": 0.684,
"step": 3460
},
{
"epoch": 50.29,
"learning_rate": 5.8186440677966105e-05,
"loss": 0.7363,
"step": 3470
},
{
"epoch": 50.43,
"learning_rate": 5.801694915254238e-05,
"loss": 0.7421,
"step": 3480
},
{
"epoch": 50.58,
"learning_rate": 5.784745762711865e-05,
"loss": 0.7038,
"step": 3490
},
{
"epoch": 50.72,
"learning_rate": 5.767796610169491e-05,
"loss": 0.6571,
"step": 3500
},
{
"epoch": 50.72,
"eval_loss": 0.20422525703907013,
"eval_runtime": 570.0136,
"eval_samples_per_second": 5.954,
"eval_steps_per_second": 0.746,
"eval_wer": 0.166761169867449,
"step": 3500
},
{
"epoch": 50.87,
"learning_rate": 5.750847457627119e-05,
"loss": 0.7286,
"step": 3510
},
{
"epoch": 51.01,
"learning_rate": 5.7338983050847464e-05,
"loss": 0.6604,
"step": 3520
},
{
"epoch": 51.16,
"learning_rate": 5.716949152542374e-05,
"loss": 0.7662,
"step": 3530
},
{
"epoch": 51.3,
"learning_rate": 5.6999999999999996e-05,
"loss": 0.7688,
"step": 3540
},
{
"epoch": 51.45,
"learning_rate": 5.683050847457627e-05,
"loss": 0.6903,
"step": 3550
},
{
"epoch": 51.59,
"learning_rate": 5.666101694915254e-05,
"loss": 0.6939,
"step": 3560
},
{
"epoch": 51.74,
"learning_rate": 5.649152542372882e-05,
"loss": 0.729,
"step": 3570
},
{
"epoch": 51.88,
"learning_rate": 5.632203389830508e-05,
"loss": 0.7307,
"step": 3580
},
{
"epoch": 52.03,
"learning_rate": 5.6152542372881355e-05,
"loss": 0.6863,
"step": 3590
},
{
"epoch": 52.17,
"learning_rate": 5.598305084745763e-05,
"loss": 0.6843,
"step": 3600
},
{
"epoch": 52.32,
"learning_rate": 5.58135593220339e-05,
"loss": 0.7298,
"step": 3610
},
{
"epoch": 52.46,
"learning_rate": 5.5644067796610175e-05,
"loss": 0.7545,
"step": 3620
},
{
"epoch": 52.61,
"learning_rate": 5.547457627118644e-05,
"loss": 0.6663,
"step": 3630
},
{
"epoch": 52.75,
"learning_rate": 5.5305084745762714e-05,
"loss": 0.6822,
"step": 3640
},
{
"epoch": 52.9,
"learning_rate": 5.513559322033899e-05,
"loss": 0.806,
"step": 3650
},
{
"epoch": 53.04,
"learning_rate": 5.496610169491526e-05,
"loss": 0.6597,
"step": 3660
},
{
"epoch": 53.19,
"learning_rate": 5.479661016949153e-05,
"loss": 0.7131,
"step": 3670
},
{
"epoch": 53.33,
"learning_rate": 5.46271186440678e-05,
"loss": 0.7221,
"step": 3680
},
{
"epoch": 53.48,
"learning_rate": 5.445762711864407e-05,
"loss": 0.6235,
"step": 3690
},
{
"epoch": 53.62,
"learning_rate": 5.4288135593220346e-05,
"loss": 0.6547,
"step": 3700
},
{
"epoch": 53.77,
"learning_rate": 5.4118644067796606e-05,
"loss": 0.7014,
"step": 3710
},
{
"epoch": 53.91,
"learning_rate": 5.3949152542372886e-05,
"loss": 0.6826,
"step": 3720
},
{
"epoch": 54.06,
"learning_rate": 5.377966101694916e-05,
"loss": 0.664,
"step": 3730
},
{
"epoch": 54.2,
"learning_rate": 5.361016949152543e-05,
"loss": 0.7201,
"step": 3740
},
{
"epoch": 54.35,
"learning_rate": 5.344067796610169e-05,
"loss": 0.6765,
"step": 3750
},
{
"epoch": 54.49,
"learning_rate": 5.3271186440677965e-05,
"loss": 0.6541,
"step": 3760
},
{
"epoch": 54.64,
"learning_rate": 5.310169491525424e-05,
"loss": 0.7299,
"step": 3770
},
{
"epoch": 54.78,
"learning_rate": 5.293220338983051e-05,
"loss": 0.7139,
"step": 3780
},
{
"epoch": 54.93,
"learning_rate": 5.276271186440678e-05,
"loss": 0.6925,
"step": 3790
},
{
"epoch": 55.07,
"learning_rate": 5.259322033898305e-05,
"loss": 0.6712,
"step": 3800
},
{
"epoch": 55.22,
"learning_rate": 5.2423728813559324e-05,
"loss": 0.6999,
"step": 3810
},
{
"epoch": 55.36,
"learning_rate": 5.22542372881356e-05,
"loss": 0.6577,
"step": 3820
},
{
"epoch": 55.51,
"learning_rate": 5.2084745762711864e-05,
"loss": 0.733,
"step": 3830
},
{
"epoch": 55.65,
"learning_rate": 5.191525423728814e-05,
"loss": 0.666,
"step": 3840
},
{
"epoch": 55.8,
"learning_rate": 5.174576271186441e-05,
"loss": 0.6638,
"step": 3850
},
{
"epoch": 55.94,
"learning_rate": 5.157627118644068e-05,
"loss": 0.7019,
"step": 3860
},
{
"epoch": 56.09,
"learning_rate": 5.140677966101695e-05,
"loss": 0.7018,
"step": 3870
},
{
"epoch": 56.23,
"learning_rate": 5.123728813559322e-05,
"loss": 0.6911,
"step": 3880
},
{
"epoch": 56.38,
"learning_rate": 5.1067796610169496e-05,
"loss": 0.7035,
"step": 3890
},
{
"epoch": 56.52,
"learning_rate": 5.089830508474577e-05,
"loss": 0.7204,
"step": 3900
},
{
"epoch": 56.67,
"learning_rate": 5.072881355932204e-05,
"loss": 0.6888,
"step": 3910
},
{
"epoch": 56.81,
"learning_rate": 5.05593220338983e-05,
"loss": 0.7502,
"step": 3920
},
{
"epoch": 56.96,
"learning_rate": 5.0389830508474575e-05,
"loss": 0.6246,
"step": 3930
},
{
"epoch": 57.1,
"learning_rate": 5.0220338983050855e-05,
"loss": 0.6079,
"step": 3940
},
{
"epoch": 57.25,
"learning_rate": 5.005084745762713e-05,
"loss": 0.6618,
"step": 3950
},
{
"epoch": 57.39,
"learning_rate": 4.9881355932203394e-05,
"loss": 0.6935,
"step": 3960
},
{
"epoch": 57.54,
"learning_rate": 4.971186440677966e-05,
"loss": 0.6872,
"step": 3970
},
{
"epoch": 57.68,
"learning_rate": 4.9542372881355934e-05,
"loss": 0.6546,
"step": 3980
},
{
"epoch": 57.83,
"learning_rate": 4.937288135593221e-05,
"loss": 0.7091,
"step": 3990
},
{
"epoch": 57.97,
"learning_rate": 4.920338983050848e-05,
"loss": 0.7014,
"step": 4000
},
{
"epoch": 57.97,
"eval_loss": 0.21360060572624207,
"eval_runtime": 570.979,
"eval_samples_per_second": 5.944,
"eval_steps_per_second": 0.744,
"eval_wer": 0.16485467051253605,
"step": 4000
},
{
"epoch": 58.12,
"learning_rate": 4.9033898305084746e-05,
"loss": 0.6837,
"step": 4010
},
{
"epoch": 58.26,
"learning_rate": 4.886440677966102e-05,
"loss": 0.6707,
"step": 4020
},
{
"epoch": 58.41,
"learning_rate": 4.8694915254237286e-05,
"loss": 0.6715,
"step": 4030
},
{
"epoch": 58.55,
"learning_rate": 4.8525423728813566e-05,
"loss": 0.64,
"step": 4040
},
{
"epoch": 58.7,
"learning_rate": 4.835593220338983e-05,
"loss": 0.6904,
"step": 4050
},
{
"epoch": 58.84,
"learning_rate": 4.8186440677966105e-05,
"loss": 0.6809,
"step": 4060
},
{
"epoch": 58.99,
"learning_rate": 4.801694915254237e-05,
"loss": 0.6187,
"step": 4070
},
{
"epoch": 59.13,
"learning_rate": 4.7847457627118645e-05,
"loss": 0.7028,
"step": 4080
},
{
"epoch": 59.28,
"learning_rate": 4.767796610169492e-05,
"loss": 0.7025,
"step": 4090
},
{
"epoch": 59.42,
"learning_rate": 4.750847457627119e-05,
"loss": 0.6471,
"step": 4100
},
{
"epoch": 59.57,
"learning_rate": 4.733898305084746e-05,
"loss": 0.6968,
"step": 4110
},
{
"epoch": 59.71,
"learning_rate": 4.716949152542373e-05,
"loss": 0.6268,
"step": 4120
},
{
"epoch": 59.86,
"learning_rate": 4.7e-05,
"loss": 0.6731,
"step": 4130
},
{
"epoch": 60.0,
"learning_rate": 4.683050847457627e-05,
"loss": 0.6614,
"step": 4140
},
{
"epoch": 60.14,
"learning_rate": 4.666101694915255e-05,
"loss": 0.6138,
"step": 4150
},
{
"epoch": 60.29,
"learning_rate": 4.649152542372882e-05,
"loss": 0.6875,
"step": 4160
},
{
"epoch": 60.43,
"learning_rate": 4.632203389830509e-05,
"loss": 0.7305,
"step": 4170
},
{
"epoch": 60.58,
"learning_rate": 4.6152542372881356e-05,
"loss": 0.6367,
"step": 4180
},
{
"epoch": 60.72,
"learning_rate": 4.598305084745763e-05,
"loss": 0.7106,
"step": 4190
},
{
"epoch": 60.87,
"learning_rate": 4.58135593220339e-05,
"loss": 0.7531,
"step": 4200
},
{
"epoch": 61.01,
"learning_rate": 4.5644067796610176e-05,
"loss": 0.6545,
"step": 4210
},
{
"epoch": 61.16,
"learning_rate": 4.547457627118644e-05,
"loss": 0.7458,
"step": 4220
},
{
"epoch": 61.3,
"learning_rate": 4.5305084745762715e-05,
"loss": 0.6348,
"step": 4230
},
{
"epoch": 61.45,
"learning_rate": 4.513559322033898e-05,
"loss": 0.6631,
"step": 4240
},
{
"epoch": 61.59,
"learning_rate": 4.4966101694915255e-05,
"loss": 0.8233,
"step": 4250
},
{
"epoch": 61.74,
"learning_rate": 4.479661016949153e-05,
"loss": 0.6475,
"step": 4260
},
{
"epoch": 61.88,
"learning_rate": 4.46271186440678e-05,
"loss": 0.659,
"step": 4270
},
{
"epoch": 62.03,
"learning_rate": 4.445762711864407e-05,
"loss": 0.7019,
"step": 4280
},
{
"epoch": 62.17,
"learning_rate": 4.428813559322034e-05,
"loss": 0.6716,
"step": 4290
},
{
"epoch": 62.32,
"learning_rate": 4.4118644067796614e-05,
"loss": 0.6016,
"step": 4300
},
{
"epoch": 62.46,
"learning_rate": 4.394915254237289e-05,
"loss": 0.6532,
"step": 4310
},
{
"epoch": 62.61,
"learning_rate": 4.377966101694915e-05,
"loss": 0.6426,
"step": 4320
},
{
"epoch": 62.75,
"learning_rate": 4.3610169491525426e-05,
"loss": 0.6543,
"step": 4330
},
{
"epoch": 62.9,
"learning_rate": 4.344067796610169e-05,
"loss": 0.7405,
"step": 4340
},
{
"epoch": 63.04,
"learning_rate": 4.3271186440677966e-05,
"loss": 0.6795,
"step": 4350
},
{
"epoch": 63.19,
"learning_rate": 4.310169491525424e-05,
"loss": 0.726,
"step": 4360
},
{
"epoch": 63.33,
"learning_rate": 4.293220338983051e-05,
"loss": 0.8443,
"step": 4370
},
{
"epoch": 63.48,
"learning_rate": 4.276271186440678e-05,
"loss": 0.7359,
"step": 4380
},
{
"epoch": 63.62,
"learning_rate": 4.259322033898305e-05,
"loss": 0.6781,
"step": 4390
},
{
"epoch": 63.77,
"learning_rate": 4.242372881355932e-05,
"loss": 0.6366,
"step": 4400
},
{
"epoch": 63.91,
"learning_rate": 4.22542372881356e-05,
"loss": 0.6598,
"step": 4410
},
{
"epoch": 64.06,
"learning_rate": 4.208474576271187e-05,
"loss": 0.6668,
"step": 4420
},
{
"epoch": 64.2,
"learning_rate": 4.191525423728814e-05,
"loss": 0.7418,
"step": 4430
},
{
"epoch": 64.35,
"learning_rate": 4.174576271186441e-05,
"loss": 0.6788,
"step": 4440
},
{
"epoch": 64.49,
"learning_rate": 4.157627118644068e-05,
"loss": 0.6549,
"step": 4450
},
{
"epoch": 64.64,
"learning_rate": 4.140677966101695e-05,
"loss": 0.6636,
"step": 4460
},
{
"epoch": 64.78,
"learning_rate": 4.1237288135593223e-05,
"loss": 0.6163,
"step": 4470
},
{
"epoch": 64.93,
"learning_rate": 4.10677966101695e-05,
"loss": 0.7296,
"step": 4480
},
{
"epoch": 65.07,
"learning_rate": 4.089830508474576e-05,
"loss": 0.6633,
"step": 4490
},
{
"epoch": 65.22,
"learning_rate": 4.0728813559322036e-05,
"loss": 0.6171,
"step": 4500
},
{
"epoch": 65.22,
"eval_loss": 0.21388114988803864,
"eval_runtime": 574.1167,
"eval_samples_per_second": 5.912,
"eval_steps_per_second": 0.74,
"eval_wer": 0.16409864490627749,
"step": 4500
},
{
"epoch": 65.36,
"learning_rate": 4.055932203389831e-05,
"loss": 0.647,
"step": 4510
},
{
"epoch": 65.51,
"learning_rate": 4.038983050847458e-05,
"loss": 0.5946,
"step": 4520
},
{
"epoch": 65.65,
"learning_rate": 4.022033898305085e-05,
"loss": 0.6789,
"step": 4530
},
{
"epoch": 65.8,
"learning_rate": 4.005084745762712e-05,
"loss": 0.6854,
"step": 4540
},
{
"epoch": 65.94,
"learning_rate": 3.988135593220339e-05,
"loss": 0.6547,
"step": 4550
},
{
"epoch": 66.09,
"learning_rate": 3.971186440677966e-05,
"loss": 0.7462,
"step": 4560
},
{
"epoch": 66.23,
"learning_rate": 3.9542372881355935e-05,
"loss": 0.6724,
"step": 4570
},
{
"epoch": 66.38,
"learning_rate": 3.937288135593221e-05,
"loss": 0.6623,
"step": 4580
},
{
"epoch": 66.52,
"learning_rate": 3.9203389830508474e-05,
"loss": 0.6438,
"step": 4590
},
{
"epoch": 66.67,
"learning_rate": 3.903389830508475e-05,
"loss": 0.6983,
"step": 4600
},
{
"epoch": 66.81,
"learning_rate": 3.8864406779661014e-05,
"loss": 0.6905,
"step": 4610
},
{
"epoch": 66.96,
"learning_rate": 3.8694915254237294e-05,
"loss": 0.7062,
"step": 4620
},
{
"epoch": 67.1,
"learning_rate": 3.852542372881356e-05,
"loss": 0.6685,
"step": 4630
},
{
"epoch": 67.25,
"learning_rate": 3.835593220338983e-05,
"loss": 0.6545,
"step": 4640
},
{
"epoch": 67.39,
"learning_rate": 3.81864406779661e-05,
"loss": 0.6953,
"step": 4650
},
{
"epoch": 67.54,
"learning_rate": 3.801694915254237e-05,
"loss": 0.6793,
"step": 4660
},
{
"epoch": 67.68,
"learning_rate": 3.7847457627118646e-05,
"loss": 0.6059,
"step": 4670
},
{
"epoch": 67.83,
"learning_rate": 3.767796610169492e-05,
"loss": 0.6555,
"step": 4680
},
{
"epoch": 67.97,
"learning_rate": 3.750847457627119e-05,
"loss": 0.6261,
"step": 4690
},
{
"epoch": 68.12,
"learning_rate": 3.733898305084746e-05,
"loss": 0.7417,
"step": 4700
},
{
"epoch": 68.26,
"learning_rate": 3.716949152542373e-05,
"loss": 0.6732,
"step": 4710
},
{
"epoch": 68.41,
"learning_rate": 3.7e-05,
"loss": 0.7073,
"step": 4720
},
{
"epoch": 68.55,
"learning_rate": 3.683050847457628e-05,
"loss": 0.6299,
"step": 4730
},
{
"epoch": 68.7,
"learning_rate": 3.6661016949152544e-05,
"loss": 0.6789,
"step": 4740
},
{
"epoch": 68.84,
"learning_rate": 3.649152542372882e-05,
"loss": 0.6946,
"step": 4750
},
{
"epoch": 68.99,
"learning_rate": 3.6322033898305084e-05,
"loss": 0.6954,
"step": 4760
},
{
"epoch": 69.13,
"learning_rate": 3.615254237288136e-05,
"loss": 0.6609,
"step": 4770
},
{
"epoch": 69.28,
"learning_rate": 3.598305084745763e-05,
"loss": 0.7685,
"step": 4780
},
{
"epoch": 69.42,
"learning_rate": 3.5813559322033903e-05,
"loss": 0.6556,
"step": 4790
},
{
"epoch": 69.57,
"learning_rate": 3.564406779661017e-05,
"loss": 0.6377,
"step": 4800
},
{
"epoch": 69.71,
"learning_rate": 3.547457627118644e-05,
"loss": 0.667,
"step": 4810
},
{
"epoch": 69.86,
"learning_rate": 3.530508474576271e-05,
"loss": 0.6439,
"step": 4820
},
{
"epoch": 70.0,
"learning_rate": 3.513559322033899e-05,
"loss": 0.6266,
"step": 4830
},
{
"epoch": 70.14,
"learning_rate": 3.4966101694915256e-05,
"loss": 0.6563,
"step": 4840
},
{
"epoch": 70.29,
"learning_rate": 3.479661016949153e-05,
"loss": 0.606,
"step": 4850
},
{
"epoch": 70.43,
"learning_rate": 3.4627118644067795e-05,
"loss": 0.6662,
"step": 4860
},
{
"epoch": 70.58,
"learning_rate": 3.445762711864407e-05,
"loss": 0.6453,
"step": 4870
},
{
"epoch": 70.72,
"learning_rate": 3.428813559322034e-05,
"loss": 0.6812,
"step": 4880
},
{
"epoch": 70.87,
"learning_rate": 3.4118644067796615e-05,
"loss": 0.6655,
"step": 4890
},
{
"epoch": 71.01,
"learning_rate": 3.394915254237288e-05,
"loss": 0.6687,
"step": 4900
},
{
"epoch": 71.16,
"learning_rate": 3.3779661016949154e-05,
"loss": 0.6349,
"step": 4910
},
{
"epoch": 71.3,
"learning_rate": 3.361016949152542e-05,
"loss": 0.5908,
"step": 4920
},
{
"epoch": 71.45,
"learning_rate": 3.3440677966101694e-05,
"loss": 0.6648,
"step": 4930
},
{
"epoch": 71.59,
"learning_rate": 3.327118644067797e-05,
"loss": 0.6964,
"step": 4940
},
{
"epoch": 71.74,
"learning_rate": 3.310169491525424e-05,
"loss": 0.6482,
"step": 4950
},
{
"epoch": 71.88,
"learning_rate": 3.293220338983051e-05,
"loss": 0.7086,
"step": 4960
},
{
"epoch": 72.03,
"learning_rate": 3.276271186440678e-05,
"loss": 0.6146,
"step": 4970
},
{
"epoch": 72.17,
"learning_rate": 3.259322033898305e-05,
"loss": 0.6546,
"step": 4980
},
{
"epoch": 72.32,
"learning_rate": 3.2423728813559326e-05,
"loss": 0.608,
"step": 4990
},
{
"epoch": 72.46,
"learning_rate": 3.22542372881356e-05,
"loss": 0.6609,
"step": 5000
},
{
"epoch": 72.46,
"eval_loss": 0.2143816202878952,
"eval_runtime": 575.4482,
"eval_samples_per_second": 5.898,
"eval_steps_per_second": 0.739,
"eval_wer": 0.16206888050686585,
"step": 5000
},
{
"epoch": 72.61,
"learning_rate": 3.2084745762711865e-05,
"loss": 0.6483,
"step": 5010
},
{
"epoch": 72.75,
"learning_rate": 3.191525423728814e-05,
"loss": 0.5998,
"step": 5020
},
{
"epoch": 72.9,
"learning_rate": 3.1745762711864405e-05,
"loss": 0.6161,
"step": 5030
},
{
"epoch": 73.04,
"learning_rate": 3.157627118644068e-05,
"loss": 0.6856,
"step": 5040
},
{
"epoch": 73.19,
"learning_rate": 3.140677966101695e-05,
"loss": 0.6095,
"step": 5050
},
{
"epoch": 73.33,
"learning_rate": 3.1237288135593224e-05,
"loss": 0.6235,
"step": 5060
},
{
"epoch": 73.48,
"learning_rate": 3.106779661016949e-05,
"loss": 0.6827,
"step": 5070
},
{
"epoch": 73.62,
"learning_rate": 3.0898305084745764e-05,
"loss": 0.5903,
"step": 5080
},
{
"epoch": 73.77,
"learning_rate": 3.072881355932204e-05,
"loss": 0.7406,
"step": 5090
},
{
"epoch": 73.91,
"learning_rate": 3.055932203389831e-05,
"loss": 0.6621,
"step": 5100
},
{
"epoch": 74.06,
"learning_rate": 3.0389830508474577e-05,
"loss": 0.6619,
"step": 5110
},
{
"epoch": 74.2,
"learning_rate": 3.022033898305085e-05,
"loss": 0.6755,
"step": 5120
},
{
"epoch": 74.35,
"learning_rate": 3.005084745762712e-05,
"loss": 0.6568,
"step": 5130
},
{
"epoch": 74.49,
"learning_rate": 2.9881355932203393e-05,
"loss": 0.6244,
"step": 5140
},
{
"epoch": 74.64,
"learning_rate": 2.971186440677966e-05,
"loss": 0.6166,
"step": 5150
},
{
"epoch": 74.78,
"learning_rate": 2.9542372881355936e-05,
"loss": 0.6238,
"step": 5160
},
{
"epoch": 74.93,
"learning_rate": 2.9372881355932202e-05,
"loss": 0.7148,
"step": 5170
},
{
"epoch": 75.07,
"learning_rate": 2.9203389830508475e-05,
"loss": 0.6449,
"step": 5180
},
{
"epoch": 75.22,
"learning_rate": 2.9033898305084745e-05,
"loss": 0.6567,
"step": 5190
},
{
"epoch": 75.36,
"learning_rate": 2.8864406779661018e-05,
"loss": 0.6167,
"step": 5200
},
{
"epoch": 75.51,
"learning_rate": 2.8694915254237288e-05,
"loss": 0.6728,
"step": 5210
},
{
"epoch": 75.65,
"learning_rate": 2.852542372881356e-05,
"loss": 0.6267,
"step": 5220
},
{
"epoch": 75.8,
"learning_rate": 2.8355932203389834e-05,
"loss": 0.6707,
"step": 5230
},
{
"epoch": 75.94,
"learning_rate": 2.8186440677966104e-05,
"loss": 0.6005,
"step": 5240
},
{
"epoch": 76.09,
"learning_rate": 2.8016949152542377e-05,
"loss": 0.5838,
"step": 5250
},
{
"epoch": 76.23,
"learning_rate": 2.7847457627118643e-05,
"loss": 0.6954,
"step": 5260
},
{
"epoch": 76.38,
"learning_rate": 2.767796610169492e-05,
"loss": 0.6658,
"step": 5270
},
{
"epoch": 76.52,
"learning_rate": 2.7508474576271186e-05,
"loss": 0.6252,
"step": 5280
},
{
"epoch": 76.67,
"learning_rate": 2.733898305084746e-05,
"loss": 0.6967,
"step": 5290
},
{
"epoch": 76.81,
"learning_rate": 2.716949152542373e-05,
"loss": 0.6216,
"step": 5300
},
{
"epoch": 76.96,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.6392,
"step": 5310
},
{
"epoch": 77.1,
"learning_rate": 2.6830508474576272e-05,
"loss": 0.6266,
"step": 5320
},
{
"epoch": 77.25,
"learning_rate": 2.6661016949152545e-05,
"loss": 0.5908,
"step": 5330
},
{
"epoch": 77.39,
"learning_rate": 2.6491525423728815e-05,
"loss": 0.6788,
"step": 5340
},
{
"epoch": 77.54,
"learning_rate": 2.6322033898305088e-05,
"loss": 0.6592,
"step": 5350
},
{
"epoch": 77.68,
"learning_rate": 2.6152542372881355e-05,
"loss": 0.6038,
"step": 5360
},
{
"epoch": 77.83,
"learning_rate": 2.598305084745763e-05,
"loss": 0.5884,
"step": 5370
},
{
"epoch": 77.97,
"learning_rate": 2.5813559322033898e-05,
"loss": 0.6433,
"step": 5380
},
{
"epoch": 78.12,
"learning_rate": 2.564406779661017e-05,
"loss": 0.6976,
"step": 5390
},
{
"epoch": 78.26,
"learning_rate": 2.547457627118644e-05,
"loss": 0.6922,
"step": 5400
},
{
"epoch": 78.41,
"learning_rate": 2.5305084745762714e-05,
"loss": 0.6621,
"step": 5410
},
{
"epoch": 78.55,
"learning_rate": 2.5135593220338983e-05,
"loss": 0.6388,
"step": 5420
},
{
"epoch": 78.7,
"learning_rate": 2.4966101694915257e-05,
"loss": 0.628,
"step": 5430
},
{
"epoch": 78.84,
"learning_rate": 2.4796610169491526e-05,
"loss": 0.5507,
"step": 5440
},
{
"epoch": 78.99,
"learning_rate": 2.46271186440678e-05,
"loss": 0.6358,
"step": 5450
},
{
"epoch": 79.13,
"learning_rate": 2.445762711864407e-05,
"loss": 0.8279,
"step": 5460
},
{
"epoch": 79.28,
"learning_rate": 2.428813559322034e-05,
"loss": 0.6108,
"step": 5470
},
{
"epoch": 79.42,
"learning_rate": 2.4118644067796612e-05,
"loss": 0.6535,
"step": 5480
},
{
"epoch": 79.57,
"learning_rate": 2.3949152542372882e-05,
"loss": 0.5962,
"step": 5490
},
{
"epoch": 79.71,
"learning_rate": 2.3779661016949155e-05,
"loss": 0.6318,
"step": 5500
},
{
"epoch": 79.71,
"eval_loss": 0.21294616162776947,
"eval_runtime": 575.7769,
"eval_samples_per_second": 5.895,
"eval_steps_per_second": 0.738,
"eval_wer": 0.16001446309855452,
"step": 5500
},
{
"epoch": 79.86,
"learning_rate": 2.3610169491525425e-05,
"loss": 0.5991,
"step": 5510
},
{
"epoch": 80.0,
"learning_rate": 2.3440677966101695e-05,
"loss": 0.6389,
"step": 5520
},
{
"epoch": 80.14,
"learning_rate": 2.3271186440677968e-05,
"loss": 0.5969,
"step": 5530
},
{
"epoch": 80.29,
"learning_rate": 2.3101694915254237e-05,
"loss": 0.626,
"step": 5540
},
{
"epoch": 80.43,
"learning_rate": 2.2932203389830507e-05,
"loss": 0.6123,
"step": 5550
},
{
"epoch": 80.58,
"learning_rate": 2.276271186440678e-05,
"loss": 0.6056,
"step": 5560
},
{
"epoch": 80.72,
"learning_rate": 2.259322033898305e-05,
"loss": 0.6004,
"step": 5570
},
{
"epoch": 80.87,
"learning_rate": 2.2423728813559323e-05,
"loss": 0.5924,
"step": 5580
},
{
"epoch": 81.01,
"learning_rate": 2.2254237288135593e-05,
"loss": 0.6622,
"step": 5590
},
{
"epoch": 81.16,
"learning_rate": 2.2084745762711863e-05,
"loss": 0.6285,
"step": 5600
},
{
"epoch": 81.3,
"learning_rate": 2.1915254237288136e-05,
"loss": 0.6199,
"step": 5610
},
{
"epoch": 81.45,
"learning_rate": 2.174576271186441e-05,
"loss": 0.6496,
"step": 5620
},
{
"epoch": 81.59,
"learning_rate": 2.157627118644068e-05,
"loss": 0.632,
"step": 5630
},
{
"epoch": 81.74,
"learning_rate": 2.1406779661016952e-05,
"loss": 0.5825,
"step": 5640
},
{
"epoch": 81.88,
"learning_rate": 2.1237288135593222e-05,
"loss": 0.7035,
"step": 5650
},
{
"epoch": 82.03,
"learning_rate": 2.1067796610169495e-05,
"loss": 0.5958,
"step": 5660
},
{
"epoch": 82.17,
"learning_rate": 2.0898305084745765e-05,
"loss": 0.6213,
"step": 5670
},
{
"epoch": 82.32,
"learning_rate": 2.0728813559322035e-05,
"loss": 0.621,
"step": 5680
},
{
"epoch": 82.46,
"learning_rate": 2.0559322033898308e-05,
"loss": 0.5801,
"step": 5690
},
{
"epoch": 82.61,
"learning_rate": 2.0389830508474577e-05,
"loss": 0.6511,
"step": 5700
},
{
"epoch": 82.75,
"learning_rate": 2.0220338983050847e-05,
"loss": 0.6268,
"step": 5710
},
{
"epoch": 82.9,
"learning_rate": 2.005084745762712e-05,
"loss": 0.642,
"step": 5720
},
{
"epoch": 83.04,
"learning_rate": 1.988135593220339e-05,
"loss": 0.6158,
"step": 5730
},
{
"epoch": 83.19,
"learning_rate": 1.9711864406779663e-05,
"loss": 0.6171,
"step": 5740
},
{
"epoch": 83.33,
"learning_rate": 1.9542372881355933e-05,
"loss": 0.5943,
"step": 5750
},
{
"epoch": 83.48,
"learning_rate": 1.9372881355932203e-05,
"loss": 0.6312,
"step": 5760
},
{
"epoch": 83.62,
"learning_rate": 1.9203389830508476e-05,
"loss": 0.6128,
"step": 5770
},
{
"epoch": 83.77,
"learning_rate": 1.9033898305084746e-05,
"loss": 0.6094,
"step": 5780
},
{
"epoch": 83.91,
"learning_rate": 1.886440677966102e-05,
"loss": 0.6403,
"step": 5790
},
{
"epoch": 84.06,
"learning_rate": 1.869491525423729e-05,
"loss": 0.5933,
"step": 5800
},
{
"epoch": 84.2,
"learning_rate": 1.852542372881356e-05,
"loss": 0.5684,
"step": 5810
},
{
"epoch": 84.35,
"learning_rate": 1.835593220338983e-05,
"loss": 0.6025,
"step": 5820
},
{
"epoch": 84.49,
"learning_rate": 1.81864406779661e-05,
"loss": 0.6225,
"step": 5830
},
{
"epoch": 84.64,
"learning_rate": 1.8016949152542374e-05,
"loss": 0.6246,
"step": 5840
},
{
"epoch": 84.78,
"learning_rate": 1.7847457627118644e-05,
"loss": 0.6186,
"step": 5850
},
{
"epoch": 84.93,
"learning_rate": 1.7677966101694914e-05,
"loss": 0.6706,
"step": 5860
},
{
"epoch": 85.07,
"learning_rate": 1.7508474576271187e-05,
"loss": 0.5923,
"step": 5870
},
{
"epoch": 85.22,
"learning_rate": 1.7338983050847457e-05,
"loss": 0.5892,
"step": 5880
},
{
"epoch": 85.36,
"learning_rate": 1.716949152542373e-05,
"loss": 0.6155,
"step": 5890
},
{
"epoch": 85.51,
"learning_rate": 1.7000000000000003e-05,
"loss": 0.545,
"step": 5900
},
{
"epoch": 85.65,
"learning_rate": 1.6830508474576273e-05,
"loss": 0.6541,
"step": 5910
},
{
"epoch": 85.8,
"learning_rate": 1.6661016949152543e-05,
"loss": 0.7079,
"step": 5920
},
{
"epoch": 85.94,
"learning_rate": 1.6491525423728816e-05,
"loss": 0.58,
"step": 5930
},
{
"epoch": 86.09,
"learning_rate": 1.6322033898305086e-05,
"loss": 0.631,
"step": 5940
},
{
"epoch": 86.23,
"learning_rate": 1.615254237288136e-05,
"loss": 0.6232,
"step": 5950
},
{
"epoch": 86.38,
"learning_rate": 1.598305084745763e-05,
"loss": 0.5722,
"step": 5960
},
{
"epoch": 86.52,
"learning_rate": 1.58135593220339e-05,
"loss": 0.6361,
"step": 5970
},
{
"epoch": 86.67,
"learning_rate": 1.564406779661017e-05,
"loss": 0.6603,
"step": 5980
},
{
"epoch": 86.81,
"learning_rate": 1.547457627118644e-05,
"loss": 0.6191,
"step": 5990
},
{
"epoch": 86.96,
"learning_rate": 1.5305084745762714e-05,
"loss": 0.6222,
"step": 6000
},
{
"epoch": 86.96,
"eval_loss": 0.2124408483505249,
"eval_runtime": 572.9918,
"eval_samples_per_second": 5.923,
"eval_steps_per_second": 0.742,
"eval_wer": 0.158223011118507,
"step": 6000
},
{
"epoch": 87.1,
"learning_rate": 6.088281250000001e-05,
"loss": 0.595,
"step": 6010
},
{
"epoch": 87.25,
"learning_rate": 6.08046875e-05,
"loss": 0.6517,
"step": 6020
},
{
"epoch": 87.39,
"learning_rate": 6.07265625e-05,
"loss": 0.6079,
"step": 6030
},
{
"epoch": 87.54,
"learning_rate": 6.06484375e-05,
"loss": 0.6203,
"step": 6040
},
{
"epoch": 87.68,
"learning_rate": 6.05703125e-05,
"loss": 0.6005,
"step": 6050
},
{
"epoch": 87.83,
"learning_rate": 6.0492187500000006e-05,
"loss": 0.6321,
"step": 6060
},
{
"epoch": 87.97,
"learning_rate": 6.04140625e-05,
"loss": 0.6156,
"step": 6070
},
{
"epoch": 88.12,
"learning_rate": 6.0335937500000005e-05,
"loss": 0.6329,
"step": 6080
},
{
"epoch": 88.26,
"learning_rate": 6.02578125e-05,
"loss": 0.6311,
"step": 6090
},
{
"epoch": 88.41,
"learning_rate": 6.0179687500000005e-05,
"loss": 0.6689,
"step": 6100
},
{
"epoch": 88.55,
"learning_rate": 6.010156250000001e-05,
"loss": 0.5824,
"step": 6110
},
{
"epoch": 88.7,
"learning_rate": 6.0023437500000005e-05,
"loss": 0.6491,
"step": 6120
},
{
"epoch": 88.84,
"learning_rate": 5.994531250000001e-05,
"loss": 0.6326,
"step": 6130
},
{
"epoch": 88.99,
"learning_rate": 5.98671875e-05,
"loss": 0.6272,
"step": 6140
},
{
"epoch": 89.13,
"learning_rate": 5.97890625e-05,
"loss": 0.6865,
"step": 6150
},
{
"epoch": 89.28,
"learning_rate": 5.971093750000001e-05,
"loss": 0.6149,
"step": 6160
},
{
"epoch": 89.42,
"learning_rate": 5.96328125e-05,
"loss": 0.6385,
"step": 6170
},
{
"epoch": 89.57,
"learning_rate": 5.9554687500000003e-05,
"loss": 0.6621,
"step": 6180
},
{
"epoch": 89.71,
"learning_rate": 5.94765625e-05,
"loss": 0.5817,
"step": 6190
},
{
"epoch": 89.86,
"learning_rate": 5.93984375e-05,
"loss": 0.6333,
"step": 6200
},
{
"epoch": 90.0,
"learning_rate": 5.9320312500000006e-05,
"loss": 0.5794,
"step": 6210
},
{
"epoch": 90.14,
"learning_rate": 5.92421875e-05,
"loss": 0.6153,
"step": 6220
},
{
"epoch": 90.29,
"learning_rate": 5.9164062500000006e-05,
"loss": 0.6794,
"step": 6230
},
{
"epoch": 90.43,
"learning_rate": 5.90859375e-05,
"loss": 0.601,
"step": 6240
},
{
"epoch": 90.58,
"learning_rate": 5.9007812500000005e-05,
"loss": 0.6176,
"step": 6250
},
{
"epoch": 90.72,
"learning_rate": 5.892968750000001e-05,
"loss": 0.6055,
"step": 6260
},
{
"epoch": 90.87,
"learning_rate": 5.88515625e-05,
"loss": 0.6389,
"step": 6270
},
{
"epoch": 91.01,
"learning_rate": 5.877343750000001e-05,
"loss": 0.6727,
"step": 6280
},
{
"epoch": 91.16,
"learning_rate": 5.86953125e-05,
"loss": 0.6592,
"step": 6290
},
{
"epoch": 91.3,
"learning_rate": 5.86171875e-05,
"loss": 0.6367,
"step": 6300
},
{
"epoch": 91.45,
"learning_rate": 5.853906250000001e-05,
"loss": 0.6366,
"step": 6310
},
{
"epoch": 91.59,
"learning_rate": 5.84609375e-05,
"loss": 0.6171,
"step": 6320
},
{
"epoch": 91.74,
"learning_rate": 5.8382812500000004e-05,
"loss": 0.6642,
"step": 6330
},
{
"epoch": 91.88,
"learning_rate": 5.83046875e-05,
"loss": 0.6345,
"step": 6340
},
{
"epoch": 92.03,
"learning_rate": 5.82265625e-05,
"loss": 0.6265,
"step": 6350
},
{
"epoch": 92.17,
"learning_rate": 5.8148437500000006e-05,
"loss": 0.6302,
"step": 6360
},
{
"epoch": 92.32,
"learning_rate": 5.80703125e-05,
"loss": 0.6347,
"step": 6370
},
{
"epoch": 92.46,
"learning_rate": 5.7992187500000006e-05,
"loss": 0.6858,
"step": 6380
},
{
"epoch": 92.61,
"learning_rate": 5.79140625e-05,
"loss": 0.5876,
"step": 6390
},
{
"epoch": 92.75,
"learning_rate": 5.7835937500000006e-05,
"loss": 0.6195,
"step": 6400
},
{
"epoch": 92.9,
"learning_rate": 5.775781250000001e-05,
"loss": 0.6559,
"step": 6410
},
{
"epoch": 93.04,
"learning_rate": 5.76796875e-05,
"loss": 0.6908,
"step": 6420
},
{
"epoch": 93.19,
"learning_rate": 5.760156250000001e-05,
"loss": 0.5887,
"step": 6430
},
{
"epoch": 93.33,
"learning_rate": 5.75234375e-05,
"loss": 0.6118,
"step": 6440
},
{
"epoch": 93.48,
"learning_rate": 5.74453125e-05,
"loss": 0.7352,
"step": 6450
},
{
"epoch": 93.62,
"learning_rate": 5.736718750000001e-05,
"loss": 0.6462,
"step": 6460
},
{
"epoch": 93.77,
"learning_rate": 5.72890625e-05,
"loss": 0.6448,
"step": 6470
},
{
"epoch": 93.91,
"learning_rate": 5.7210937500000004e-05,
"loss": 0.6806,
"step": 6480
},
{
"epoch": 94.06,
"learning_rate": 5.71328125e-05,
"loss": 0.6465,
"step": 6490
},
{
"epoch": 94.2,
"learning_rate": 5.7062500000000005e-05,
"loss": 0.608,
"step": 6500
},
{
"epoch": 94.2,
"eval_loss": 0.22552849352359772,
"eval_runtime": 596.3798,
"eval_samples_per_second": 5.691,
"eval_steps_per_second": 0.713,
"eval_wer": 0.1638849854958131,
"step": 6500
},
{
"epoch": 94.35,
"learning_rate": 5.6984375e-05,
"loss": 0.6931,
"step": 6510
},
{
"epoch": 94.49,
"learning_rate": 5.6906250000000004e-05,
"loss": 0.6172,
"step": 6520
},
{
"epoch": 94.64,
"learning_rate": 5.6828125e-05,
"loss": 0.686,
"step": 6530
},
{
"epoch": 94.78,
"learning_rate": 5.6750000000000004e-05,
"loss": 0.6993,
"step": 6540
},
{
"epoch": 94.93,
"learning_rate": 5.667187500000001e-05,
"loss": 0.6582,
"step": 6550
},
{
"epoch": 95.07,
"learning_rate": 5.6593750000000003e-05,
"loss": 0.6366,
"step": 6560
},
{
"epoch": 95.22,
"learning_rate": 5.6515625000000007e-05,
"loss": 0.6609,
"step": 6570
},
{
"epoch": 95.36,
"learning_rate": 5.6437499999999996e-05,
"loss": 0.5802,
"step": 6580
},
{
"epoch": 95.51,
"learning_rate": 5.6359375000000006e-05,
"loss": 0.5748,
"step": 6590
},
{
"epoch": 95.65,
"learning_rate": 5.628125000000001e-05,
"loss": 0.6195,
"step": 6600
},
{
"epoch": 95.8,
"learning_rate": 5.6203125e-05,
"loss": 0.6524,
"step": 6610
},
{
"epoch": 95.94,
"learning_rate": 5.6125e-05,
"loss": 0.6621,
"step": 6620
},
{
"epoch": 96.09,
"learning_rate": 5.6046875e-05,
"loss": 0.6074,
"step": 6630
},
{
"epoch": 96.23,
"learning_rate": 5.596875e-05,
"loss": 0.598,
"step": 6640
},
{
"epoch": 96.38,
"learning_rate": 5.5890625000000005e-05,
"loss": 0.6296,
"step": 6650
},
{
"epoch": 96.52,
"learning_rate": 5.58125e-05,
"loss": 0.6353,
"step": 6660
},
{
"epoch": 96.67,
"learning_rate": 5.5734375000000005e-05,
"loss": 0.6013,
"step": 6670
},
{
"epoch": 96.81,
"learning_rate": 5.565625e-05,
"loss": 0.606,
"step": 6680
},
{
"epoch": 96.96,
"learning_rate": 5.5578125000000004e-05,
"loss": 0.742,
"step": 6690
},
{
"epoch": 97.1,
"learning_rate": 5.550000000000001e-05,
"loss": 0.5763,
"step": 6700
},
{
"epoch": 97.25,
"learning_rate": 5.5421875000000004e-05,
"loss": 0.6839,
"step": 6710
},
{
"epoch": 97.39,
"learning_rate": 5.534375000000001e-05,
"loss": 0.6565,
"step": 6720
},
{
"epoch": 97.54,
"learning_rate": 5.5265624999999997e-05,
"loss": 0.6341,
"step": 6730
},
{
"epoch": 97.68,
"learning_rate": 5.51875e-05,
"loss": 0.6721,
"step": 6740
},
{
"epoch": 97.83,
"learning_rate": 5.510937500000001e-05,
"loss": 0.6134,
"step": 6750
},
{
"epoch": 97.97,
"learning_rate": 5.503125e-05,
"loss": 0.6161,
"step": 6760
},
{
"epoch": 98.12,
"learning_rate": 5.4953125e-05,
"loss": 0.6273,
"step": 6770
},
{
"epoch": 98.26,
"learning_rate": 5.4875e-05,
"loss": 0.5778,
"step": 6780
},
{
"epoch": 98.41,
"learning_rate": 5.4796875e-05,
"loss": 0.6879,
"step": 6790
},
{
"epoch": 98.55,
"learning_rate": 5.4718750000000005e-05,
"loss": 0.6652,
"step": 6800
},
{
"epoch": 98.7,
"learning_rate": 5.4640625e-05,
"loss": 0.6894,
"step": 6810
},
{
"epoch": 98.84,
"learning_rate": 5.4562500000000005e-05,
"loss": 0.5951,
"step": 6820
},
{
"epoch": 98.99,
"learning_rate": 5.4484375e-05,
"loss": 0.6397,
"step": 6830
},
{
"epoch": 99.13,
"learning_rate": 5.4406250000000004e-05,
"loss": 0.6221,
"step": 6840
},
{
"epoch": 99.28,
"learning_rate": 5.432812500000001e-05,
"loss": 0.7,
"step": 6850
},
{
"epoch": 99.42,
"learning_rate": 5.4250000000000004e-05,
"loss": 0.6638,
"step": 6860
},
{
"epoch": 99.57,
"learning_rate": 5.417187500000001e-05,
"loss": 0.6702,
"step": 6870
},
{
"epoch": 99.71,
"learning_rate": 5.409375e-05,
"loss": 0.637,
"step": 6880
},
{
"epoch": 99.86,
"learning_rate": 5.4015625e-05,
"loss": 0.6518,
"step": 6890
},
{
"epoch": 100.0,
"learning_rate": 5.393750000000001e-05,
"loss": 0.5912,
"step": 6900
},
{
"epoch": 100.14,
"learning_rate": 5.3859375e-05,
"loss": 0.6999,
"step": 6910
},
{
"epoch": 100.29,
"learning_rate": 5.378125e-05,
"loss": 0.5682,
"step": 6920
},
{
"epoch": 100.43,
"learning_rate": 5.3703125e-05,
"loss": 0.6023,
"step": 6930
},
{
"epoch": 100.58,
"learning_rate": 5.3625e-05,
"loss": 0.6437,
"step": 6940
},
{
"epoch": 100.72,
"learning_rate": 5.3546875000000006e-05,
"loss": 0.7168,
"step": 6950
},
{
"epoch": 100.87,
"learning_rate": 5.346875e-05,
"loss": 0.5913,
"step": 6960
},
{
"epoch": 101.01,
"learning_rate": 5.3390625000000005e-05,
"loss": 0.6047,
"step": 6970
},
{
"epoch": 101.16,
"learning_rate": 5.33125e-05,
"loss": 0.6355,
"step": 6980
},
{
"epoch": 101.3,
"learning_rate": 5.3234375000000005e-05,
"loss": 0.7046,
"step": 6990
},
{
"epoch": 101.45,
"learning_rate": 5.315625000000001e-05,
"loss": 0.6099,
"step": 7000
},
{
"epoch": 101.45,
"eval_loss": 0.22652995586395264,
"eval_runtime": 583.6707,
"eval_samples_per_second": 5.815,
"eval_steps_per_second": 0.728,
"eval_wer": 0.1621839278817313,
"step": 7000
},
{
"epoch": 101.59,
"learning_rate": 5.3078125e-05,
"loss": 0.5706,
"step": 7010
},
{
"epoch": 101.74,
"learning_rate": 5.300000000000001e-05,
"loss": 0.5656,
"step": 7020
},
{
"epoch": 101.88,
"learning_rate": 5.2921875e-05,
"loss": 0.6755,
"step": 7030
},
{
"epoch": 102.03,
"learning_rate": 5.284375e-05,
"loss": 0.6343,
"step": 7040
},
{
"epoch": 102.17,
"learning_rate": 5.276562500000001e-05,
"loss": 0.6129,
"step": 7050
},
{
"epoch": 102.32,
"learning_rate": 5.26875e-05,
"loss": 0.6321,
"step": 7060
},
{
"epoch": 102.46,
"learning_rate": 5.2609375e-05,
"loss": 0.6189,
"step": 7070
},
{
"epoch": 102.61,
"learning_rate": 5.253125e-05,
"loss": 0.6267,
"step": 7080
},
{
"epoch": 102.75,
"learning_rate": 5.2453125e-05,
"loss": 0.601,
"step": 7090
},
{
"epoch": 102.9,
"learning_rate": 5.2375000000000006e-05,
"loss": 0.6051,
"step": 7100
},
{
"epoch": 103.04,
"learning_rate": 5.2296875e-05,
"loss": 0.538,
"step": 7110
},
{
"epoch": 103.19,
"learning_rate": 5.2218750000000006e-05,
"loss": 0.6136,
"step": 7120
},
{
"epoch": 103.33,
"learning_rate": 5.2140624999999995e-05,
"loss": 0.625,
"step": 7130
},
{
"epoch": 103.48,
"learning_rate": 5.2062500000000005e-05,
"loss": 0.5999,
"step": 7140
},
{
"epoch": 103.62,
"learning_rate": 5.198437500000001e-05,
"loss": 0.694,
"step": 7150
},
{
"epoch": 103.77,
"learning_rate": 5.190625e-05,
"loss": 0.6446,
"step": 7160
},
{
"epoch": 103.91,
"learning_rate": 5.182812500000001e-05,
"loss": 0.5879,
"step": 7170
},
{
"epoch": 104.06,
"learning_rate": 5.175e-05,
"loss": 0.6115,
"step": 7180
},
{
"epoch": 104.2,
"learning_rate": 5.1671875e-05,
"loss": 0.5663,
"step": 7190
},
{
"epoch": 104.35,
"learning_rate": 5.159375000000001e-05,
"loss": 0.5825,
"step": 7200
},
{
"epoch": 104.49,
"learning_rate": 5.1515625e-05,
"loss": 0.5692,
"step": 7210
},
{
"epoch": 104.64,
"learning_rate": 5.1437500000000003e-05,
"loss": 0.614,
"step": 7220
},
{
"epoch": 104.78,
"learning_rate": 5.1359375e-05,
"loss": 0.6005,
"step": 7230
},
{
"epoch": 104.93,
"learning_rate": 5.128125e-05,
"loss": 0.6169,
"step": 7240
},
{
"epoch": 105.07,
"learning_rate": 5.1203125000000006e-05,
"loss": 0.6383,
"step": 7250
},
{
"epoch": 105.22,
"learning_rate": 5.1125e-05,
"loss": 0.5705,
"step": 7260
},
{
"epoch": 105.36,
"learning_rate": 5.1046875000000006e-05,
"loss": 0.6212,
"step": 7270
},
{
"epoch": 105.51,
"learning_rate": 5.0968749999999995e-05,
"loss": 0.5747,
"step": 7280
},
{
"epoch": 105.65,
"learning_rate": 5.0890625000000005e-05,
"loss": 0.6407,
"step": 7290
},
{
"epoch": 105.8,
"learning_rate": 5.081250000000001e-05,
"loss": 0.5993,
"step": 7300
},
{
"epoch": 105.94,
"learning_rate": 5.0734375e-05,
"loss": 0.6226,
"step": 7310
},
{
"epoch": 106.09,
"learning_rate": 5.065625000000001e-05,
"loss": 0.6316,
"step": 7320
},
{
"epoch": 106.23,
"learning_rate": 5.0578125e-05,
"loss": 0.6922,
"step": 7330
},
{
"epoch": 106.38,
"learning_rate": 5.05e-05,
"loss": 0.6252,
"step": 7340
},
{
"epoch": 106.52,
"learning_rate": 5.0421875000000004e-05,
"loss": 0.5841,
"step": 7350
},
{
"epoch": 106.67,
"learning_rate": 5.034375e-05,
"loss": 0.6174,
"step": 7360
},
{
"epoch": 106.81,
"learning_rate": 5.0265625000000004e-05,
"loss": 0.6482,
"step": 7370
},
{
"epoch": 106.96,
"learning_rate": 5.01875e-05,
"loss": 0.5965,
"step": 7380
},
{
"epoch": 107.1,
"learning_rate": 5.0109375e-05,
"loss": 0.6417,
"step": 7390
},
{
"epoch": 107.25,
"learning_rate": 5.0031250000000007e-05,
"loss": 0.6373,
"step": 7400
},
{
"epoch": 107.39,
"learning_rate": 4.9953125e-05,
"loss": 0.6205,
"step": 7410
},
{
"epoch": 107.54,
"learning_rate": 4.9875000000000006e-05,
"loss": 0.5891,
"step": 7420
},
{
"epoch": 107.68,
"learning_rate": 4.9796875e-05,
"loss": 0.6333,
"step": 7430
},
{
"epoch": 107.83,
"learning_rate": 4.9718750000000006e-05,
"loss": 0.6479,
"step": 7440
},
{
"epoch": 107.97,
"learning_rate": 4.9640625e-05,
"loss": 0.5854,
"step": 7450
},
{
"epoch": 108.12,
"learning_rate": 4.95625e-05,
"loss": 0.602,
"step": 7460
},
{
"epoch": 108.26,
"learning_rate": 4.9484375e-05,
"loss": 0.6362,
"step": 7470
},
{
"epoch": 108.41,
"learning_rate": 4.9406250000000005e-05,
"loss": 0.7472,
"step": 7480
},
{
"epoch": 108.55,
"learning_rate": 4.9328125e-05,
"loss": 0.6158,
"step": 7490
},
{
"epoch": 108.7,
"learning_rate": 4.9250000000000004e-05,
"loss": 0.6069,
"step": 7500
},
{
"epoch": 108.7,
"eval_loss": 0.22459882497787476,
"eval_runtime": 574.1645,
"eval_samples_per_second": 5.911,
"eval_steps_per_second": 0.74,
"eval_wer": 0.1592502198226627,
"step": 7500
},
{
"epoch": 108.84,
"learning_rate": 4.9171875e-05,
"loss": 0.7501,
"step": 7510
},
{
"epoch": 108.99,
"learning_rate": 4.9093750000000004e-05,
"loss": 0.585,
"step": 7520
},
{
"epoch": 109.13,
"learning_rate": 4.901562500000001e-05,
"loss": 0.6811,
"step": 7530
},
{
"epoch": 109.28,
"learning_rate": 4.8937500000000004e-05,
"loss": 0.7343,
"step": 7540
},
{
"epoch": 109.42,
"learning_rate": 4.8859375e-05,
"loss": 0.6275,
"step": 7550
},
{
"epoch": 109.57,
"learning_rate": 4.878125e-05,
"loss": 0.6326,
"step": 7560
},
{
"epoch": 109.71,
"learning_rate": 4.8703125000000006e-05,
"loss": 0.6374,
"step": 7570
},
{
"epoch": 109.86,
"learning_rate": 4.8625e-05,
"loss": 0.6079,
"step": 7580
},
{
"epoch": 110.0,
"learning_rate": 4.8546875000000006e-05,
"loss": 0.5572,
"step": 7590
},
{
"epoch": 110.14,
"learning_rate": 4.846875e-05,
"loss": 0.6443,
"step": 7600
},
{
"epoch": 110.29,
"learning_rate": 4.8390625e-05,
"loss": 0.5898,
"step": 7610
},
{
"epoch": 110.43,
"learning_rate": 4.83125e-05,
"loss": 0.6258,
"step": 7620
},
{
"epoch": 110.58,
"learning_rate": 4.8234375000000005e-05,
"loss": 0.6048,
"step": 7630
},
{
"epoch": 110.72,
"learning_rate": 4.815625e-05,
"loss": 0.6092,
"step": 7640
},
{
"epoch": 110.87,
"learning_rate": 4.8078125000000005e-05,
"loss": 0.5939,
"step": 7650
},
{
"epoch": 111.01,
"learning_rate": 4.8e-05,
"loss": 0.6584,
"step": 7660
},
{
"epoch": 111.16,
"learning_rate": 4.7921875000000004e-05,
"loss": 0.6788,
"step": 7670
},
{
"epoch": 111.3,
"learning_rate": 4.784375e-05,
"loss": 0.6859,
"step": 7680
},
{
"epoch": 111.45,
"learning_rate": 4.7765625000000004e-05,
"loss": 0.6178,
"step": 7690
},
{
"epoch": 111.59,
"learning_rate": 4.76875e-05,
"loss": 0.5804,
"step": 7700
},
{
"epoch": 111.74,
"learning_rate": 4.7609375000000004e-05,
"loss": 0.693,
"step": 7710
},
{
"epoch": 111.88,
"learning_rate": 4.753125000000001e-05,
"loss": 0.6894,
"step": 7720
},
{
"epoch": 112.03,
"learning_rate": 4.7453125e-05,
"loss": 0.5552,
"step": 7730
},
{
"epoch": 112.17,
"learning_rate": 4.7375e-05,
"loss": 0.5774,
"step": 7740
},
{
"epoch": 112.32,
"learning_rate": 4.7296875e-05,
"loss": 0.5482,
"step": 7750
},
{
"epoch": 112.46,
"learning_rate": 4.721875e-05,
"loss": 0.6571,
"step": 7760
},
{
"epoch": 112.61,
"learning_rate": 4.7140625e-05,
"loss": 0.6455,
"step": 7770
},
{
"epoch": 112.75,
"learning_rate": 4.7062500000000006e-05,
"loss": 0.6115,
"step": 7780
},
{
"epoch": 112.9,
"learning_rate": 4.6984375e-05,
"loss": 0.6006,
"step": 7790
},
{
"epoch": 113.04,
"learning_rate": 4.690625e-05,
"loss": 0.5906,
"step": 7800
},
{
"epoch": 113.19,
"learning_rate": 4.6828125e-05,
"loss": 0.5477,
"step": 7810
},
{
"epoch": 113.33,
"learning_rate": 4.6750000000000005e-05,
"loss": 0.6492,
"step": 7820
},
{
"epoch": 113.48,
"learning_rate": 4.6671875e-05,
"loss": 0.5919,
"step": 7830
},
{
"epoch": 113.62,
"learning_rate": 4.6593750000000004e-05,
"loss": 0.5931,
"step": 7840
},
{
"epoch": 113.77,
"learning_rate": 4.6515625e-05,
"loss": 0.6467,
"step": 7850
},
{
"epoch": 113.91,
"learning_rate": 4.64375e-05,
"loss": 0.5646,
"step": 7860
},
{
"epoch": 114.06,
"learning_rate": 4.635937500000001e-05,
"loss": 0.6137,
"step": 7870
},
{
"epoch": 114.2,
"learning_rate": 4.6281250000000003e-05,
"loss": 0.5523,
"step": 7880
},
{
"epoch": 114.35,
"learning_rate": 4.6203125e-05,
"loss": 0.6965,
"step": 7890
},
{
"epoch": 114.49,
"learning_rate": 4.6125e-05,
"loss": 0.542,
"step": 7900
},
{
"epoch": 114.64,
"learning_rate": 4.6046875e-05,
"loss": 0.5662,
"step": 7910
},
{
"epoch": 114.78,
"learning_rate": 4.596875e-05,
"loss": 0.5677,
"step": 7920
},
{
"epoch": 114.93,
"learning_rate": 4.5890625000000006e-05,
"loss": 0.5547,
"step": 7930
},
{
"epoch": 115.07,
"learning_rate": 4.58125e-05,
"loss": 0.6085,
"step": 7940
},
{
"epoch": 115.22,
"learning_rate": 4.5734375e-05,
"loss": 0.5735,
"step": 7950
},
{
"epoch": 115.36,
"learning_rate": 4.565625e-05,
"loss": 0.5977,
"step": 7960
},
{
"epoch": 115.51,
"learning_rate": 4.5578125000000005e-05,
"loss": 0.6654,
"step": 7970
},
{
"epoch": 115.65,
"learning_rate": 4.55e-05,
"loss": 0.6036,
"step": 7980
},
{
"epoch": 115.8,
"learning_rate": 4.5421875000000005e-05,
"loss": 0.6112,
"step": 7990
},
{
"epoch": 115.94,
"learning_rate": 4.534375e-05,
"loss": 0.5929,
"step": 8000
},
{
"epoch": 115.94,
"eval_loss": 0.2322680950164795,
"eval_runtime": 572.4419,
"eval_samples_per_second": 5.929,
"eval_steps_per_second": 0.742,
"eval_wer": 0.16172373838226956,
"step": 8000
},
{
"epoch": 116.09,
"learning_rate": 4.5265625e-05,
"loss": 0.5724,
"step": 8010
},
{
"epoch": 116.23,
"learning_rate": 4.518750000000001e-05,
"loss": 0.589,
"step": 8020
},
{
"epoch": 116.38,
"learning_rate": 4.5109375000000004e-05,
"loss": 0.607,
"step": 8030
},
{
"epoch": 116.52,
"learning_rate": 4.503125e-05,
"loss": 0.6171,
"step": 8040
},
{
"epoch": 116.67,
"learning_rate": 4.4953125000000003e-05,
"loss": 0.6011,
"step": 8050
},
{
"epoch": 116.81,
"learning_rate": 4.4875e-05,
"loss": 0.6247,
"step": 8060
},
{
"epoch": 116.96,
"learning_rate": 4.4796875e-05,
"loss": 0.566,
"step": 8070
},
{
"epoch": 117.1,
"learning_rate": 4.4718750000000006e-05,
"loss": 0.6967,
"step": 8080
},
{
"epoch": 117.25,
"learning_rate": 4.4640625e-05,
"loss": 0.6296,
"step": 8090
},
{
"epoch": 117.39,
"learning_rate": 4.45625e-05,
"loss": 0.6186,
"step": 8100
},
{
"epoch": 117.54,
"learning_rate": 4.4484375e-05,
"loss": 0.5811,
"step": 8110
},
{
"epoch": 117.68,
"learning_rate": 4.4406250000000005e-05,
"loss": 0.5486,
"step": 8120
},
{
"epoch": 117.83,
"learning_rate": 4.4328125e-05,
"loss": 0.5805,
"step": 8130
},
{
"epoch": 117.97,
"learning_rate": 4.4250000000000005e-05,
"loss": 0.572,
"step": 8140
},
{
"epoch": 118.12,
"learning_rate": 4.4171875e-05,
"loss": 0.6108,
"step": 8150
},
{
"epoch": 118.26,
"learning_rate": 4.409375e-05,
"loss": 0.666,
"step": 8160
},
{
"epoch": 118.41,
"learning_rate": 4.401562500000001e-05,
"loss": 0.5758,
"step": 8170
},
{
"epoch": 118.55,
"learning_rate": 4.3937500000000004e-05,
"loss": 0.6212,
"step": 8180
},
{
"epoch": 118.7,
"learning_rate": 4.3859375e-05,
"loss": 0.6679,
"step": 8190
},
{
"epoch": 118.84,
"learning_rate": 4.3781250000000004e-05,
"loss": 0.6261,
"step": 8200
},
{
"epoch": 118.99,
"learning_rate": 4.3703125e-05,
"loss": 0.6025,
"step": 8210
},
{
"epoch": 119.13,
"learning_rate": 4.3625e-05,
"loss": 0.6722,
"step": 8220
},
{
"epoch": 119.28,
"learning_rate": 4.3546875000000006e-05,
"loss": 0.5637,
"step": 8230
},
{
"epoch": 119.42,
"learning_rate": 4.346875e-05,
"loss": 0.6138,
"step": 8240
},
{
"epoch": 119.57,
"learning_rate": 4.3390625e-05,
"loss": 0.6397,
"step": 8250
},
{
"epoch": 119.71,
"learning_rate": 4.33125e-05,
"loss": 0.5852,
"step": 8260
},
{
"epoch": 119.86,
"learning_rate": 4.3234375000000006e-05,
"loss": 0.616,
"step": 8270
},
{
"epoch": 120.0,
"learning_rate": 4.315625e-05,
"loss": 0.5824,
"step": 8280
},
{
"epoch": 120.14,
"learning_rate": 4.3078125000000005e-05,
"loss": 0.5909,
"step": 8290
},
{
"epoch": 120.29,
"learning_rate": 4.3e-05,
"loss": 0.6523,
"step": 8300
},
{
"epoch": 120.43,
"learning_rate": 4.2921875e-05,
"loss": 0.6134,
"step": 8310
},
{
"epoch": 120.58,
"learning_rate": 4.284375000000001e-05,
"loss": 0.5903,
"step": 8320
},
{
"epoch": 120.72,
"learning_rate": 4.2765625000000004e-05,
"loss": 0.5471,
"step": 8330
},
{
"epoch": 120.87,
"learning_rate": 4.26875e-05,
"loss": 0.62,
"step": 8340
},
{
"epoch": 121.01,
"learning_rate": 4.2609375000000004e-05,
"loss": 0.6299,
"step": 8350
},
{
"epoch": 121.16,
"learning_rate": 4.253125e-05,
"loss": 0.5991,
"step": 8360
},
{
"epoch": 121.3,
"learning_rate": 4.2453125000000004e-05,
"loss": 0.597,
"step": 8370
},
{
"epoch": 121.45,
"learning_rate": 4.237500000000001e-05,
"loss": 0.5925,
"step": 8380
},
{
"epoch": 121.59,
"learning_rate": 4.2296875e-05,
"loss": 0.6336,
"step": 8390
},
{
"epoch": 121.74,
"learning_rate": 4.221875e-05,
"loss": 0.5826,
"step": 8400
},
{
"epoch": 121.88,
"learning_rate": 4.2140625e-05,
"loss": 0.5964,
"step": 8410
},
{
"epoch": 122.03,
"learning_rate": 4.2062500000000006e-05,
"loss": 0.5978,
"step": 8420
},
{
"epoch": 122.17,
"learning_rate": 4.1984375e-05,
"loss": 0.6456,
"step": 8430
},
{
"epoch": 122.32,
"learning_rate": 4.1906250000000006e-05,
"loss": 0.6088,
"step": 8440
},
{
"epoch": 122.46,
"learning_rate": 4.1828125e-05,
"loss": 0.6204,
"step": 8450
},
{
"epoch": 122.61,
"learning_rate": 4.175e-05,
"loss": 0.5545,
"step": 8460
},
{
"epoch": 122.75,
"learning_rate": 4.1671875e-05,
"loss": 0.5823,
"step": 8470
},
{
"epoch": 122.9,
"learning_rate": 4.1593750000000005e-05,
"loss": 0.5386,
"step": 8480
},
{
"epoch": 123.04,
"learning_rate": 4.1515625e-05,
"loss": 0.5619,
"step": 8490
},
{
"epoch": 123.19,
"learning_rate": 4.1437500000000004e-05,
"loss": 0.6218,
"step": 8500
},
{
"epoch": 123.19,
"eval_loss": 0.22871814668178558,
"eval_runtime": 573.0196,
"eval_samples_per_second": 5.923,
"eval_steps_per_second": 0.742,
"eval_wer": 0.1565712595222247,
"step": 8500
},
{
"epoch": 123.33,
"learning_rate": 4.1359375e-05,
"loss": 0.6351,
"step": 8510
},
{
"epoch": 123.48,
"learning_rate": 4.1281250000000004e-05,
"loss": 0.5678,
"step": 8520
},
{
"epoch": 123.62,
"learning_rate": 4.1203125e-05,
"loss": 0.7051,
"step": 8530
},
{
"epoch": 123.77,
"learning_rate": 4.1125000000000004e-05,
"loss": 0.5785,
"step": 8540
},
{
"epoch": 123.91,
"learning_rate": 4.1046875e-05,
"loss": 0.6527,
"step": 8550
},
{
"epoch": 124.06,
"learning_rate": 4.096875e-05,
"loss": 0.5509,
"step": 8560
},
{
"epoch": 124.2,
"learning_rate": 4.0890625000000006e-05,
"loss": 0.6065,
"step": 8570
},
{
"epoch": 124.35,
"learning_rate": 4.08125e-05,
"loss": 0.6249,
"step": 8580
},
{
"epoch": 124.49,
"learning_rate": 4.0734375e-05,
"loss": 0.5477,
"step": 8590
},
{
"epoch": 124.64,
"learning_rate": 4.065625e-05,
"loss": 0.6066,
"step": 8600
},
{
"epoch": 124.78,
"learning_rate": 4.0585937500000007e-05,
"loss": 0.5868,
"step": 8610
},
{
"epoch": 124.93,
"learning_rate": 4.05078125e-05,
"loss": 0.5902,
"step": 8620
},
{
"epoch": 125.07,
"learning_rate": 4.04296875e-05,
"loss": 0.6138,
"step": 8630
},
{
"epoch": 125.22,
"learning_rate": 4.03515625e-05,
"loss": 0.6382,
"step": 8640
},
{
"epoch": 125.36,
"learning_rate": 4.02734375e-05,
"loss": 0.6279,
"step": 8650
},
{
"epoch": 125.51,
"learning_rate": 4.01953125e-05,
"loss": 0.6699,
"step": 8660
},
{
"epoch": 125.65,
"learning_rate": 4.0117187500000005e-05,
"loss": 0.6396,
"step": 8670
},
{
"epoch": 125.8,
"learning_rate": 4.00390625e-05,
"loss": 0.564,
"step": 8680
},
{
"epoch": 125.94,
"learning_rate": 3.99609375e-05,
"loss": 0.556,
"step": 8690
},
{
"epoch": 126.09,
"learning_rate": 3.98828125e-05,
"loss": 0.6425,
"step": 8700
},
{
"epoch": 126.23,
"learning_rate": 3.9804687500000004e-05,
"loss": 0.5753,
"step": 8710
},
{
"epoch": 126.38,
"learning_rate": 3.97265625e-05,
"loss": 0.5892,
"step": 8720
},
{
"epoch": 126.52,
"learning_rate": 3.9648437500000004e-05,
"loss": 0.5827,
"step": 8730
},
{
"epoch": 126.67,
"learning_rate": 3.95703125e-05,
"loss": 0.6517,
"step": 8740
},
{
"epoch": 126.81,
"learning_rate": 3.94921875e-05,
"loss": 0.5346,
"step": 8750
},
{
"epoch": 126.96,
"learning_rate": 3.941406250000001e-05,
"loss": 0.5868,
"step": 8760
},
{
"epoch": 127.1,
"learning_rate": 3.93359375e-05,
"loss": 0.5673,
"step": 8770
},
{
"epoch": 127.25,
"learning_rate": 3.92578125e-05,
"loss": 0.59,
"step": 8780
},
{
"epoch": 127.39,
"learning_rate": 3.91796875e-05,
"loss": 0.5705,
"step": 8790
},
{
"epoch": 127.54,
"learning_rate": 3.91015625e-05,
"loss": 0.5958,
"step": 8800
},
{
"epoch": 127.68,
"learning_rate": 3.90234375e-05,
"loss": 0.5522,
"step": 8810
},
{
"epoch": 127.83,
"learning_rate": 3.8945312500000006e-05,
"loss": 0.581,
"step": 8820
},
{
"epoch": 127.97,
"learning_rate": 3.88671875e-05,
"loss": 0.5469,
"step": 8830
},
{
"epoch": 128.12,
"learning_rate": 3.87890625e-05,
"loss": 0.6312,
"step": 8840
},
{
"epoch": 128.26,
"learning_rate": 3.87109375e-05,
"loss": 0.6486,
"step": 8850
},
{
"epoch": 128.41,
"learning_rate": 3.8632812500000005e-05,
"loss": 0.6362,
"step": 8860
},
{
"epoch": 128.55,
"learning_rate": 3.85546875e-05,
"loss": 0.5602,
"step": 8870
},
{
"epoch": 128.7,
"learning_rate": 3.8476562500000004e-05,
"loss": 0.5554,
"step": 8880
},
{
"epoch": 128.84,
"learning_rate": 3.83984375e-05,
"loss": 0.5491,
"step": 8890
},
{
"epoch": 128.99,
"learning_rate": 3.83203125e-05,
"loss": 0.5681,
"step": 8900
},
{
"epoch": 129.13,
"learning_rate": 3.824218750000001e-05,
"loss": 0.6239,
"step": 8910
},
{
"epoch": 129.28,
"learning_rate": 3.8164062500000004e-05,
"loss": 0.5759,
"step": 8920
},
{
"epoch": 129.42,
"learning_rate": 3.80859375e-05,
"loss": 0.5868,
"step": 8930
},
{
"epoch": 129.57,
"learning_rate": 3.8015625e-05,
"loss": 0.5796,
"step": 8940
},
{
"epoch": 129.71,
"learning_rate": 3.79375e-05,
"loss": 0.5501,
"step": 8950
},
{
"epoch": 129.86,
"learning_rate": 3.7859375000000004e-05,
"loss": 0.5976,
"step": 8960
},
{
"epoch": 130.0,
"learning_rate": 3.778125e-05,
"loss": 0.5488,
"step": 8970
},
{
"epoch": 130.14,
"learning_rate": 3.7703125e-05,
"loss": 0.5688,
"step": 8980
},
{
"epoch": 130.29,
"learning_rate": 3.7625e-05,
"loss": 0.5448,
"step": 8990
},
{
"epoch": 130.43,
"learning_rate": 3.7546875e-05,
"loss": 0.5751,
"step": 9000
},
{
"epoch": 130.43,
"eval_loss": 0.22747375071048737,
"eval_runtime": 574.7485,
"eval_samples_per_second": 5.905,
"eval_steps_per_second": 0.739,
"eval_wer": 0.1562672057457946,
"step": 9000
},
{
"epoch": 130.58,
"learning_rate": 3.746875e-05,
"loss": 0.6,
"step": 9010
},
{
"epoch": 130.72,
"learning_rate": 3.7390625e-05,
"loss": 0.5781,
"step": 9020
},
{
"epoch": 130.87,
"learning_rate": 3.73125e-05,
"loss": 0.5545,
"step": 9030
},
{
"epoch": 131.01,
"learning_rate": 3.7234375e-05,
"loss": 0.6116,
"step": 9040
},
{
"epoch": 131.16,
"learning_rate": 3.7156250000000005e-05,
"loss": 0.5634,
"step": 9050
},
{
"epoch": 131.3,
"learning_rate": 3.7078125e-05,
"loss": 0.6436,
"step": 9060
},
{
"epoch": 131.45,
"learning_rate": 3.7e-05,
"loss": 0.5882,
"step": 9070
},
{
"epoch": 131.59,
"learning_rate": 3.6921875e-05,
"loss": 0.5391,
"step": 9080
},
{
"epoch": 131.74,
"learning_rate": 3.684375e-05,
"loss": 0.5211,
"step": 9090
},
{
"epoch": 131.88,
"learning_rate": 3.6765625e-05,
"loss": 0.5971,
"step": 9100
},
{
"epoch": 132.03,
"learning_rate": 3.6687500000000004e-05,
"loss": 0.5314,
"step": 9110
},
{
"epoch": 132.17,
"learning_rate": 3.6609375e-05,
"loss": 0.573,
"step": 9120
},
{
"epoch": 132.32,
"learning_rate": 3.653125e-05,
"loss": 0.5678,
"step": 9130
},
{
"epoch": 132.46,
"learning_rate": 3.6453125e-05,
"loss": 0.5369,
"step": 9140
},
{
"epoch": 132.61,
"learning_rate": 3.6375e-05,
"loss": 0.5976,
"step": 9150
},
{
"epoch": 132.75,
"learning_rate": 3.6296875e-05,
"loss": 0.609,
"step": 9160
},
{
"epoch": 132.9,
"learning_rate": 3.621875e-05,
"loss": 0.5859,
"step": 9170
},
{
"epoch": 133.04,
"learning_rate": 3.6140625e-05,
"loss": 0.6005,
"step": 9180
},
{
"epoch": 133.19,
"learning_rate": 3.60625e-05,
"loss": 0.5796,
"step": 9190
},
{
"epoch": 133.33,
"learning_rate": 3.5984375000000006e-05,
"loss": 0.5125,
"step": 9200
},
{
"epoch": 133.48,
"learning_rate": 3.590625e-05,
"loss": 0.5465,
"step": 9210
},
{
"epoch": 133.62,
"learning_rate": 3.5828125e-05,
"loss": 0.5985,
"step": 9220
},
{
"epoch": 133.77,
"learning_rate": 3.575e-05,
"loss": 0.5687,
"step": 9230
},
{
"epoch": 133.91,
"learning_rate": 3.5671875e-05,
"loss": 0.6586,
"step": 9240
},
{
"epoch": 134.06,
"learning_rate": 3.559375e-05,
"loss": 0.5656,
"step": 9250
},
{
"epoch": 134.2,
"learning_rate": 3.5515625000000004e-05,
"loss": 0.6136,
"step": 9260
},
{
"epoch": 134.35,
"learning_rate": 3.54375e-05,
"loss": 0.6063,
"step": 9270
},
{
"epoch": 134.49,
"learning_rate": 3.5359375e-05,
"loss": 0.5242,
"step": 9280
},
{
"epoch": 134.64,
"learning_rate": 3.528125e-05,
"loss": 0.5605,
"step": 9290
},
{
"epoch": 134.78,
"learning_rate": 3.5203125000000004e-05,
"loss": 0.5774,
"step": 9300
},
{
"epoch": 134.93,
"learning_rate": 3.5125e-05,
"loss": 0.5937,
"step": 9310
},
{
"epoch": 135.07,
"learning_rate": 3.5046875e-05,
"loss": 0.5875,
"step": 9320
},
{
"epoch": 135.22,
"learning_rate": 3.496875e-05,
"loss": 0.5483,
"step": 9330
},
{
"epoch": 135.36,
"learning_rate": 3.4890624999999996e-05,
"loss": 0.5705,
"step": 9340
},
{
"epoch": 135.51,
"learning_rate": 3.4812500000000006e-05,
"loss": 0.6026,
"step": 9350
},
{
"epoch": 135.65,
"learning_rate": 3.4734375e-05,
"loss": 0.5888,
"step": 9360
},
{
"epoch": 135.8,
"learning_rate": 3.465625e-05,
"loss": 0.5913,
"step": 9370
},
{
"epoch": 135.94,
"learning_rate": 3.4578125e-05,
"loss": 0.5792,
"step": 9380
},
{
"epoch": 136.09,
"learning_rate": 3.45e-05,
"loss": 0.615,
"step": 9390
},
{
"epoch": 136.23,
"learning_rate": 3.4421875e-05,
"loss": 0.5827,
"step": 9400
},
{
"epoch": 136.38,
"learning_rate": 3.4343750000000005e-05,
"loss": 0.5956,
"step": 9410
},
{
"epoch": 136.52,
"learning_rate": 3.4265625e-05,
"loss": 0.4951,
"step": 9420
},
{
"epoch": 136.67,
"learning_rate": 3.41875e-05,
"loss": 0.5786,
"step": 9430
},
{
"epoch": 136.81,
"learning_rate": 3.4109375e-05,
"loss": 0.5307,
"step": 9440
},
{
"epoch": 136.96,
"learning_rate": 3.4031250000000004e-05,
"loss": 0.6652,
"step": 9450
},
{
"epoch": 137.1,
"learning_rate": 3.3953125e-05,
"loss": 0.5879,
"step": 9460
},
{
"epoch": 137.25,
"learning_rate": 3.3875000000000003e-05,
"loss": 0.6949,
"step": 9470
},
{
"epoch": 137.39,
"learning_rate": 3.3796875e-05,
"loss": 0.5537,
"step": 9480
},
{
"epoch": 137.54,
"learning_rate": 3.3718749999999996e-05,
"loss": 0.5923,
"step": 9490
},
{
"epoch": 137.68,
"learning_rate": 3.3640625000000006e-05,
"loss": 0.5181,
"step": 9500
},
{
"epoch": 137.68,
"eval_loss": 0.23162005841732025,
"eval_runtime": 574.0095,
"eval_samples_per_second": 5.913,
"eval_steps_per_second": 0.74,
"eval_wer": 0.15791895734207692,
"step": 9500
},
{
"epoch": 137.83,
"learning_rate": 3.35625e-05,
"loss": 0.5543,
"step": 9510
},
{
"epoch": 137.97,
"learning_rate": 3.3484375e-05,
"loss": 0.5875,
"step": 9520
},
{
"epoch": 138.12,
"learning_rate": 3.340625e-05,
"loss": 0.5757,
"step": 9530
},
{
"epoch": 138.26,
"learning_rate": 3.3328125e-05,
"loss": 0.5366,
"step": 9540
},
{
"epoch": 138.41,
"learning_rate": 3.325e-05,
"loss": 0.5328,
"step": 9550
},
{
"epoch": 138.55,
"learning_rate": 3.3171875000000005e-05,
"loss": 0.5193,
"step": 9560
},
{
"epoch": 138.7,
"learning_rate": 3.309375e-05,
"loss": 0.5377,
"step": 9570
},
{
"epoch": 138.84,
"learning_rate": 3.3015625e-05,
"loss": 0.5687,
"step": 9580
},
{
"epoch": 138.99,
"learning_rate": 3.29375e-05,
"loss": 0.5416,
"step": 9590
},
{
"epoch": 139.13,
"learning_rate": 3.2859375000000004e-05,
"loss": 0.5427,
"step": 9600
},
{
"epoch": 139.28,
"learning_rate": 3.278125e-05,
"loss": 0.577,
"step": 9610
},
{
"epoch": 139.42,
"learning_rate": 3.2703125000000004e-05,
"loss": 0.5943,
"step": 9620
},
{
"epoch": 139.57,
"learning_rate": 3.2625e-05,
"loss": 0.5481,
"step": 9630
},
{
"epoch": 139.71,
"learning_rate": 3.2546874999999997e-05,
"loss": 0.5904,
"step": 9640
},
{
"epoch": 139.86,
"learning_rate": 3.2468750000000007e-05,
"loss": 0.6591,
"step": 9650
},
{
"epoch": 140.0,
"learning_rate": 3.2390625e-05,
"loss": 0.6146,
"step": 9660
},
{
"epoch": 140.14,
"learning_rate": 3.23125e-05,
"loss": 0.5974,
"step": 9670
},
{
"epoch": 140.29,
"learning_rate": 3.2234375e-05,
"loss": 0.546,
"step": 9680
},
{
"epoch": 140.43,
"learning_rate": 3.215625e-05,
"loss": 0.5433,
"step": 9690
},
{
"epoch": 140.58,
"learning_rate": 3.2078125e-05,
"loss": 0.5357,
"step": 9700
},
{
"epoch": 140.72,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.6074,
"step": 9710
},
{
"epoch": 140.87,
"learning_rate": 3.1921875e-05,
"loss": 0.5543,
"step": 9720
},
{
"epoch": 141.01,
"learning_rate": 3.184375e-05,
"loss": 0.5415,
"step": 9730
},
{
"epoch": 141.16,
"learning_rate": 3.1765625e-05,
"loss": 0.5853,
"step": 9740
},
{
"epoch": 141.3,
"learning_rate": 3.1687500000000005e-05,
"loss": 0.5143,
"step": 9750
},
{
"epoch": 141.45,
"learning_rate": 3.1609375e-05,
"loss": 0.503,
"step": 9760
},
{
"epoch": 141.59,
"learning_rate": 3.1531250000000004e-05,
"loss": 0.5154,
"step": 9770
},
{
"epoch": 141.74,
"learning_rate": 3.1453125e-05,
"loss": 0.562,
"step": 9780
},
{
"epoch": 141.88,
"learning_rate": 3.1375e-05,
"loss": 0.5454,
"step": 9790
},
{
"epoch": 142.03,
"learning_rate": 3.1296875e-05,
"loss": 0.5722,
"step": 9800
},
{
"epoch": 142.17,
"learning_rate": 3.121875e-05,
"loss": 0.6911,
"step": 9810
},
{
"epoch": 142.32,
"learning_rate": 3.1140625e-05,
"loss": 0.5725,
"step": 9820
},
{
"epoch": 142.46,
"learning_rate": 3.10625e-05,
"loss": 0.5842,
"step": 9830
},
{
"epoch": 142.61,
"learning_rate": 3.0984375e-05,
"loss": 0.5597,
"step": 9840
},
{
"epoch": 142.75,
"learning_rate": 3.090625e-05,
"loss": 0.592,
"step": 9850
},
{
"epoch": 142.9,
"learning_rate": 3.0828125e-05,
"loss": 0.5506,
"step": 9860
},
{
"epoch": 143.04,
"learning_rate": 3.075e-05,
"loss": 0.6292,
"step": 9870
},
{
"epoch": 143.19,
"learning_rate": 3.0671875e-05,
"loss": 0.5672,
"step": 9880
},
{
"epoch": 143.33,
"learning_rate": 3.059375e-05,
"loss": 0.5315,
"step": 9890
},
{
"epoch": 143.48,
"learning_rate": 3.0515625000000005e-05,
"loss": 0.5563,
"step": 9900
},
{
"epoch": 143.62,
"learning_rate": 3.04375e-05,
"loss": 0.5877,
"step": 9910
},
{
"epoch": 143.77,
"learning_rate": 3.0359375e-05,
"loss": 0.546,
"step": 9920
},
{
"epoch": 143.91,
"learning_rate": 3.028125e-05,
"loss": 0.5425,
"step": 9930
},
{
"epoch": 144.06,
"learning_rate": 3.0203124999999997e-05,
"loss": 0.5368,
"step": 9940
},
{
"epoch": 144.2,
"learning_rate": 3.0125000000000004e-05,
"loss": 0.538,
"step": 9950
},
{
"epoch": 144.35,
"learning_rate": 3.0046875000000004e-05,
"loss": 0.5367,
"step": 9960
},
{
"epoch": 144.49,
"learning_rate": 2.996875e-05,
"loss": 0.5591,
"step": 9970
},
{
"epoch": 144.64,
"learning_rate": 2.9890625e-05,
"loss": 0.5554,
"step": 9980
},
{
"epoch": 144.78,
"learning_rate": 2.98125e-05,
"loss": 0.4985,
"step": 9990
},
{
"epoch": 144.93,
"learning_rate": 2.9734375000000003e-05,
"loss": 0.6306,
"step": 10000
},
{
"epoch": 144.93,
"eval_loss": 0.23715920746326447,
"eval_runtime": 577.4706,
"eval_samples_per_second": 5.877,
"eval_steps_per_second": 0.736,
"eval_wer": 0.15560157450550172,
"step": 10000
},
{
"epoch": 145.07,
"learning_rate": 2.9656250000000003e-05,
"loss": 0.5794,
"step": 10010
},
{
"epoch": 145.22,
"learning_rate": 2.9578125000000002e-05,
"loss": 0.5792,
"step": 10020
},
{
"epoch": 145.36,
"learning_rate": 2.95e-05,
"loss": 0.5664,
"step": 10030
},
{
"epoch": 145.51,
"learning_rate": 2.9421875e-05,
"loss": 0.5737,
"step": 10040
},
{
"epoch": 145.65,
"learning_rate": 2.9343750000000002e-05,
"loss": 0.5735,
"step": 10050
},
{
"epoch": 145.8,
"learning_rate": 2.9265625e-05,
"loss": 0.5972,
"step": 10060
},
{
"epoch": 145.94,
"learning_rate": 2.91875e-05,
"loss": 0.5998,
"step": 10070
},
{
"epoch": 146.09,
"learning_rate": 2.9109375e-05,
"loss": 0.5357,
"step": 10080
},
{
"epoch": 146.23,
"learning_rate": 2.9031249999999998e-05,
"loss": 0.522,
"step": 10090
},
{
"epoch": 146.38,
"learning_rate": 2.8953125000000004e-05,
"loss": 0.5231,
"step": 10100
},
{
"epoch": 146.52,
"learning_rate": 2.8875e-05,
"loss": 0.5343,
"step": 10110
},
{
"epoch": 146.67,
"learning_rate": 2.8796875e-05,
"loss": 0.5996,
"step": 10120
},
{
"epoch": 146.81,
"learning_rate": 2.871875e-05,
"loss": 0.5595,
"step": 10130
},
{
"epoch": 146.96,
"learning_rate": 2.8640625e-05,
"loss": 0.5386,
"step": 10140
},
{
"epoch": 147.1,
"learning_rate": 2.8562500000000003e-05,
"loss": 0.5338,
"step": 10150
},
{
"epoch": 147.25,
"learning_rate": 2.8484375000000003e-05,
"loss": 0.5273,
"step": 10160
},
{
"epoch": 147.39,
"learning_rate": 2.840625e-05,
"loss": 0.628,
"step": 10170
},
{
"epoch": 147.54,
"learning_rate": 2.8328125e-05,
"loss": 0.5602,
"step": 10180
},
{
"epoch": 147.68,
"learning_rate": 2.825e-05,
"loss": 0.621,
"step": 10190
},
{
"epoch": 147.83,
"learning_rate": 2.8171875000000002e-05,
"loss": 0.6112,
"step": 10200
},
{
"epoch": 147.97,
"learning_rate": 2.8093750000000002e-05,
"loss": 0.4686,
"step": 10210
},
{
"epoch": 148.12,
"learning_rate": 2.8015625e-05,
"loss": 0.5582,
"step": 10220
},
{
"epoch": 148.26,
"learning_rate": 2.79375e-05,
"loss": 0.5353,
"step": 10230
},
{
"epoch": 148.41,
"learning_rate": 2.7859374999999998e-05,
"loss": 0.5545,
"step": 10240
},
{
"epoch": 148.55,
"learning_rate": 2.7781250000000004e-05,
"loss": 0.497,
"step": 10250
},
{
"epoch": 148.7,
"learning_rate": 2.7703125e-05,
"loss": 0.6022,
"step": 10260
},
{
"epoch": 148.84,
"learning_rate": 2.7625e-05,
"loss": 0.5438,
"step": 10270
},
{
"epoch": 148.99,
"learning_rate": 2.7546875e-05,
"loss": 0.6067,
"step": 10280
},
{
"epoch": 149.13,
"learning_rate": 2.746875e-05,
"loss": 0.5966,
"step": 10290
},
{
"epoch": 149.28,
"learning_rate": 2.7390625000000003e-05,
"loss": 0.6345,
"step": 10300
},
{
"epoch": 149.42,
"learning_rate": 2.7312500000000003e-05,
"loss": 0.5105,
"step": 10310
},
{
"epoch": 149.57,
"learning_rate": 2.7234375e-05,
"loss": 0.573,
"step": 10320
},
{
"epoch": 149.71,
"learning_rate": 2.715625e-05,
"loss": 0.5542,
"step": 10330
},
{
"epoch": 149.86,
"learning_rate": 2.7078125e-05,
"loss": 0.6154,
"step": 10340
},
{
"epoch": 150.0,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.5475,
"step": 10350
},
{
"epoch": 150.14,
"learning_rate": 2.6921875000000002e-05,
"loss": 0.5476,
"step": 10360
},
{
"epoch": 150.29,
"learning_rate": 2.6843750000000002e-05,
"loss": 0.5667,
"step": 10370
},
{
"epoch": 150.43,
"learning_rate": 2.6765625e-05,
"loss": 0.5752,
"step": 10380
},
{
"epoch": 150.58,
"learning_rate": 2.6687499999999998e-05,
"loss": 0.5797,
"step": 10390
},
{
"epoch": 150.72,
"learning_rate": 2.6609375000000005e-05,
"loss": 0.5585,
"step": 10400
},
{
"epoch": 150.87,
"learning_rate": 2.653125e-05,
"loss": 0.6719,
"step": 10410
},
{
"epoch": 151.01,
"learning_rate": 2.6453125e-05,
"loss": 0.535,
"step": 10420
},
{
"epoch": 151.16,
"learning_rate": 2.6375e-05,
"loss": 0.5853,
"step": 10430
},
{
"epoch": 151.3,
"learning_rate": 2.6296874999999997e-05,
"loss": 0.5341,
"step": 10440
},
{
"epoch": 151.45,
"learning_rate": 2.6218750000000004e-05,
"loss": 0.4932,
"step": 10450
},
{
"epoch": 151.59,
"learning_rate": 2.6140625000000004e-05,
"loss": 0.5924,
"step": 10460
},
{
"epoch": 151.74,
"learning_rate": 2.60625e-05,
"loss": 0.5665,
"step": 10470
},
{
"epoch": 151.88,
"learning_rate": 2.5984375e-05,
"loss": 0.5202,
"step": 10480
},
{
"epoch": 152.03,
"learning_rate": 2.590625e-05,
"loss": 0.5209,
"step": 10490
},
{
"epoch": 152.17,
"learning_rate": 2.5828125000000003e-05,
"loss": 0.5874,
"step": 10500
},
{
"epoch": 152.17,
"eval_loss": 0.23621943593025208,
"eval_runtime": 575.3543,
"eval_samples_per_second": 5.899,
"eval_steps_per_second": 0.739,
"eval_wer": 0.15330884467782627,
"step": 10500
},
{
"epoch": 152.32,
"learning_rate": 2.5750000000000002e-05,
"loss": 0.57,
"step": 10510
},
{
"epoch": 152.46,
"learning_rate": 2.5671875000000002e-05,
"loss": 0.58,
"step": 10520
},
{
"epoch": 152.61,
"learning_rate": 2.559375e-05,
"loss": 0.5269,
"step": 10530
},
{
"epoch": 152.75,
"learning_rate": 2.5515625e-05,
"loss": 0.5519,
"step": 10540
},
{
"epoch": 152.9,
"learning_rate": 2.54375e-05,
"loss": 0.497,
"step": 10550
},
{
"epoch": 153.04,
"learning_rate": 2.5359375e-05,
"loss": 0.5686,
"step": 10560
},
{
"epoch": 153.19,
"learning_rate": 2.528125e-05,
"loss": 0.5219,
"step": 10570
},
{
"epoch": 153.33,
"learning_rate": 2.5203125e-05,
"loss": 0.5695,
"step": 10580
},
{
"epoch": 153.48,
"learning_rate": 2.5124999999999997e-05,
"loss": 0.5747,
"step": 10590
},
{
"epoch": 153.62,
"learning_rate": 2.5046875000000004e-05,
"loss": 0.5297,
"step": 10600
},
{
"epoch": 153.77,
"learning_rate": 2.496875e-05,
"loss": 0.5956,
"step": 10610
},
{
"epoch": 153.91,
"learning_rate": 2.4890625e-05,
"loss": 0.5752,
"step": 10620
},
{
"epoch": 154.06,
"learning_rate": 2.4812500000000003e-05,
"loss": 0.5765,
"step": 10630
},
{
"epoch": 154.2,
"learning_rate": 2.4734375e-05,
"loss": 0.5627,
"step": 10640
},
{
"epoch": 154.35,
"learning_rate": 2.465625e-05,
"loss": 0.5591,
"step": 10650
},
{
"epoch": 154.49,
"learning_rate": 2.4578125000000003e-05,
"loss": 0.5595,
"step": 10660
},
{
"epoch": 154.64,
"learning_rate": 2.45e-05,
"loss": 0.5978,
"step": 10670
},
{
"epoch": 154.78,
"learning_rate": 2.4421875000000002e-05,
"loss": 0.5249,
"step": 10680
},
{
"epoch": 154.93,
"learning_rate": 2.4343750000000002e-05,
"loss": 0.5522,
"step": 10690
},
{
"epoch": 155.07,
"learning_rate": 2.4265625e-05,
"loss": 0.5152,
"step": 10700
},
{
"epoch": 155.22,
"learning_rate": 2.4187500000000002e-05,
"loss": 0.6,
"step": 10710
},
{
"epoch": 155.36,
"learning_rate": 2.4109375e-05,
"loss": 0.588,
"step": 10720
},
{
"epoch": 155.51,
"learning_rate": 2.403125e-05,
"loss": 0.52,
"step": 10730
},
{
"epoch": 155.65,
"learning_rate": 2.3953125e-05,
"loss": 0.5191,
"step": 10740
},
{
"epoch": 155.8,
"learning_rate": 2.3875e-05,
"loss": 0.5456,
"step": 10750
},
{
"epoch": 155.94,
"learning_rate": 2.3796875e-05,
"loss": 0.6365,
"step": 10760
},
{
"epoch": 156.09,
"learning_rate": 2.371875e-05,
"loss": 0.5344,
"step": 10770
},
{
"epoch": 156.23,
"learning_rate": 2.3640625000000004e-05,
"loss": 0.5185,
"step": 10780
},
{
"epoch": 156.38,
"learning_rate": 2.35625e-05,
"loss": 0.5619,
"step": 10790
},
{
"epoch": 156.52,
"learning_rate": 2.3484375e-05,
"loss": 0.5248,
"step": 10800
},
{
"epoch": 156.67,
"learning_rate": 2.3406250000000003e-05,
"loss": 0.499,
"step": 10810
},
{
"epoch": 156.81,
"learning_rate": 2.3328125e-05,
"loss": 0.5657,
"step": 10820
},
{
"epoch": 156.96,
"learning_rate": 2.3250000000000003e-05,
"loss": 0.52,
"step": 10830
},
{
"epoch": 157.1,
"learning_rate": 2.3171875000000003e-05,
"loss": 0.5119,
"step": 10840
},
{
"epoch": 157.25,
"learning_rate": 2.309375e-05,
"loss": 0.5749,
"step": 10850
},
{
"epoch": 157.39,
"learning_rate": 2.3015625000000002e-05,
"loss": 0.57,
"step": 10860
},
{
"epoch": 157.54,
"learning_rate": 2.2937500000000002e-05,
"loss": 0.5096,
"step": 10870
},
{
"epoch": 157.68,
"learning_rate": 2.2859375e-05,
"loss": 0.5025,
"step": 10880
},
{
"epoch": 157.83,
"learning_rate": 2.278125e-05,
"loss": 0.5884,
"step": 10890
},
{
"epoch": 157.97,
"learning_rate": 2.2703125e-05,
"loss": 0.4872,
"step": 10900
},
{
"epoch": 158.12,
"learning_rate": 2.2625e-05,
"loss": 0.5518,
"step": 10910
},
{
"epoch": 158.26,
"learning_rate": 2.2546875e-05,
"loss": 0.5348,
"step": 10920
},
{
"epoch": 158.41,
"learning_rate": 2.246875e-05,
"loss": 0.5997,
"step": 10930
},
{
"epoch": 158.55,
"learning_rate": 2.2390625e-05,
"loss": 0.5602,
"step": 10940
},
{
"epoch": 158.7,
"learning_rate": 2.23125e-05,
"loss": 0.5996,
"step": 10950
},
{
"epoch": 158.84,
"learning_rate": 2.2234375e-05,
"loss": 0.6152,
"step": 10960
},
{
"epoch": 158.99,
"learning_rate": 2.215625e-05,
"loss": 0.5291,
"step": 10970
},
{
"epoch": 159.13,
"learning_rate": 2.2078125000000003e-05,
"loss": 0.55,
"step": 10980
},
{
"epoch": 159.28,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.6242,
"step": 10990
},
{
"epoch": 159.42,
"learning_rate": 2.1921875e-05,
"loss": 0.5546,
"step": 11000
},
{
"epoch": 159.42,
"eval_loss": 0.23421239852905273,
"eval_runtime": 576.3026,
"eval_samples_per_second": 5.889,
"eval_steps_per_second": 0.737,
"eval_wer": 0.15427852969454922,
"step": 11000
},
{
"epoch": 159.57,
"learning_rate": 2.1843750000000002e-05,
"loss": 0.5893,
"step": 11010
},
{
"epoch": 159.71,
"learning_rate": 2.1765625000000002e-05,
"loss": 0.5654,
"step": 11020
},
{
"epoch": 159.86,
"learning_rate": 2.1687500000000002e-05,
"loss": 0.5748,
"step": 11030
},
{
"epoch": 160.0,
"learning_rate": 2.1609375000000002e-05,
"loss": 0.5317,
"step": 11040
},
{
"epoch": 160.14,
"learning_rate": 2.153125e-05,
"loss": 0.4892,
"step": 11050
},
{
"epoch": 160.29,
"learning_rate": 2.1453125e-05,
"loss": 0.544,
"step": 11060
},
{
"epoch": 160.43,
"learning_rate": 2.1375e-05,
"loss": 0.5197,
"step": 11070
},
{
"epoch": 160.58,
"learning_rate": 2.1296875e-05,
"loss": 0.5127,
"step": 11080
},
{
"epoch": 160.72,
"learning_rate": 2.121875e-05,
"loss": 0.5438,
"step": 11090
},
{
"epoch": 160.87,
"learning_rate": 2.1140625e-05,
"loss": 0.5184,
"step": 11100
},
{
"epoch": 161.01,
"learning_rate": 2.10625e-05,
"loss": 0.6122,
"step": 11110
},
{
"epoch": 161.16,
"learning_rate": 2.0984375e-05,
"loss": 0.5086,
"step": 11120
},
{
"epoch": 161.3,
"learning_rate": 2.0906250000000003e-05,
"loss": 0.6104,
"step": 11130
},
{
"epoch": 161.45,
"learning_rate": 2.0828125e-05,
"loss": 0.5168,
"step": 11140
},
{
"epoch": 161.59,
"learning_rate": 2.075e-05,
"loss": 0.5145,
"step": 11150
},
{
"epoch": 161.74,
"learning_rate": 2.0671875000000003e-05,
"loss": 0.5389,
"step": 11160
},
{
"epoch": 161.88,
"learning_rate": 2.059375e-05,
"loss": 0.671,
"step": 11170
},
{
"epoch": 162.03,
"learning_rate": 2.05234375e-05,
"loss": 0.5659,
"step": 11180
},
{
"epoch": 162.17,
"learning_rate": 2.0453125e-05,
"loss": 0.5035,
"step": 11190
},
{
"epoch": 162.32,
"learning_rate": 2.0375e-05,
"loss": 0.5038,
"step": 11200
},
{
"epoch": 162.46,
"learning_rate": 2.0296875e-05,
"loss": 0.6035,
"step": 11210
},
{
"epoch": 162.61,
"learning_rate": 2.021875e-05,
"loss": 0.5472,
"step": 11220
},
{
"epoch": 162.75,
"learning_rate": 2.0140625000000003e-05,
"loss": 0.5149,
"step": 11230
},
{
"epoch": 162.9,
"learning_rate": 2.00625e-05,
"loss": 0.6043,
"step": 11240
},
{
"epoch": 163.04,
"learning_rate": 1.9984375e-05,
"loss": 0.5377,
"step": 11250
},
{
"epoch": 163.19,
"learning_rate": 1.9906250000000003e-05,
"loss": 0.5328,
"step": 11260
},
{
"epoch": 163.33,
"learning_rate": 1.9828125e-05,
"loss": 0.4993,
"step": 11270
},
{
"epoch": 163.48,
"learning_rate": 1.9750000000000002e-05,
"loss": 0.5234,
"step": 11280
},
{
"epoch": 163.62,
"learning_rate": 1.9671875000000002e-05,
"loss": 0.6392,
"step": 11290
},
{
"epoch": 163.77,
"learning_rate": 1.959375e-05,
"loss": 0.5308,
"step": 11300
},
{
"epoch": 163.91,
"learning_rate": 1.9515625000000002e-05,
"loss": 0.5907,
"step": 11310
},
{
"epoch": 164.06,
"learning_rate": 1.94375e-05,
"loss": 0.5679,
"step": 11320
},
{
"epoch": 164.2,
"learning_rate": 1.9359375e-05,
"loss": 0.5094,
"step": 11330
},
{
"epoch": 164.35,
"learning_rate": 1.928125e-05,
"loss": 0.5877,
"step": 11340
},
{
"epoch": 164.49,
"learning_rate": 1.9203125e-05,
"loss": 0.5177,
"step": 11350
},
{
"epoch": 164.64,
"learning_rate": 1.9125e-05,
"loss": 0.5452,
"step": 11360
},
{
"epoch": 164.78,
"learning_rate": 1.9046875e-05,
"loss": 0.568,
"step": 11370
},
{
"epoch": 164.93,
"learning_rate": 1.896875e-05,
"loss": 0.5717,
"step": 11380
},
{
"epoch": 165.07,
"learning_rate": 1.8890625e-05,
"loss": 0.5861,
"step": 11390
},
{
"epoch": 165.22,
"learning_rate": 1.88125e-05,
"loss": 0.5153,
"step": 11400
},
{
"epoch": 165.36,
"learning_rate": 1.8734375e-05,
"loss": 0.5797,
"step": 11410
},
{
"epoch": 165.51,
"learning_rate": 1.865625e-05,
"loss": 0.5237,
"step": 11420
},
{
"epoch": 165.65,
"learning_rate": 1.8578125000000003e-05,
"loss": 0.5606,
"step": 11430
},
{
"epoch": 165.8,
"learning_rate": 1.85e-05,
"loss": 0.5389,
"step": 11440
},
{
"epoch": 165.94,
"learning_rate": 1.8421875e-05,
"loss": 0.572,
"step": 11450
},
{
"epoch": 166.09,
"learning_rate": 1.8343750000000002e-05,
"loss": 0.5169,
"step": 11460
},
{
"epoch": 166.23,
"learning_rate": 1.8265625e-05,
"loss": 0.506,
"step": 11470
},
{
"epoch": 166.38,
"learning_rate": 1.81875e-05,
"loss": 0.5406,
"step": 11480
},
{
"epoch": 166.52,
"learning_rate": 1.8109375e-05,
"loss": 0.5913,
"step": 11490
},
{
"epoch": 166.67,
"learning_rate": 1.803125e-05,
"loss": 0.6294,
"step": 11500
},
{
"epoch": 166.67,
"eval_loss": 0.23806829750537872,
"eval_runtime": 576.0097,
"eval_samples_per_second": 5.892,
"eval_steps_per_second": 0.738,
"eval_wer": 0.15362933379352284,
"step": 11500
},
{
"epoch": 166.81,
"learning_rate": 1.7953125e-05,
"loss": 0.51,
"step": 11510
},
{
"epoch": 166.96,
"learning_rate": 1.7875e-05,
"loss": 0.5439,
"step": 11520
},
{
"epoch": 167.1,
"learning_rate": 1.7796875e-05,
"loss": 0.4877,
"step": 11530
},
{
"epoch": 167.25,
"learning_rate": 1.771875e-05,
"loss": 0.5299,
"step": 11540
},
{
"epoch": 167.39,
"learning_rate": 1.7640625e-05,
"loss": 0.5475,
"step": 11550
},
{
"epoch": 167.54,
"learning_rate": 1.75625e-05,
"loss": 0.5661,
"step": 11560
},
{
"epoch": 167.68,
"learning_rate": 1.7484375e-05,
"loss": 0.5416,
"step": 11570
},
{
"epoch": 167.83,
"learning_rate": 1.7406250000000003e-05,
"loss": 0.4929,
"step": 11580
},
{
"epoch": 167.97,
"learning_rate": 1.7328125e-05,
"loss": 0.5195,
"step": 11590
},
{
"epoch": 168.12,
"learning_rate": 1.725e-05,
"loss": 0.5883,
"step": 11600
},
{
"epoch": 168.26,
"learning_rate": 1.7171875000000002e-05,
"loss": 0.5723,
"step": 11610
},
{
"epoch": 168.41,
"learning_rate": 1.709375e-05,
"loss": 0.5621,
"step": 11620
},
{
"epoch": 168.55,
"learning_rate": 1.7015625000000002e-05,
"loss": 0.5567,
"step": 11630
},
{
"epoch": 168.7,
"learning_rate": 1.6937500000000002e-05,
"loss": 0.4995,
"step": 11640
},
{
"epoch": 168.84,
"learning_rate": 1.6859374999999998e-05,
"loss": 0.5601,
"step": 11650
},
{
"epoch": 168.99,
"learning_rate": 1.678125e-05,
"loss": 0.5122,
"step": 11660
},
{
"epoch": 169.13,
"learning_rate": 1.6703125e-05,
"loss": 0.5406,
"step": 11670
},
{
"epoch": 169.28,
"learning_rate": 1.6625e-05,
"loss": 0.4914,
"step": 11680
},
{
"epoch": 169.42,
"learning_rate": 1.6546875e-05,
"loss": 0.6525,
"step": 11690
},
{
"epoch": 169.57,
"learning_rate": 1.646875e-05,
"loss": 0.5181,
"step": 11700
},
{
"epoch": 169.71,
"learning_rate": 1.6390625e-05,
"loss": 0.5328,
"step": 11710
},
{
"epoch": 169.86,
"learning_rate": 1.63125e-05,
"loss": 0.5583,
"step": 11720
},
{
"epoch": 170.0,
"learning_rate": 1.6234375000000003e-05,
"loss": 0.5376,
"step": 11730
},
{
"epoch": 170.14,
"learning_rate": 1.615625e-05,
"loss": 0.5182,
"step": 11740
},
{
"epoch": 170.29,
"learning_rate": 1.6078125e-05,
"loss": 0.4756,
"step": 11750
},
{
"epoch": 170.43,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.5295,
"step": 11760
},
{
"epoch": 170.58,
"learning_rate": 1.5921875e-05,
"loss": 0.5482,
"step": 11770
},
{
"epoch": 170.72,
"learning_rate": 1.5843750000000002e-05,
"loss": 0.5513,
"step": 11780
},
{
"epoch": 170.87,
"learning_rate": 1.5765625000000002e-05,
"loss": 0.5341,
"step": 11790
},
{
"epoch": 171.01,
"learning_rate": 1.56875e-05,
"loss": 0.4755,
"step": 11800
},
{
"epoch": 171.16,
"learning_rate": 1.5609375e-05,
"loss": 0.5352,
"step": 11810
},
{
"epoch": 171.3,
"learning_rate": 1.553125e-05,
"loss": 0.5318,
"step": 11820
},
{
"epoch": 171.45,
"learning_rate": 1.5453125e-05,
"loss": 0.4488,
"step": 11830
},
{
"epoch": 171.59,
"learning_rate": 1.5375e-05,
"loss": 0.6219,
"step": 11840
},
{
"epoch": 171.74,
"learning_rate": 1.5296875e-05,
"loss": 0.5449,
"step": 11850
},
{
"epoch": 171.88,
"learning_rate": 1.521875e-05,
"loss": 0.5278,
"step": 11860
},
{
"epoch": 172.03,
"learning_rate": 1.5140625e-05,
"loss": 0.539,
"step": 11870
},
{
"epoch": 172.17,
"learning_rate": 1.5062500000000002e-05,
"loss": 0.5436,
"step": 11880
},
{
"epoch": 172.32,
"learning_rate": 1.4984375e-05,
"loss": 0.5874,
"step": 11890
},
{
"epoch": 172.46,
"learning_rate": 1.490625e-05,
"loss": 0.5529,
"step": 11900
},
{
"epoch": 172.61,
"learning_rate": 1.4828125000000001e-05,
"loss": 0.5403,
"step": 11910
},
{
"epoch": 172.75,
"learning_rate": 1.475e-05,
"loss": 0.4999,
"step": 11920
},
{
"epoch": 172.9,
"learning_rate": 1.4671875000000001e-05,
"loss": 0.6045,
"step": 11930
},
{
"epoch": 173.04,
"learning_rate": 1.459375e-05,
"loss": 0.5237,
"step": 11940
},
{
"epoch": 173.19,
"learning_rate": 1.4515624999999999e-05,
"loss": 0.5556,
"step": 11950
},
{
"epoch": 173.33,
"learning_rate": 1.44375e-05,
"loss": 0.5837,
"step": 11960
},
{
"epoch": 173.48,
"learning_rate": 1.4359375e-05,
"loss": 0.5021,
"step": 11970
},
{
"epoch": 173.62,
"learning_rate": 1.4281250000000002e-05,
"loss": 0.5294,
"step": 11980
},
{
"epoch": 173.77,
"learning_rate": 1.4203125e-05,
"loss": 0.5379,
"step": 11990
},
{
"epoch": 173.91,
"learning_rate": 1.4125e-05,
"loss": 0.5989,
"step": 12000
},
{
"epoch": 173.91,
"eval_loss": 0.23597407341003418,
"eval_runtime": 575.4715,
"eval_samples_per_second": 5.898,
"eval_steps_per_second": 0.739,
"eval_wer": 0.1526843017856996,
"step": 12000
},
{
"epoch": 174.06,
"learning_rate": 1.4046875000000001e-05,
"loss": 0.5167,
"step": 12010
},
{
"epoch": 174.2,
"learning_rate": 1.396875e-05,
"loss": 0.5127,
"step": 12020
},
{
"epoch": 174.35,
"learning_rate": 1.3890625000000002e-05,
"loss": 0.613,
"step": 12030
},
{
"epoch": 174.49,
"learning_rate": 1.38125e-05,
"loss": 0.5287,
"step": 12040
},
{
"epoch": 174.64,
"learning_rate": 1.3734375e-05,
"loss": 0.5609,
"step": 12050
},
{
"epoch": 174.78,
"learning_rate": 1.3656250000000002e-05,
"loss": 0.5278,
"step": 12060
},
{
"epoch": 174.93,
"learning_rate": 1.3578125e-05,
"loss": 0.5928,
"step": 12070
},
{
"epoch": 175.07,
"learning_rate": 1.3500000000000001e-05,
"loss": 0.56,
"step": 12080
},
{
"epoch": 175.22,
"learning_rate": 1.3421875000000001e-05,
"loss": 0.5716,
"step": 12090
},
{
"epoch": 175.36,
"learning_rate": 1.3343749999999999e-05,
"loss": 0.5499,
"step": 12100
},
{
"epoch": 175.51,
"learning_rate": 1.3265625e-05,
"loss": 0.476,
"step": 12110
},
{
"epoch": 175.65,
"learning_rate": 1.31875e-05,
"loss": 0.5134,
"step": 12120
},
{
"epoch": 175.8,
"learning_rate": 1.3109375000000002e-05,
"loss": 0.5093,
"step": 12130
},
{
"epoch": 175.94,
"learning_rate": 1.303125e-05,
"loss": 0.579,
"step": 12140
},
{
"epoch": 176.09,
"learning_rate": 1.2953125e-05,
"loss": 0.4885,
"step": 12150
},
{
"epoch": 176.23,
"learning_rate": 1.2875000000000001e-05,
"loss": 0.5433,
"step": 12160
},
{
"epoch": 176.38,
"learning_rate": 1.2796875e-05,
"loss": 0.5137,
"step": 12170
},
{
"epoch": 176.52,
"learning_rate": 1.271875e-05,
"loss": 0.5202,
"step": 12180
},
{
"epoch": 176.67,
"learning_rate": 1.2640625e-05,
"loss": 0.5336,
"step": 12190
},
{
"epoch": 176.81,
"learning_rate": 1.2562499999999999e-05,
"loss": 0.5573,
"step": 12200
},
{
"epoch": 176.96,
"learning_rate": 1.2484375e-05,
"loss": 0.4605,
"step": 12210
},
{
"epoch": 177.1,
"learning_rate": 1.2406250000000002e-05,
"loss": 0.4848,
"step": 12220
},
{
"epoch": 177.25,
"learning_rate": 1.2328125e-05,
"loss": 0.5071,
"step": 12230
},
{
"epoch": 177.39,
"learning_rate": 1.225e-05,
"loss": 0.4797,
"step": 12240
},
{
"epoch": 177.54,
"learning_rate": 1.2171875000000001e-05,
"loss": 0.5308,
"step": 12250
},
{
"epoch": 177.68,
"learning_rate": 1.2093750000000001e-05,
"loss": 0.6085,
"step": 12260
},
{
"epoch": 177.83,
"learning_rate": 1.2015625e-05,
"loss": 0.5489,
"step": 12270
},
{
"epoch": 177.97,
"learning_rate": 1.19375e-05,
"loss": 0.4995,
"step": 12280
},
{
"epoch": 178.12,
"learning_rate": 1.1859375e-05,
"loss": 0.5686,
"step": 12290
},
{
"epoch": 178.26,
"learning_rate": 1.178125e-05,
"loss": 0.5471,
"step": 12300
},
{
"epoch": 178.41,
"learning_rate": 1.1703125000000002e-05,
"loss": 0.5439,
"step": 12310
},
{
"epoch": 178.55,
"learning_rate": 1.1625000000000001e-05,
"loss": 0.53,
"step": 12320
},
{
"epoch": 178.7,
"learning_rate": 1.1546875e-05,
"loss": 0.5401,
"step": 12330
},
{
"epoch": 178.84,
"learning_rate": 1.1468750000000001e-05,
"loss": 0.5493,
"step": 12340
},
{
"epoch": 178.99,
"learning_rate": 1.1390625e-05,
"loss": 0.5021,
"step": 12350
},
{
"epoch": 179.13,
"learning_rate": 1.13125e-05,
"loss": 0.5518,
"step": 12360
},
{
"epoch": 179.28,
"learning_rate": 1.1234375e-05,
"loss": 0.512,
"step": 12370
},
{
"epoch": 179.42,
"learning_rate": 1.115625e-05,
"loss": 0.503,
"step": 12380
},
{
"epoch": 179.57,
"learning_rate": 1.1078125e-05,
"loss": 0.5771,
"step": 12390
},
{
"epoch": 179.71,
"learning_rate": 1.1000000000000001e-05,
"loss": 0.5227,
"step": 12400
},
{
"epoch": 179.86,
"learning_rate": 1.0921875000000001e-05,
"loss": 0.5993,
"step": 12410
},
{
"epoch": 180.0,
"learning_rate": 1.0843750000000001e-05,
"loss": 0.4581,
"step": 12420
},
{
"epoch": 180.14,
"learning_rate": 1.0765625e-05,
"loss": 0.523,
"step": 12430
},
{
"epoch": 180.29,
"learning_rate": 1.06875e-05,
"loss": 0.5472,
"step": 12440
},
{
"epoch": 180.43,
"learning_rate": 1.0609375e-05,
"loss": 0.595,
"step": 12450
},
{
"epoch": 180.58,
"learning_rate": 1.053125e-05,
"loss": 0.5503,
"step": 12460
},
{
"epoch": 180.72,
"learning_rate": 1.0453125000000002e-05,
"loss": 0.5245,
"step": 12470
},
{
"epoch": 180.87,
"learning_rate": 1.0375e-05,
"loss": 0.5567,
"step": 12480
},
{
"epoch": 181.01,
"learning_rate": 1.0296875e-05,
"loss": 0.4907,
"step": 12490
},
{
"epoch": 181.16,
"learning_rate": 1.0218750000000001e-05,
"loss": 0.5697,
"step": 12500
},
{
"epoch": 181.16,
"eval_loss": 0.23992973566055298,
"eval_runtime": 578.3824,
"eval_samples_per_second": 5.868,
"eval_steps_per_second": 0.735,
"eval_wer": 0.15259390741973392,
"step": 12500
},
{
"epoch": 181.3,
"learning_rate": 1.0140625000000001e-05,
"loss": 0.5403,
"step": 12510
},
{
"epoch": 181.45,
"learning_rate": 1.00625e-05,
"loss": 0.5109,
"step": 12520
},
{
"epoch": 181.59,
"learning_rate": 9.984375e-06,
"loss": 0.4829,
"step": 12530
},
{
"epoch": 181.74,
"learning_rate": 9.90625e-06,
"loss": 0.5025,
"step": 12540
},
{
"epoch": 181.88,
"learning_rate": 9.828125e-06,
"loss": 0.5442,
"step": 12550
},
{
"epoch": 182.03,
"learning_rate": 9.750000000000002e-06,
"loss": 0.5034,
"step": 12560
},
{
"epoch": 182.17,
"learning_rate": 9.671875000000001e-06,
"loss": 0.4793,
"step": 12570
},
{
"epoch": 182.32,
"learning_rate": 9.59375e-06,
"loss": 0.5074,
"step": 12580
},
{
"epoch": 182.46,
"learning_rate": 9.515625000000001e-06,
"loss": 0.5132,
"step": 12590
},
{
"epoch": 182.61,
"learning_rate": 9.4375e-06,
"loss": 0.5744,
"step": 12600
},
{
"epoch": 182.75,
"learning_rate": 9.359375e-06,
"loss": 0.4714,
"step": 12610
},
{
"epoch": 182.9,
"learning_rate": 9.28125e-06,
"loss": 0.566,
"step": 12620
},
{
"epoch": 183.04,
"learning_rate": 9.203125e-06,
"loss": 0.5804,
"step": 12630
},
{
"epoch": 183.19,
"learning_rate": 9.125e-06,
"loss": 0.4781,
"step": 12640
},
{
"epoch": 183.33,
"learning_rate": 9.046875e-06,
"loss": 0.4948,
"step": 12650
},
{
"epoch": 183.48,
"learning_rate": 8.968750000000001e-06,
"loss": 0.4981,
"step": 12660
},
{
"epoch": 183.62,
"learning_rate": 8.890625000000001e-06,
"loss": 0.5249,
"step": 12670
},
{
"epoch": 183.77,
"learning_rate": 8.8125e-06,
"loss": 0.5197,
"step": 12680
},
{
"epoch": 183.91,
"learning_rate": 8.734375e-06,
"loss": 0.5002,
"step": 12690
},
{
"epoch": 184.06,
"learning_rate": 8.65625e-06,
"loss": 0.5451,
"step": 12700
},
{
"epoch": 184.2,
"learning_rate": 8.578125e-06,
"loss": 0.5132,
"step": 12710
},
{
"epoch": 184.35,
"learning_rate": 8.500000000000002e-06,
"loss": 0.4806,
"step": 12720
},
{
"epoch": 184.49,
"learning_rate": 8.421875e-06,
"loss": 0.5207,
"step": 12730
},
{
"epoch": 184.64,
"learning_rate": 8.34375e-06,
"loss": 0.5618,
"step": 12740
},
{
"epoch": 184.78,
"learning_rate": 8.265625000000001e-06,
"loss": 0.5574,
"step": 12750
},
{
"epoch": 184.93,
"learning_rate": 8.1875e-06,
"loss": 0.5181,
"step": 12760
},
{
"epoch": 185.07,
"learning_rate": 8.109375e-06,
"loss": 0.5471,
"step": 12770
},
{
"epoch": 185.22,
"learning_rate": 8.03125e-06,
"loss": 0.4863,
"step": 12780
},
{
"epoch": 185.36,
"learning_rate": 7.953125e-06,
"loss": 0.547,
"step": 12790
},
{
"epoch": 185.51,
"learning_rate": 7.875e-06,
"loss": 0.5103,
"step": 12800
},
{
"epoch": 185.65,
"learning_rate": 7.796875000000001e-06,
"loss": 0.5406,
"step": 12810
},
{
"epoch": 185.8,
"learning_rate": 7.718750000000001e-06,
"loss": 0.5136,
"step": 12820
},
{
"epoch": 185.94,
"learning_rate": 7.640625e-06,
"loss": 0.4991,
"step": 12830
},
{
"epoch": 186.09,
"learning_rate": 7.5625e-06,
"loss": 0.5515,
"step": 12840
},
{
"epoch": 186.23,
"learning_rate": 7.484375000000001e-06,
"loss": 0.4893,
"step": 12850
},
{
"epoch": 186.38,
"learning_rate": 7.4062500000000005e-06,
"loss": 0.6101,
"step": 12860
},
{
"epoch": 186.52,
"learning_rate": 7.328125000000001e-06,
"loss": 0.5348,
"step": 12870
},
{
"epoch": 186.67,
"learning_rate": 7.25e-06,
"loss": 0.554,
"step": 12880
},
{
"epoch": 186.81,
"learning_rate": 7.171875e-06,
"loss": 0.5119,
"step": 12890
},
{
"epoch": 186.96,
"learning_rate": 7.0937500000000005e-06,
"loss": 0.5348,
"step": 12900
},
{
"epoch": 187.1,
"learning_rate": 7.015625e-06,
"loss": 0.5884,
"step": 12910
},
{
"epoch": 187.25,
"learning_rate": 6.937500000000001e-06,
"loss": 0.496,
"step": 12920
},
{
"epoch": 187.39,
"learning_rate": 6.859375e-06,
"loss": 0.5119,
"step": 12930
},
{
"epoch": 187.54,
"learning_rate": 6.7812500000000005e-06,
"loss": 0.5284,
"step": 12940
},
{
"epoch": 187.68,
"learning_rate": 6.703125e-06,
"loss": 0.5597,
"step": 12950
},
{
"epoch": 187.83,
"learning_rate": 6.625000000000001e-06,
"loss": 0.5816,
"step": 12960
},
{
"epoch": 187.97,
"learning_rate": 6.546875000000001e-06,
"loss": 0.5311,
"step": 12970
},
{
"epoch": 188.12,
"learning_rate": 6.46875e-06,
"loss": 0.5023,
"step": 12980
},
{
"epoch": 188.26,
"learning_rate": 6.390625e-06,
"loss": 0.5199,
"step": 12990
},
{
"epoch": 188.41,
"learning_rate": 6.3125e-06,
"loss": 0.5379,
"step": 13000
},
{
"epoch": 188.41,
"eval_loss": 0.2375340759754181,
"eval_runtime": 574.5803,
"eval_samples_per_second": 5.907,
"eval_steps_per_second": 0.74,
"eval_wer": 0.15229807131293707,
"step": 13000
},
{
"epoch": 188.55,
"learning_rate": 6.234375000000001e-06,
"loss": 0.5227,
"step": 13010
},
{
"epoch": 188.7,
"learning_rate": 6.1562500000000006e-06,
"loss": 0.4946,
"step": 13020
},
{
"epoch": 188.84,
"learning_rate": 6.078125e-06,
"loss": 0.5618,
"step": 13030
},
{
"epoch": 188.99,
"learning_rate": 6e-06,
"loss": 0.5023,
"step": 13040
},
{
"epoch": 189.13,
"learning_rate": 5.921875e-06,
"loss": 0.4884,
"step": 13050
},
{
"epoch": 189.28,
"learning_rate": 5.843750000000001e-06,
"loss": 0.4912,
"step": 13060
},
{
"epoch": 189.42,
"learning_rate": 5.765625e-06,
"loss": 0.5511,
"step": 13070
},
{
"epoch": 189.57,
"learning_rate": 5.6875e-06,
"loss": 0.542,
"step": 13080
},
{
"epoch": 189.71,
"learning_rate": 5.609375e-06,
"loss": 0.5275,
"step": 13090
},
{
"epoch": 189.86,
"learning_rate": 5.531250000000001e-06,
"loss": 0.5719,
"step": 13100
},
{
"epoch": 190.0,
"learning_rate": 5.453125e-06,
"loss": 0.5686,
"step": 13110
},
{
"epoch": 190.14,
"learning_rate": 5.375e-06,
"loss": 0.5425,
"step": 13120
},
{
"epoch": 190.29,
"learning_rate": 5.296875000000001e-06,
"loss": 0.5322,
"step": 13130
},
{
"epoch": 190.43,
"learning_rate": 5.21875e-06,
"loss": 0.5146,
"step": 13140
},
{
"epoch": 190.58,
"learning_rate": 5.1406250000000004e-06,
"loss": 0.5364,
"step": 13150
},
{
"epoch": 190.72,
"learning_rate": 5.0625e-06,
"loss": 0.496,
"step": 13160
},
{
"epoch": 190.87,
"learning_rate": 4.984375e-06,
"loss": 0.6167,
"step": 13170
},
{
"epoch": 191.01,
"learning_rate": 4.906250000000001e-06,
"loss": 0.4898,
"step": 13180
},
{
"epoch": 191.16,
"learning_rate": 4.828125e-06,
"loss": 0.5216,
"step": 13190
},
{
"epoch": 191.3,
"learning_rate": 4.75e-06,
"loss": 0.5122,
"step": 13200
},
{
"epoch": 191.45,
"learning_rate": 4.671875000000001e-06,
"loss": 0.5293,
"step": 13210
},
{
"epoch": 191.59,
"learning_rate": 4.59375e-06,
"loss": 0.5072,
"step": 13220
},
{
"epoch": 191.74,
"learning_rate": 4.5156250000000005e-06,
"loss": 0.4934,
"step": 13230
},
{
"epoch": 191.88,
"learning_rate": 4.4375e-06,
"loss": 0.5224,
"step": 13240
},
{
"epoch": 192.03,
"learning_rate": 4.359375e-06,
"loss": 0.4804,
"step": 13250
},
{
"epoch": 192.17,
"learning_rate": 4.281250000000001e-06,
"loss": 0.6039,
"step": 13260
},
{
"epoch": 192.32,
"learning_rate": 4.2031250000000005e-06,
"loss": 0.4833,
"step": 13270
},
{
"epoch": 192.46,
"learning_rate": 4.125e-06,
"loss": 0.5236,
"step": 13280
},
{
"epoch": 192.61,
"learning_rate": 4.046875e-06,
"loss": 0.5312,
"step": 13290
},
{
"epoch": 192.75,
"learning_rate": 3.96875e-06,
"loss": 0.5886,
"step": 13300
},
{
"epoch": 192.9,
"learning_rate": 3.8906250000000005e-06,
"loss": 0.5098,
"step": 13310
},
{
"epoch": 193.04,
"learning_rate": 3.8125e-06,
"loss": 0.5223,
"step": 13320
},
{
"epoch": 193.19,
"learning_rate": 3.7343750000000006e-06,
"loss": 0.581,
"step": 13330
},
{
"epoch": 193.33,
"learning_rate": 3.65625e-06,
"loss": 0.4837,
"step": 13340
},
{
"epoch": 193.48,
"learning_rate": 3.578125e-06,
"loss": 0.5184,
"step": 13350
},
{
"epoch": 193.62,
"learning_rate": 3.5000000000000004e-06,
"loss": 0.5145,
"step": 13360
},
{
"epoch": 193.77,
"learning_rate": 3.421875e-06,
"loss": 0.4956,
"step": 13370
},
{
"epoch": 193.91,
"learning_rate": 3.3437500000000004e-06,
"loss": 0.5211,
"step": 13380
},
{
"epoch": 194.06,
"learning_rate": 3.2656249999999998e-06,
"loss": 0.5218,
"step": 13390
},
{
"epoch": 194.2,
"learning_rate": 3.1875000000000004e-06,
"loss": 0.5457,
"step": 13400
},
{
"epoch": 194.35,
"learning_rate": 3.109375e-06,
"loss": 0.4737,
"step": 13410
},
{
"epoch": 194.49,
"learning_rate": 3.03125e-06,
"loss": 0.5828,
"step": 13420
},
{
"epoch": 194.64,
"learning_rate": 2.9531249999999998e-06,
"loss": 0.5597,
"step": 13430
},
{
"epoch": 194.78,
"learning_rate": 2.8750000000000004e-06,
"loss": 0.5,
"step": 13440
},
{
"epoch": 194.93,
"learning_rate": 2.7968750000000002e-06,
"loss": 0.5694,
"step": 13450
},
{
"epoch": 195.07,
"learning_rate": 2.71875e-06,
"loss": 0.5176,
"step": 13460
},
{
"epoch": 195.22,
"learning_rate": 2.6406250000000002e-06,
"loss": 0.5216,
"step": 13470
},
{
"epoch": 195.36,
"learning_rate": 2.5625e-06,
"loss": 0.489,
"step": 13480
},
{
"epoch": 195.51,
"learning_rate": 2.4843750000000002e-06,
"loss": 0.5186,
"step": 13490
},
{
"epoch": 195.65,
"learning_rate": 2.40625e-06,
"loss": 0.5022,
"step": 13500
},
{
"epoch": 195.65,
"eval_loss": 0.23950409889221191,
"eval_runtime": 592.332,
"eval_samples_per_second": 5.73,
"eval_steps_per_second": 0.718,
"eval_wer": 0.15194471151870753,
"step": 13500
},
{
"epoch": 195.8,
"learning_rate": 2.3281250000000003e-06,
"loss": 0.5106,
"step": 13510
},
{
"epoch": 195.94,
"learning_rate": 2.25e-06,
"loss": 0.4966,
"step": 13520
},
{
"epoch": 196.09,
"learning_rate": 2.171875e-06,
"loss": 0.5053,
"step": 13530
},
{
"epoch": 196.23,
"learning_rate": 2.09375e-06,
"loss": 0.5594,
"step": 13540
},
{
"epoch": 196.38,
"learning_rate": 2.0156250000000003e-06,
"loss": 0.5047,
"step": 13550
},
{
"epoch": 196.52,
"learning_rate": 1.9375e-06,
"loss": 0.5078,
"step": 13560
},
{
"epoch": 196.67,
"learning_rate": 1.859375e-06,
"loss": 0.5466,
"step": 13570
},
{
"epoch": 196.81,
"learning_rate": 1.7812499999999999e-06,
"loss": 0.5371,
"step": 13580
},
{
"epoch": 196.96,
"learning_rate": 1.703125e-06,
"loss": 0.5551,
"step": 13590
},
{
"epoch": 197.1,
"learning_rate": 1.6250000000000001e-06,
"loss": 0.5615,
"step": 13600
},
{
"epoch": 197.25,
"learning_rate": 1.5468750000000001e-06,
"loss": 0.5464,
"step": 13610
},
{
"epoch": 197.39,
"learning_rate": 1.46875e-06,
"loss": 0.5196,
"step": 13620
},
{
"epoch": 197.54,
"learning_rate": 1.3906250000000001e-06,
"loss": 0.4493,
"step": 13630
},
{
"epoch": 197.68,
"learning_rate": 1.3125e-06,
"loss": 0.5603,
"step": 13640
},
{
"epoch": 197.83,
"learning_rate": 1.2343750000000001e-06,
"loss": 0.5227,
"step": 13650
},
{
"epoch": 197.97,
"learning_rate": 1.15625e-06,
"loss": 0.4889,
"step": 13660
},
{
"epoch": 198.12,
"learning_rate": 1.078125e-06,
"loss": 0.5893,
"step": 13670
},
{
"epoch": 198.26,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.5509,
"step": 13680
},
{
"epoch": 198.41,
"learning_rate": 9.21875e-07,
"loss": 0.513,
"step": 13690
},
{
"epoch": 198.55,
"learning_rate": 8.437500000000001e-07,
"loss": 0.5744,
"step": 13700
},
{
"epoch": 198.7,
"learning_rate": 7.65625e-07,
"loss": 0.5457,
"step": 13710
},
{
"epoch": 198.84,
"learning_rate": 6.875000000000001e-07,
"loss": 0.5007,
"step": 13720
},
{
"epoch": 198.99,
"learning_rate": 6.093750000000001e-07,
"loss": 0.5829,
"step": 13730
},
{
"epoch": 199.13,
"learning_rate": 5.312500000000001e-07,
"loss": 0.5047,
"step": 13740
},
{
"epoch": 199.28,
"learning_rate": 4.53125e-07,
"loss": 0.5173,
"step": 13750
},
{
"epoch": 199.42,
"learning_rate": 3.75e-07,
"loss": 0.533,
"step": 13760
},
{
"epoch": 199.57,
"learning_rate": 2.96875e-07,
"loss": 0.5328,
"step": 13770
},
{
"epoch": 199.71,
"learning_rate": 2.1875000000000002e-07,
"loss": 0.5111,
"step": 13780
},
{
"epoch": 199.86,
"learning_rate": 1.40625e-07,
"loss": 0.5071,
"step": 13790
},
{
"epoch": 200.0,
"learning_rate": 6.250000000000001e-08,
"loss": 0.5151,
"step": 13800
},
{
"epoch": 200.0,
"step": 13800,
"total_flos": 2.0051176394243018e+20,
"train_loss": 0.32463424516760786,
"train_runtime": 60486.5281,
"train_samples_per_second": 7.255,
"train_steps_per_second": 0.228
}
],
"max_steps": 13800,
"num_train_epochs": 200,
"total_flos": 2.0051176394243018e+20,
"trial_name": null,
"trial_params": null
}