{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 200.0,
"global_step": 13800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 7.000000000000001e-07,
"loss": 11.2514,
"step": 10
},
{
"epoch": 0.29,
"learning_rate": 1.7000000000000002e-06,
"loss": 12.1614,
"step": 20
},
{
"epoch": 0.43,
"learning_rate": 2.7e-06,
"loss": 12.0394,
"step": 30
},
{
"epoch": 0.58,
"learning_rate": 3.7e-06,
"loss": 11.8399,
"step": 40
},
{
"epoch": 0.72,
"learning_rate": 4.7e-06,
"loss": 12.1568,
"step": 50
},
{
"epoch": 0.87,
"learning_rate": 5.7000000000000005e-06,
"loss": 11.1578,
"step": 60
},
{
"epoch": 1.01,
"learning_rate": 6.700000000000001e-06,
"loss": 12.6691,
"step": 70
},
{
"epoch": 1.16,
"learning_rate": 7.7e-06,
"loss": 10.8955,
"step": 80
},
{
"epoch": 1.3,
"learning_rate": 8.7e-06,
"loss": 12.1493,
"step": 90
},
{
"epoch": 1.45,
"learning_rate": 9.7e-06,
"loss": 11.5678,
"step": 100
},
{
"epoch": 1.59,
"learning_rate": 1.0700000000000001e-05,
"loss": 10.1412,
"step": 110
},
{
"epoch": 1.74,
"learning_rate": 1.1700000000000001e-05,
"loss": 10.6275,
"step": 120
},
{
"epoch": 1.88,
"learning_rate": 1.27e-05,
"loss": 9.1186,
"step": 130
},
{
"epoch": 2.03,
"learning_rate": 1.3700000000000001e-05,
"loss": 7.4903,
"step": 140
},
{
"epoch": 2.17,
"learning_rate": 1.47e-05,
"loss": 6.6567,
"step": 150
},
{
"epoch": 2.32,
"learning_rate": 1.5700000000000002e-05,
"loss": 4.9907,
"step": 160
},
{
"epoch": 2.46,
"learning_rate": 1.6700000000000003e-05,
"loss": 5.5332,
"step": 170
},
{
"epoch": 2.61,
"learning_rate": 1.77e-05,
"loss": 4.5061,
"step": 180
},
{
"epoch": 2.75,
"learning_rate": 1.87e-05,
"loss": 4.2245,
"step": 190
},
{
"epoch": 2.9,
"learning_rate": 1.97e-05,
"loss": 4.057,
"step": 200
},
{
"epoch": 3.04,
"learning_rate": 2.07e-05,
"loss": 3.588,
"step": 210
},
{
"epoch": 3.19,
"learning_rate": 2.1700000000000002e-05,
"loss": 3.5628,
"step": 220
},
{
"epoch": 3.33,
"learning_rate": 2.2700000000000003e-05,
"loss": 3.4737,
"step": 230
},
{
"epoch": 3.48,
"learning_rate": 2.37e-05,
"loss": 3.3541,
"step": 240
},
{
"epoch": 3.62,
"learning_rate": 2.47e-05,
"loss": 3.2015,
"step": 250
},
{
"epoch": 3.77,
"learning_rate": 2.57e-05,
"loss": 3.2003,
"step": 260
},
{
"epoch": 3.91,
"learning_rate": 2.6700000000000002e-05,
"loss": 3.4131,
"step": 270
},
{
"epoch": 4.06,
"learning_rate": 2.7700000000000002e-05,
"loss": 3.0602,
"step": 280
},
{
"epoch": 4.2,
"learning_rate": 2.87e-05,
"loss": 3.27,
"step": 290
},
{
"epoch": 4.35,
"learning_rate": 2.97e-05,
"loss": 3.0795,
"step": 300
},
{
"epoch": 4.49,
"learning_rate": 3.06e-05,
"loss": 3.0245,
"step": 310
},
{
"epoch": 4.64,
"learning_rate": 3.16e-05,
"loss": 2.9565,
"step": 320
},
{
"epoch": 4.78,
"learning_rate": 3.26e-05,
"loss": 2.9714,
"step": 330
},
{
"epoch": 4.93,
"learning_rate": 3.3600000000000004e-05,
"loss": 3.06,
"step": 340
},
{
"epoch": 5.07,
"learning_rate": 3.46e-05,
"loss": 2.9985,
"step": 350
},
{
"epoch": 5.22,
"learning_rate": 3.56e-05,
"loss": 3.0004,
"step": 360
},
{
"epoch": 5.36,
"learning_rate": 3.66e-05,
"loss": 2.9545,
"step": 370
},
{
"epoch": 5.51,
"learning_rate": 3.76e-05,
"loss": 2.9668,
"step": 380
},
{
"epoch": 5.65,
"learning_rate": 3.86e-05,
"loss": 2.995,
"step": 390
},
{
"epoch": 5.8,
"learning_rate": 3.960000000000001e-05,
"loss": 3.0165,
"step": 400
},
{
"epoch": 5.94,
"learning_rate": 4.0600000000000004e-05,
"loss": 2.9506,
"step": 410
},
{
"epoch": 6.09,
"learning_rate": 4.16e-05,
"loss": 2.9652,
"step": 420
},
{
"epoch": 6.23,
"learning_rate": 4.26e-05,
"loss": 3.011,
"step": 430
},
{
"epoch": 6.38,
"learning_rate": 4.36e-05,
"loss": 2.9217,
"step": 440
},
{
"epoch": 6.52,
"learning_rate": 4.46e-05,
"loss": 3.0549,
"step": 450
},
{
"epoch": 6.67,
"learning_rate": 4.5600000000000004e-05,
"loss": 2.9966,
"step": 460
},
{
"epoch": 6.81,
"learning_rate": 4.660000000000001e-05,
"loss": 2.9123,
"step": 470
},
{
"epoch": 6.96,
"learning_rate": 4.76e-05,
"loss": 2.9838,
"step": 480
},
{
"epoch": 7.1,
"learning_rate": 4.86e-05,
"loss": 2.9364,
"step": 490
},
{
"epoch": 7.25,
"learning_rate": 4.96e-05,
"loss": 2.9888,
"step": 500
},
{
"epoch": 7.25,
"eval_loss": 2.9192206859588623,
"eval_runtime": 601.3365,
"eval_samples_per_second": 5.644,
"eval_steps_per_second": 0.707,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 7.39,
"learning_rate": 5.0600000000000003e-05,
"loss": 2.8978,
"step": 510
},
{
"epoch": 7.54,
"learning_rate": 5.16e-05,
"loss": 2.914,
"step": 520
},
{
"epoch": 7.68,
"learning_rate": 5.2600000000000005e-05,
"loss": 3.0331,
"step": 530
},
{
"epoch": 7.83,
"learning_rate": 5.360000000000001e-05,
"loss": 2.9362,
"step": 540
},
{
"epoch": 7.97,
"learning_rate": 5.4600000000000006e-05,
"loss": 2.9437,
"step": 550
},
{
"epoch": 8.12,
"learning_rate": 5.560000000000001e-05,
"loss": 2.9124,
"step": 560
},
{
"epoch": 8.26,
"learning_rate": 5.66e-05,
"loss": 2.9353,
"step": 570
},
{
"epoch": 8.41,
"learning_rate": 5.76e-05,
"loss": 2.9313,
"step": 580
},
{
"epoch": 8.55,
"learning_rate": 5.86e-05,
"loss": 2.954,
"step": 590
},
{
"epoch": 8.7,
"learning_rate": 5.96e-05,
"loss": 2.9495,
"step": 600
},
{
"epoch": 8.84,
"learning_rate": 6.06e-05,
"loss": 2.9209,
"step": 610
},
{
"epoch": 8.99,
"learning_rate": 6.16e-05,
"loss": 2.9983,
"step": 620
},
{
"epoch": 9.13,
"learning_rate": 6.26e-05,
"loss": 2.8978,
"step": 630
},
{
"epoch": 9.28,
"learning_rate": 6.35e-05,
"loss": 2.921,
"step": 640
},
{
"epoch": 9.42,
"learning_rate": 6.450000000000001e-05,
"loss": 2.9351,
"step": 650
},
{
"epoch": 9.57,
"learning_rate": 6.55e-05,
"loss": 2.9152,
"step": 660
},
{
"epoch": 9.71,
"learning_rate": 6.65e-05,
"loss": 2.984,
"step": 670
},
{
"epoch": 9.86,
"learning_rate": 6.750000000000001e-05,
"loss": 2.907,
"step": 680
},
{
"epoch": 10.0,
"learning_rate": 6.850000000000001e-05,
"loss": 2.9913,
"step": 690
},
{
"epoch": 10.14,
"learning_rate": 6.95e-05,
"loss": 2.9054,
"step": 700
},
{
"epoch": 10.29,
"learning_rate": 7.05e-05,
"loss": 2.9958,
"step": 710
},
{
"epoch": 10.43,
"learning_rate": 7.15e-05,
"loss": 2.91,
"step": 720
},
{
"epoch": 10.58,
"learning_rate": 7.25e-05,
"loss": 2.904,
"step": 730
},
{
"epoch": 10.72,
"learning_rate": 7.35e-05,
"loss": 2.9181,
"step": 740
},
{
"epoch": 10.87,
"learning_rate": 7.450000000000001e-05,
"loss": 2.8968,
"step": 750
},
{
"epoch": 11.01,
"learning_rate": 7.55e-05,
"loss": 2.9142,
"step": 760
},
{
"epoch": 11.16,
"learning_rate": 7.65e-05,
"loss": 2.9257,
"step": 770
},
{
"epoch": 11.3,
"learning_rate": 7.75e-05,
"loss": 2.958,
"step": 780
},
{
"epoch": 11.45,
"learning_rate": 7.850000000000001e-05,
"loss": 2.9208,
"step": 790
},
{
"epoch": 11.59,
"learning_rate": 7.950000000000001e-05,
"loss": 2.9139,
"step": 800
},
{
"epoch": 11.74,
"learning_rate": 8.05e-05,
"loss": 2.9225,
"step": 810
},
{
"epoch": 11.88,
"learning_rate": 8.15e-05,
"loss": 2.8919,
"step": 820
},
{
"epoch": 12.03,
"learning_rate": 8.25e-05,
"loss": 2.9232,
"step": 830
},
{
"epoch": 12.17,
"learning_rate": 8.35e-05,
"loss": 2.92,
"step": 840
},
{
"epoch": 12.32,
"learning_rate": 8.450000000000001e-05,
"loss": 2.8916,
"step": 850
},
{
"epoch": 12.46,
"learning_rate": 8.55e-05,
"loss": 2.9289,
"step": 860
},
{
"epoch": 12.61,
"learning_rate": 8.65e-05,
"loss": 2.8714,
"step": 870
},
{
"epoch": 12.75,
"learning_rate": 8.75e-05,
"loss": 2.9255,
"step": 880
},
{
"epoch": 12.9,
"learning_rate": 8.850000000000001e-05,
"loss": 2.9071,
"step": 890
},
{
"epoch": 13.04,
"learning_rate": 8.950000000000001e-05,
"loss": 2.8934,
"step": 900
},
{
"epoch": 13.19,
"learning_rate": 9.05e-05,
"loss": 2.9482,
"step": 910
},
{
"epoch": 13.33,
"learning_rate": 9.15e-05,
"loss": 2.8965,
"step": 920
},
{
"epoch": 13.48,
"learning_rate": 9.250000000000001e-05,
"loss": 2.8906,
"step": 930
},
{
"epoch": 13.62,
"learning_rate": 9.350000000000001e-05,
"loss": 2.8707,
"step": 940
},
{
"epoch": 13.77,
"learning_rate": 9.449999999999999e-05,
"loss": 2.935,
"step": 950
},
{
"epoch": 13.91,
"learning_rate": 9.55e-05,
"loss": 2.9169,
"step": 960
},
{
"epoch": 14.06,
"learning_rate": 9.65e-05,
"loss": 2.8785,
"step": 970
},
{
"epoch": 14.2,
"learning_rate": 9.75e-05,
"loss": 2.914,
"step": 980
},
{
"epoch": 14.35,
"learning_rate": 9.850000000000001e-05,
"loss": 2.9282,
"step": 990
},
{
"epoch": 14.49,
"learning_rate": 9.95e-05,
"loss": 2.9313,
"step": 1000
},
{
"epoch": 14.49,
"eval_loss": 2.869788646697998,
"eval_runtime": 583.4175,
"eval_samples_per_second": 5.817,
"eval_steps_per_second": 0.728,
"eval_wer": 1.0,
"step": 1000
},
{
"epoch": 14.64,
"learning_rate": 9.991525423728813e-05,
"loss": 2.8698,
"step": 1010
},
{
"epoch": 14.78,
"learning_rate": 9.974576271186441e-05,
"loss": 2.9056,
"step": 1020
},
{
"epoch": 14.93,
"learning_rate": 9.957627118644068e-05,
"loss": 2.8838,
"step": 1030
},
{
"epoch": 15.07,
"learning_rate": 9.940677966101696e-05,
"loss": 2.8688,
"step": 1040
},
{
"epoch": 15.22,
"learning_rate": 9.923728813559322e-05,
"loss": 2.9192,
"step": 1050
},
{
"epoch": 15.36,
"learning_rate": 9.906779661016949e-05,
"loss": 2.8884,
"step": 1060
},
{
"epoch": 15.51,
"learning_rate": 9.889830508474577e-05,
"loss": 2.9437,
"step": 1070
},
{
"epoch": 15.65,
"learning_rate": 9.872881355932204e-05,
"loss": 2.8941,
"step": 1080
},
{
"epoch": 15.8,
"learning_rate": 9.85593220338983e-05,
"loss": 2.8788,
"step": 1090
},
{
"epoch": 15.94,
"learning_rate": 9.838983050847458e-05,
"loss": 2.8942,
"step": 1100
},
{
"epoch": 16.09,
"learning_rate": 9.822033898305085e-05,
"loss": 2.9077,
"step": 1110
},
{
"epoch": 16.23,
"learning_rate": 9.805084745762713e-05,
"loss": 2.9044,
"step": 1120
},
{
"epoch": 16.38,
"learning_rate": 9.78813559322034e-05,
"loss": 2.8415,
"step": 1130
},
{
"epoch": 16.52,
"learning_rate": 9.771186440677966e-05,
"loss": 2.8204,
"step": 1140
},
{
"epoch": 16.67,
"learning_rate": 9.754237288135594e-05,
"loss": 2.7944,
"step": 1150
},
{
"epoch": 16.81,
"learning_rate": 9.737288135593221e-05,
"loss": 2.7636,
"step": 1160
},
{
"epoch": 16.96,
"learning_rate": 9.720338983050849e-05,
"loss": 2.7104,
"step": 1170
},
{
"epoch": 17.1,
"learning_rate": 9.703389830508474e-05,
"loss": 2.529,
"step": 1180
},
{
"epoch": 17.25,
"learning_rate": 9.686440677966102e-05,
"loss": 2.4256,
"step": 1190
},
{
"epoch": 17.39,
"learning_rate": 9.66949152542373e-05,
"loss": 2.3963,
"step": 1200
},
{
"epoch": 17.54,
"learning_rate": 9.652542372881357e-05,
"loss": 2.1997,
"step": 1210
},
{
"epoch": 17.68,
"learning_rate": 9.635593220338983e-05,
"loss": 2.1107,
"step": 1220
},
{
"epoch": 17.83,
"learning_rate": 9.61864406779661e-05,
"loss": 1.9865,
"step": 1230
},
{
"epoch": 17.97,
"learning_rate": 9.601694915254238e-05,
"loss": 1.7979,
"step": 1240
},
{
"epoch": 18.12,
"learning_rate": 9.584745762711866e-05,
"loss": 1.6753,
"step": 1250
},
{
"epoch": 18.26,
"learning_rate": 9.567796610169491e-05,
"loss": 1.6191,
"step": 1260
},
{
"epoch": 18.41,
"learning_rate": 9.550847457627119e-05,
"loss": 1.6077,
"step": 1270
},
{
"epoch": 18.55,
"learning_rate": 9.533898305084746e-05,
"loss": 1.5907,
"step": 1280
},
{
"epoch": 18.7,
"learning_rate": 9.516949152542374e-05,
"loss": 1.4164,
"step": 1290
},
{
"epoch": 18.84,
"learning_rate": 9.5e-05,
"loss": 1.596,
"step": 1300
},
{
"epoch": 18.99,
"learning_rate": 9.483050847457627e-05,
"loss": 1.3845,
"step": 1310
},
{
"epoch": 19.13,
"learning_rate": 9.466101694915255e-05,
"loss": 1.4502,
"step": 1320
},
{
"epoch": 19.28,
"learning_rate": 9.449152542372882e-05,
"loss": 1.3023,
"step": 1330
},
{
"epoch": 19.42,
"learning_rate": 9.432203389830508e-05,
"loss": 1.2154,
"step": 1340
},
{
"epoch": 19.57,
"learning_rate": 9.415254237288136e-05,
"loss": 1.1851,
"step": 1350
},
{
"epoch": 19.71,
"learning_rate": 9.398305084745763e-05,
"loss": 1.1953,
"step": 1360
},
{
"epoch": 19.86,
"learning_rate": 9.381355932203391e-05,
"loss": 1.1922,
"step": 1370
},
{
"epoch": 20.0,
"learning_rate": 9.364406779661016e-05,
"loss": 1.2421,
"step": 1380
},
{
"epoch": 20.14,
"learning_rate": 9.347457627118644e-05,
"loss": 1.3635,
"step": 1390
},
{
"epoch": 20.29,
"learning_rate": 9.330508474576271e-05,
"loss": 1.1689,
"step": 1400
},
{
"epoch": 20.43,
"learning_rate": 9.313559322033899e-05,
"loss": 1.2484,
"step": 1410
},
{
"epoch": 20.58,
"learning_rate": 9.296610169491527e-05,
"loss": 1.1633,
"step": 1420
},
{
"epoch": 20.72,
"learning_rate": 9.279661016949152e-05,
"loss": 1.2084,
"step": 1430
},
{
"epoch": 20.87,
"learning_rate": 9.26271186440678e-05,
"loss": 1.1117,
"step": 1440
},
{
"epoch": 21.01,
"learning_rate": 9.245762711864407e-05,
"loss": 1.0455,
"step": 1450
},
{
"epoch": 21.16,
"learning_rate": 9.228813559322035e-05,
"loss": 1.0456,
"step": 1460
},
{
"epoch": 21.3,
"learning_rate": 9.211864406779661e-05,
"loss": 1.1641,
"step": 1470
},
{
"epoch": 21.45,
"learning_rate": 9.194915254237288e-05,
"loss": 1.0116,
"step": 1480
},
{
"epoch": 21.59,
"learning_rate": 9.177966101694916e-05,
"loss": 1.0744,
"step": 1490
},
{
"epoch": 21.74,
"learning_rate": 9.161016949152543e-05,
"loss": 1.068,
"step": 1500
},
{
"epoch": 21.74,
"eval_loss": 0.2646801173686981,
"eval_runtime": 584.2239,
"eval_samples_per_second": 5.809,
"eval_steps_per_second": 0.727,
"eval_wer": 0.2564981222624888,
"step": 1500
},
{
"epoch": 21.88,
"learning_rate": 9.14406779661017e-05,
"loss": 0.949,
"step": 1510
},
{
"epoch": 22.03,
"learning_rate": 9.127118644067797e-05,
"loss": 1.0756,
"step": 1520
},
{
"epoch": 22.17,
"learning_rate": 9.110169491525424e-05,
"loss": 1.0889,
"step": 1530
},
{
"epoch": 22.32,
"learning_rate": 9.093220338983052e-05,
"loss": 1.0893,
"step": 1540
},
{
"epoch": 22.46,
"learning_rate": 9.076271186440677e-05,
"loss": 0.9964,
"step": 1550
},
{
"epoch": 22.61,
"learning_rate": 9.059322033898305e-05,
"loss": 0.9238,
"step": 1560
},
{
"epoch": 22.75,
"learning_rate": 9.042372881355933e-05,
"loss": 0.9582,
"step": 1570
},
{
"epoch": 22.9,
"learning_rate": 9.02542372881356e-05,
"loss": 0.9654,
"step": 1580
},
{
"epoch": 23.04,
"learning_rate": 9.008474576271187e-05,
"loss": 0.8715,
"step": 1590
},
{
"epoch": 23.19,
"learning_rate": 8.991525423728813e-05,
"loss": 0.9353,
"step": 1600
},
{
"epoch": 23.33,
"learning_rate": 8.974576271186441e-05,
"loss": 0.9148,
"step": 1610
},
{
"epoch": 23.48,
"learning_rate": 8.957627118644069e-05,
"loss": 0.9696,
"step": 1620
},
{
"epoch": 23.62,
"learning_rate": 8.940677966101694e-05,
"loss": 0.9165,
"step": 1630
},
{
"epoch": 23.77,
"learning_rate": 8.923728813559322e-05,
"loss": 0.9927,
"step": 1640
},
{
"epoch": 23.91,
"learning_rate": 8.906779661016949e-05,
"loss": 0.9624,
"step": 1650
},
{
"epoch": 24.06,
"learning_rate": 8.889830508474577e-05,
"loss": 0.9503,
"step": 1660
},
{
"epoch": 24.2,
"learning_rate": 8.872881355932204e-05,
"loss": 0.9891,
"step": 1670
},
{
"epoch": 24.35,
"learning_rate": 8.85593220338983e-05,
"loss": 0.9596,
"step": 1680
},
{
"epoch": 24.49,
"learning_rate": 8.838983050847458e-05,
"loss": 0.9499,
"step": 1690
},
{
"epoch": 24.64,
"learning_rate": 8.822033898305085e-05,
"loss": 0.8957,
"step": 1700
},
{
"epoch": 24.78,
"learning_rate": 8.805084745762713e-05,
"loss": 0.862,
"step": 1710
},
{
"epoch": 24.93,
"learning_rate": 8.78813559322034e-05,
"loss": 0.9045,
"step": 1720
},
{
"epoch": 25.07,
"learning_rate": 8.771186440677966e-05,
"loss": 1.0047,
"step": 1730
},
{
"epoch": 25.22,
"learning_rate": 8.754237288135594e-05,
"loss": 1.0456,
"step": 1740
},
{
"epoch": 25.36,
"learning_rate": 8.737288135593221e-05,
"loss": 1.2944,
"step": 1750
},
{
"epoch": 25.51,
"learning_rate": 8.720338983050848e-05,
"loss": 0.9649,
"step": 1760
},
{
"epoch": 25.65,
"learning_rate": 8.703389830508476e-05,
"loss": 0.903,
"step": 1770
},
{
"epoch": 25.8,
"learning_rate": 8.686440677966102e-05,
"loss": 0.8972,
"step": 1780
},
{
"epoch": 25.94,
"learning_rate": 8.66949152542373e-05,
"loss": 0.9095,
"step": 1790
},
{
"epoch": 26.09,
"learning_rate": 8.652542372881355e-05,
"loss": 0.8686,
"step": 1800
},
{
"epoch": 26.23,
"learning_rate": 8.635593220338983e-05,
"loss": 0.7924,
"step": 1810
},
{
"epoch": 26.38,
"learning_rate": 8.61864406779661e-05,
"loss": 0.8395,
"step": 1820
},
{
"epoch": 26.52,
"learning_rate": 8.601694915254238e-05,
"loss": 0.8397,
"step": 1830
},
{
"epoch": 26.67,
"learning_rate": 8.584745762711865e-05,
"loss": 0.9085,
"step": 1840
},
{
"epoch": 26.81,
"learning_rate": 8.567796610169491e-05,
"loss": 0.9359,
"step": 1850
},
{
"epoch": 26.96,
"learning_rate": 8.55084745762712e-05,
"loss": 0.9281,
"step": 1860
},
{
"epoch": 27.1,
"learning_rate": 8.533898305084746e-05,
"loss": 0.8757,
"step": 1870
},
{
"epoch": 27.25,
"learning_rate": 8.516949152542373e-05,
"loss": 0.8233,
"step": 1880
},
{
"epoch": 27.39,
"learning_rate": 8.5e-05,
"loss": 0.9204,
"step": 1890
},
{
"epoch": 27.54,
"learning_rate": 8.483050847457627e-05,
"loss": 0.8401,
"step": 1900
},
{
"epoch": 27.68,
"learning_rate": 8.466101694915255e-05,
"loss": 0.8236,
"step": 1910
},
{
"epoch": 27.83,
"learning_rate": 8.449152542372882e-05,
"loss": 0.8152,
"step": 1920
},
{
"epoch": 27.97,
"learning_rate": 8.432203389830509e-05,
"loss": 0.8678,
"step": 1930
},
{
"epoch": 28.12,
"learning_rate": 8.415254237288137e-05,
"loss": 0.8236,
"step": 1940
},
{
"epoch": 28.26,
"learning_rate": 8.398305084745763e-05,
"loss": 0.8069,
"step": 1950
},
{
"epoch": 28.41,
"learning_rate": 8.381355932203391e-05,
"loss": 0.849,
"step": 1960
},
{
"epoch": 28.55,
"learning_rate": 8.364406779661016e-05,
"loss": 0.8725,
"step": 1970
},
{
"epoch": 28.7,
"learning_rate": 8.347457627118644e-05,
"loss": 1.1043,
"step": 1980
},
{
"epoch": 28.84,
"learning_rate": 8.330508474576272e-05,
"loss": 0.8797,
"step": 1990
},
{
"epoch": 28.99,
"learning_rate": 8.313559322033899e-05,
"loss": 0.8151,
"step": 2000
},
{
"epoch": 28.99,
"eval_loss": 0.2066880613565445,
"eval_runtime": 586.2698,
"eval_samples_per_second": 5.789,
"eval_steps_per_second": 0.725,
"eval_wer": 0.17191364872749385,
"step": 2000
},
{
"epoch": 29.13,
"learning_rate": 8.296610169491526e-05,
"loss": 0.7783,
"step": 2010
},
{
"epoch": 29.28,
"learning_rate": 8.279661016949152e-05,
"loss": 0.9059,
"step": 2020
},
{
"epoch": 29.42,
"learning_rate": 8.26271186440678e-05,
"loss": 0.7881,
"step": 2030
},
{
"epoch": 29.57,
"learning_rate": 8.245762711864407e-05,
"loss": 0.8869,
"step": 2040
},
{
"epoch": 29.71,
"learning_rate": 8.228813559322034e-05,
"loss": 0.93,
"step": 2050
},
{
"epoch": 29.86,
"learning_rate": 8.211864406779662e-05,
"loss": 0.83,
"step": 2060
},
{
"epoch": 30.0,
"learning_rate": 8.194915254237288e-05,
"loss": 0.7013,
"step": 2070
},
{
"epoch": 30.14,
"learning_rate": 8.177966101694916e-05,
"loss": 0.7469,
"step": 2080
},
{
"epoch": 30.29,
"learning_rate": 8.161016949152543e-05,
"loss": 0.8841,
"step": 2090
},
{
"epoch": 30.43,
"learning_rate": 8.14406779661017e-05,
"loss": 0.7999,
"step": 2100
},
{
"epoch": 30.58,
"learning_rate": 8.127118644067797e-05,
"loss": 0.875,
"step": 2110
},
{
"epoch": 30.72,
"learning_rate": 8.110169491525424e-05,
"loss": 0.7745,
"step": 2120
},
{
"epoch": 30.87,
"learning_rate": 8.093220338983051e-05,
"loss": 0.8997,
"step": 2130
},
{
"epoch": 31.01,
"learning_rate": 8.076271186440679e-05,
"loss": 0.8064,
"step": 2140
},
{
"epoch": 31.16,
"learning_rate": 8.059322033898305e-05,
"loss": 0.8057,
"step": 2150
},
{
"epoch": 31.3,
"learning_rate": 8.042372881355933e-05,
"loss": 0.7763,
"step": 2160
},
{
"epoch": 31.45,
"learning_rate": 8.025423728813559e-05,
"loss": 0.7625,
"step": 2170
},
{
"epoch": 31.59,
"learning_rate": 8.008474576271187e-05,
"loss": 0.7622,
"step": 2180
},
{
"epoch": 31.74,
"learning_rate": 7.991525423728813e-05,
"loss": 0.8415,
"step": 2190
},
{
"epoch": 31.88,
"learning_rate": 7.974576271186441e-05,
"loss": 0.7549,
"step": 2200
},
{
"epoch": 32.03,
"learning_rate": 7.957627118644068e-05,
"loss": 0.8678,
"step": 2210
},
{
"epoch": 32.17,
"learning_rate": 7.940677966101695e-05,
"loss": 0.7861,
"step": 2220
},
{
"epoch": 32.32,
"learning_rate": 7.923728813559323e-05,
"loss": 0.7887,
"step": 2230
},
{
"epoch": 32.46,
"learning_rate": 7.906779661016949e-05,
"loss": 0.6937,
"step": 2240
},
{
"epoch": 32.61,
"learning_rate": 7.889830508474577e-05,
"loss": 0.746,
"step": 2250
},
{
"epoch": 32.75,
"learning_rate": 7.872881355932204e-05,
"loss": 0.7307,
"step": 2260
},
{
"epoch": 32.9,
"learning_rate": 7.85593220338983e-05,
"loss": 0.758,
"step": 2270
},
{
"epoch": 33.04,
"learning_rate": 7.838983050847458e-05,
"loss": 0.7016,
"step": 2280
},
{
"epoch": 33.19,
"learning_rate": 7.822033898305085e-05,
"loss": 0.7936,
"step": 2290
},
{
"epoch": 33.33,
"learning_rate": 7.805084745762712e-05,
"loss": 0.7138,
"step": 2300
},
{
"epoch": 33.48,
"learning_rate": 7.78813559322034e-05,
"loss": 0.8223,
"step": 2310
},
{
"epoch": 33.62,
"learning_rate": 7.771186440677966e-05,
"loss": 0.8025,
"step": 2320
},
{
"epoch": 33.77,
"learning_rate": 7.754237288135594e-05,
"loss": 0.9172,
"step": 2330
},
{
"epoch": 33.91,
"learning_rate": 7.73728813559322e-05,
"loss": 0.914,
"step": 2340
},
{
"epoch": 34.06,
"learning_rate": 7.720338983050848e-05,
"loss": 0.7792,
"step": 2350
},
{
"epoch": 34.2,
"learning_rate": 7.703389830508476e-05,
"loss": 0.7518,
"step": 2360
},
{
"epoch": 34.35,
"learning_rate": 7.686440677966102e-05,
"loss": 0.7291,
"step": 2370
},
{
"epoch": 34.49,
"learning_rate": 7.669491525423729e-05,
"loss": 0.7946,
"step": 2380
},
{
"epoch": 34.64,
"learning_rate": 7.652542372881356e-05,
"loss": 0.721,
"step": 2390
},
{
"epoch": 34.78,
"learning_rate": 7.635593220338984e-05,
"loss": 0.725,
"step": 2400
},
{
"epoch": 34.93,
"learning_rate": 7.618644067796612e-05,
"loss": 0.7771,
"step": 2410
},
{
"epoch": 35.07,
"learning_rate": 7.601694915254237e-05,
"loss": 0.7069,
"step": 2420
},
{
"epoch": 35.22,
"learning_rate": 7.584745762711865e-05,
"loss": 0.7194,
"step": 2430
},
{
"epoch": 35.36,
"learning_rate": 7.567796610169491e-05,
"loss": 0.7174,
"step": 2440
},
{
"epoch": 35.51,
"learning_rate": 7.55084745762712e-05,
"loss": 0.752,
"step": 2450
},
{
"epoch": 35.65,
"learning_rate": 7.533898305084746e-05,
"loss": 0.7291,
"step": 2460
},
{
"epoch": 35.8,
"learning_rate": 7.516949152542373e-05,
"loss": 0.8244,
"step": 2470
},
{
"epoch": 35.94,
"learning_rate": 7.500000000000001e-05,
"loss": 0.7068,
"step": 2480
},
{
"epoch": 36.09,
"learning_rate": 7.483050847457627e-05,
"loss": 0.8703,
"step": 2490
},
{
"epoch": 36.23,
"learning_rate": 7.466101694915255e-05,
"loss": 0.764,
"step": 2500
},
{
"epoch": 36.23,
"eval_loss": 0.1974831521511078,
"eval_runtime": 581.603,
"eval_samples_per_second": 5.836,
"eval_steps_per_second": 0.731,
"eval_wer": 0.1567931366023223,
"step": 2500
},
{
"epoch": 36.38,
"learning_rate": 7.449152542372882e-05,
"loss": 0.7373,
"step": 2510
},
{
"epoch": 36.52,
"learning_rate": 7.432203389830509e-05,
"loss": 0.7291,
"step": 2520
},
{
"epoch": 36.67,
"learning_rate": 7.415254237288137e-05,
"loss": 0.8097,
"step": 2530
},
{
"epoch": 36.81,
"learning_rate": 7.398305084745763e-05,
"loss": 0.738,
"step": 2540
},
{
"epoch": 36.96,
"learning_rate": 7.38135593220339e-05,
"loss": 0.7335,
"step": 2550
},
{
"epoch": 37.1,
"learning_rate": 7.364406779661018e-05,
"loss": 0.7293,
"step": 2560
},
{
"epoch": 37.25,
"learning_rate": 7.347457627118645e-05,
"loss": 0.7483,
"step": 2570
},
{
"epoch": 37.39,
"learning_rate": 7.330508474576272e-05,
"loss": 0.6923,
"step": 2580
},
{
"epoch": 37.54,
"learning_rate": 7.313559322033898e-05,
"loss": 0.663,
"step": 2590
},
{
"epoch": 37.68,
"learning_rate": 7.296610169491526e-05,
"loss": 0.7579,
"step": 2600
},
{
"epoch": 37.83,
"learning_rate": 7.279661016949152e-05,
"loss": 0.6966,
"step": 2610
},
{
"epoch": 37.97,
"learning_rate": 7.26271186440678e-05,
"loss": 0.7184,
"step": 2620
},
{
"epoch": 38.12,
"learning_rate": 7.245762711864407e-05,
"loss": 0.7068,
"step": 2630
},
{
"epoch": 38.26,
"learning_rate": 7.228813559322034e-05,
"loss": 0.6998,
"step": 2640
},
{
"epoch": 38.41,
"learning_rate": 7.211864406779662e-05,
"loss": 0.7044,
"step": 2650
},
{
"epoch": 38.55,
"learning_rate": 7.194915254237288e-05,
"loss": 0.6947,
"step": 2660
},
{
"epoch": 38.7,
"learning_rate": 7.177966101694915e-05,
"loss": 0.6872,
"step": 2670
},
{
"epoch": 38.84,
"learning_rate": 7.161016949152543e-05,
"loss": 0.7555,
"step": 2680
},
{
"epoch": 38.99,
"learning_rate": 7.14406779661017e-05,
"loss": 0.7157,
"step": 2690
},
{
"epoch": 39.13,
"learning_rate": 7.127118644067798e-05,
"loss": 0.751,
"step": 2700
},
{
"epoch": 39.28,
"learning_rate": 7.110169491525424e-05,
"loss": 0.7603,
"step": 2710
},
{
"epoch": 39.42,
"learning_rate": 7.093220338983051e-05,
"loss": 0.7847,
"step": 2720
},
{
"epoch": 39.57,
"learning_rate": 7.076271186440679e-05,
"loss": 0.7842,
"step": 2730
},
{
"epoch": 39.71,
"learning_rate": 7.059322033898305e-05,
"loss": 0.7265,
"step": 2740
},
{
"epoch": 39.86,
"learning_rate": 7.042372881355933e-05,
"loss": 0.7328,
"step": 2750
},
{
"epoch": 40.0,
"learning_rate": 7.025423728813559e-05,
"loss": 0.6276,
"step": 2760
},
{
"epoch": 40.14,
"learning_rate": 7.008474576271187e-05,
"loss": 0.6662,
"step": 2770
},
{
"epoch": 40.29,
"learning_rate": 6.991525423728815e-05,
"loss": 0.6845,
"step": 2780
},
{
"epoch": 40.43,
"learning_rate": 6.974576271186441e-05,
"loss": 0.76,
"step": 2790
},
{
"epoch": 40.58,
"learning_rate": 6.957627118644068e-05,
"loss": 0.7495,
"step": 2800
},
{
"epoch": 40.72,
"learning_rate": 6.940677966101695e-05,
"loss": 0.631,
"step": 2810
},
{
"epoch": 40.87,
"learning_rate": 6.923728813559323e-05,
"loss": 0.7702,
"step": 2820
},
{
"epoch": 41.01,
"learning_rate": 6.906779661016949e-05,
"loss": 0.7912,
"step": 2830
},
{
"epoch": 41.16,
"learning_rate": 6.889830508474576e-05,
"loss": 0.6516,
"step": 2840
},
{
"epoch": 41.3,
"learning_rate": 6.872881355932204e-05,
"loss": 0.8212,
"step": 2850
},
{
"epoch": 41.45,
"learning_rate": 6.85593220338983e-05,
"loss": 0.7247,
"step": 2860
},
{
"epoch": 41.59,
"learning_rate": 6.838983050847459e-05,
"loss": 0.7021,
"step": 2870
},
{
"epoch": 41.74,
"learning_rate": 6.822033898305085e-05,
"loss": 0.7079,
"step": 2880
},
{
"epoch": 41.88,
"learning_rate": 6.805084745762712e-05,
"loss": 0.7184,
"step": 2890
},
{
"epoch": 42.03,
"learning_rate": 6.78813559322034e-05,
"loss": 0.8249,
"step": 2900
},
{
"epoch": 42.17,
"learning_rate": 6.771186440677966e-05,
"loss": 0.6589,
"step": 2910
},
{
"epoch": 42.32,
"learning_rate": 6.754237288135593e-05,
"loss": 0.6485,
"step": 2920
},
{
"epoch": 42.46,
"learning_rate": 6.737288135593221e-05,
"loss": 0.7682,
"step": 2930
},
{
"epoch": 42.61,
"learning_rate": 6.720338983050848e-05,
"loss": 0.7569,
"step": 2940
},
{
"epoch": 42.75,
"learning_rate": 6.703389830508476e-05,
"loss": 0.6338,
"step": 2950
},
{
"epoch": 42.9,
"learning_rate": 6.686440677966101e-05,
"loss": 0.731,
"step": 2960
},
{
"epoch": 43.04,
"learning_rate": 6.669491525423729e-05,
"loss": 0.6278,
"step": 2970
},
{
"epoch": 43.19,
"learning_rate": 6.652542372881356e-05,
"loss": 0.6538,
"step": 2980
},
{
"epoch": 43.33,
"learning_rate": 6.635593220338984e-05,
"loss": 0.7682,
"step": 2990
},
{
"epoch": 43.48,
"learning_rate": 6.61864406779661e-05,
"loss": 0.7332,
"step": 3000
},
{
"epoch": 43.48,
"eval_loss": 0.18117694556713104,
"eval_runtime": 588.8092,
"eval_samples_per_second": 5.764,
"eval_steps_per_second": 0.722,
"eval_wer": 0.14625808413250171,
"step": 3000
},
{
"epoch": 43.62,
"learning_rate": 6.601694915254237e-05,
"loss": 0.6994,
"step": 3010
},
{
"epoch": 43.77,
"learning_rate": 6.584745762711865e-05,
"loss": 0.6738,
"step": 3020
},
{
"epoch": 43.91,
"learning_rate": 6.567796610169492e-05,
"loss": 0.6887,
"step": 3030
},
{
"epoch": 44.06,
"learning_rate": 6.55084745762712e-05,
"loss": 0.6816,
"step": 3040
},
{
"epoch": 44.2,
"learning_rate": 6.533898305084746e-05,
"loss": 0.7649,
"step": 3050
},
{
"epoch": 44.35,
"learning_rate": 6.516949152542373e-05,
"loss": 0.6712,
"step": 3060
},
{
"epoch": 44.49,
"learning_rate": 6.500000000000001e-05,
"loss": 0.6683,
"step": 3070
},
{
"epoch": 44.64,
"learning_rate": 6.483050847457627e-05,
"loss": 0.696,
"step": 3080
},
{
"epoch": 44.78,
"learning_rate": 6.466101694915254e-05,
"loss": 0.8165,
"step": 3090
},
{
"epoch": 44.93,
"learning_rate": 6.449152542372882e-05,
"loss": 0.7633,
"step": 3100
},
{
"epoch": 45.07,
"learning_rate": 6.432203389830509e-05,
"loss": 0.6813,
"step": 3110
},
{
"epoch": 45.22,
"learning_rate": 6.415254237288137e-05,
"loss": 0.6951,
"step": 3120
},
{
"epoch": 45.36,
"learning_rate": 6.398305084745762e-05,
"loss": 0.7011,
"step": 3130
},
{
"epoch": 45.51,
"learning_rate": 6.38135593220339e-05,
"loss": 0.6952,
"step": 3140
},
{
"epoch": 45.65,
"learning_rate": 6.364406779661018e-05,
"loss": 0.6875,
"step": 3150
},
{
"epoch": 45.8,
"learning_rate": 6.347457627118645e-05,
"loss": 0.689,
"step": 3160
},
{
"epoch": 45.94,
"learning_rate": 6.330508474576271e-05,
"loss": 0.7438,
"step": 3170
},
{
"epoch": 46.09,
"learning_rate": 6.313559322033898e-05,
"loss": 0.7675,
"step": 3180
},
{
"epoch": 46.23,
"learning_rate": 6.296610169491526e-05,
"loss": 0.6308,
"step": 3190
},
{
"epoch": 46.38,
"learning_rate": 6.279661016949154e-05,
"loss": 0.6733,
"step": 3200
},
{
"epoch": 46.52,
"learning_rate": 6.262711864406779e-05,
"loss": 0.7072,
"step": 3210
},
{
"epoch": 46.67,
"learning_rate": 6.245762711864407e-05,
"loss": 0.6429,
"step": 3220
},
{
"epoch": 46.81,
"learning_rate": 6.228813559322034e-05,
"loss": 0.7316,
"step": 3230
},
{
"epoch": 46.96,
"learning_rate": 6.211864406779662e-05,
"loss": 0.6912,
"step": 3240
},
{
"epoch": 47.1,
"learning_rate": 6.194915254237288e-05,
"loss": 0.7583,
"step": 3250
},
{
"epoch": 47.25,
"learning_rate": 6.177966101694915e-05,
"loss": 0.6429,
"step": 3260
},
{
"epoch": 47.39,
"learning_rate": 6.161016949152543e-05,
"loss": 0.7152,
"step": 3270
},
{
"epoch": 47.54,
"learning_rate": 6.14406779661017e-05,
"loss": 0.8123,
"step": 3280
},
{
"epoch": 47.68,
"learning_rate": 6.127118644067798e-05,
"loss": 0.737,
"step": 3290
},
{
"epoch": 47.83,
"learning_rate": 6.110169491525424e-05,
"loss": 0.6072,
"step": 3300
},
{
"epoch": 47.97,
"learning_rate": 6.093220338983051e-05,
"loss": 0.681,
"step": 3310
},
{
"epoch": 48.12,
"learning_rate": 6.076271186440678e-05,
"loss": 0.6064,
"step": 3320
},
{
"epoch": 48.26,
"learning_rate": 6.0593220338983056e-05,
"loss": 0.6178,
"step": 3330
},
{
"epoch": 48.41,
"learning_rate": 6.042372881355932e-05,
"loss": 0.7126,
"step": 3340
},
{
"epoch": 48.55,
"learning_rate": 6.0254237288135595e-05,
"loss": 0.7283,
"step": 3350
},
{
"epoch": 48.7,
"learning_rate": 6.008474576271187e-05,
"loss": 0.6796,
"step": 3360
},
{
"epoch": 48.84,
"learning_rate": 5.991525423728814e-05,
"loss": 0.7529,
"step": 3370
},
{
"epoch": 48.99,
"learning_rate": 5.974576271186441e-05,
"loss": 0.6973,
"step": 3380
},
{
"epoch": 49.13,
"learning_rate": 5.957627118644068e-05,
"loss": 0.6489,
"step": 3390
},
{
"epoch": 49.28,
"learning_rate": 5.9406779661016954e-05,
"loss": 0.7316,
"step": 3400
},
{
"epoch": 49.42,
"learning_rate": 5.923728813559323e-05,
"loss": 0.7525,
"step": 3410
},
{
"epoch": 49.57,
"learning_rate": 5.906779661016949e-05,
"loss": 0.6554,
"step": 3420
},
{
"epoch": 49.71,
"learning_rate": 5.889830508474577e-05,
"loss": 0.6275,
"step": 3430
},
{
"epoch": 49.86,
"learning_rate": 5.872881355932204e-05,
"loss": 0.6468,
"step": 3440
},
{
"epoch": 50.0,
"learning_rate": 5.855932203389831e-05,
"loss": 0.6908,
"step": 3450
},
{
"epoch": 50.14,
"learning_rate": 5.838983050847457e-05,
"loss": 0.6363,
"step": 3460
},
{
"epoch": 50.29,
"learning_rate": 5.8220338983050846e-05,
"loss": 0.7075,
"step": 3470
},
{
"epoch": 50.43,
"learning_rate": 5.805084745762712e-05,
"loss": 0.6981,
"step": 3480
},
{
"epoch": 50.58,
"learning_rate": 5.78813559322034e-05,
"loss": 0.6621,
"step": 3490
},
{
"epoch": 50.72,
"learning_rate": 5.771186440677966e-05,
"loss": 0.5952,
"step": 3500
},
{
"epoch": 50.72,
"eval_loss": 0.1922890692949295,
"eval_runtime": 587.7417,
"eval_samples_per_second": 5.775,
"eval_steps_per_second": 0.723,
"eval_wer": 0.1427820098776389,
"step": 3500
},
{
"epoch": 50.87,
"learning_rate": 5.754237288135593e-05,
"loss": 0.6752,
"step": 3510
},
{
"epoch": 51.01,
"learning_rate": 5.7372881355932205e-05,
"loss": 0.6024,
"step": 3520
},
{
"epoch": 51.16,
"learning_rate": 5.720338983050848e-05,
"loss": 0.7204,
"step": 3530
},
{
"epoch": 51.3,
"learning_rate": 5.7033898305084744e-05,
"loss": 0.7309,
"step": 3540
},
{
"epoch": 51.45,
"learning_rate": 5.686440677966102e-05,
"loss": 0.6726,
"step": 3550
},
{
"epoch": 51.59,
"learning_rate": 5.669491525423729e-05,
"loss": 0.6414,
"step": 3560
},
{
"epoch": 51.74,
"learning_rate": 5.6525423728813564e-05,
"loss": 0.6915,
"step": 3570
},
{
"epoch": 51.88,
"learning_rate": 5.635593220338984e-05,
"loss": 0.6821,
"step": 3580
},
{
"epoch": 52.03,
"learning_rate": 5.6186440677966103e-05,
"loss": 0.6443,
"step": 3590
},
{
"epoch": 52.17,
"learning_rate": 5.6016949152542377e-05,
"loss": 0.6501,
"step": 3600
},
{
"epoch": 52.32,
"learning_rate": 5.584745762711865e-05,
"loss": 0.7031,
"step": 3610
},
{
"epoch": 52.46,
"learning_rate": 5.567796610169492e-05,
"loss": 0.7238,
"step": 3620
},
{
"epoch": 52.61,
"learning_rate": 5.550847457627118e-05,
"loss": 0.6266,
"step": 3630
},
{
"epoch": 52.75,
"learning_rate": 5.533898305084746e-05,
"loss": 0.6443,
"step": 3640
},
{
"epoch": 52.9,
"learning_rate": 5.5169491525423736e-05,
"loss": 0.7794,
"step": 3650
},
{
"epoch": 53.04,
"learning_rate": 5.500000000000001e-05,
"loss": 0.629,
"step": 3660
},
{
"epoch": 53.19,
"learning_rate": 5.483050847457627e-05,
"loss": 0.67,
"step": 3670
},
{
"epoch": 53.33,
"learning_rate": 5.466101694915254e-05,
"loss": 0.6931,
"step": 3680
},
{
"epoch": 53.48,
"learning_rate": 5.4491525423728815e-05,
"loss": 0.5592,
"step": 3690
},
{
"epoch": 53.62,
"learning_rate": 5.4322033898305095e-05,
"loss": 0.6047,
"step": 3700
},
{
"epoch": 53.77,
"learning_rate": 5.4152542372881354e-05,
"loss": 0.6677,
"step": 3710
},
{
"epoch": 53.91,
"learning_rate": 5.398305084745763e-05,
"loss": 0.6204,
"step": 3720
},
{
"epoch": 54.06,
"learning_rate": 5.38135593220339e-05,
"loss": 0.6107,
"step": 3730
},
{
"epoch": 54.2,
"learning_rate": 5.3644067796610174e-05,
"loss": 0.6793,
"step": 3740
},
{
"epoch": 54.35,
"learning_rate": 5.347457627118644e-05,
"loss": 0.6442,
"step": 3750
},
{
"epoch": 54.49,
"learning_rate": 5.330508474576271e-05,
"loss": 0.618,
"step": 3760
},
{
"epoch": 54.64,
"learning_rate": 5.3135593220338986e-05,
"loss": 0.691,
"step": 3770
},
{
"epoch": 54.78,
"learning_rate": 5.296610169491526e-05,
"loss": 0.6762,
"step": 3780
},
{
"epoch": 54.93,
"learning_rate": 5.2796610169491526e-05,
"loss": 0.6501,
"step": 3790
},
{
"epoch": 55.07,
"learning_rate": 5.26271186440678e-05,
"loss": 0.6203,
"step": 3800
},
{
"epoch": 55.22,
"learning_rate": 5.245762711864407e-05,
"loss": 0.6739,
"step": 3810
},
{
"epoch": 55.36,
"learning_rate": 5.2288135593220345e-05,
"loss": 0.6109,
"step": 3820
},
{
"epoch": 55.51,
"learning_rate": 5.211864406779662e-05,
"loss": 0.6775,
"step": 3830
},
{
"epoch": 55.65,
"learning_rate": 5.194915254237288e-05,
"loss": 0.6144,
"step": 3840
},
{
"epoch": 55.8,
"learning_rate": 5.177966101694916e-05,
"loss": 0.6182,
"step": 3850
},
{
"epoch": 55.94,
"learning_rate": 5.161016949152543e-05,
"loss": 0.6425,
"step": 3860
},
{
"epoch": 56.09,
"learning_rate": 5.1440677966101704e-05,
"loss": 0.6439,
"step": 3870
},
{
"epoch": 56.23,
"learning_rate": 5.1271186440677964e-05,
"loss": 0.6336,
"step": 3880
},
{
"epoch": 56.38,
"learning_rate": 5.110169491525424e-05,
"loss": 0.6612,
"step": 3890
},
{
"epoch": 56.52,
"learning_rate": 5.093220338983051e-05,
"loss": 0.6779,
"step": 3900
},
{
"epoch": 56.67,
"learning_rate": 5.076271186440678e-05,
"loss": 0.6551,
"step": 3910
},
{
"epoch": 56.81,
"learning_rate": 5.059322033898305e-05,
"loss": 0.7081,
"step": 3920
},
{
"epoch": 56.96,
"learning_rate": 5.042372881355932e-05,
"loss": 0.5887,
"step": 3930
},
{
"epoch": 57.1,
"learning_rate": 5.0254237288135596e-05,
"loss": 0.5609,
"step": 3940
},
{
"epoch": 57.25,
"learning_rate": 5.008474576271187e-05,
"loss": 0.6164,
"step": 3950
},
{
"epoch": 57.39,
"learning_rate": 4.991525423728814e-05,
"loss": 0.6744,
"step": 3960
},
{
"epoch": 57.54,
"learning_rate": 4.974576271186441e-05,
"loss": 0.6463,
"step": 3970
},
{
"epoch": 57.68,
"learning_rate": 4.957627118644068e-05,
"loss": 0.5984,
"step": 3980
},
{
"epoch": 57.83,
"learning_rate": 4.940677966101695e-05,
"loss": 0.6503,
"step": 3990
},
{
"epoch": 57.97,
"learning_rate": 4.923728813559322e-05,
"loss": 0.6655,
"step": 4000
},
{
"epoch": 57.97,
"eval_loss": 0.18997956812381744,
"eval_runtime": 588.4214,
"eval_samples_per_second": 5.768,
"eval_steps_per_second": 0.722,
"eval_wer": 0.14038245034473124,
"step": 4000
},
{
"epoch": 58.12,
"learning_rate": 4.9067796610169495e-05,
"loss": 0.6322,
"step": 4010
},
{
"epoch": 58.26,
"learning_rate": 4.889830508474577e-05,
"loss": 0.62,
"step": 4020
},
{
"epoch": 58.41,
"learning_rate": 4.8728813559322034e-05,
"loss": 0.6211,
"step": 4030
},
{
"epoch": 58.55,
"learning_rate": 4.855932203389831e-05,
"loss": 0.5958,
"step": 4040
},
{
"epoch": 58.7,
"learning_rate": 4.8389830508474574e-05,
"loss": 0.6592,
"step": 4050
},
{
"epoch": 58.84,
"learning_rate": 4.822033898305085e-05,
"loss": 0.6164,
"step": 4060
},
{
"epoch": 58.99,
"learning_rate": 4.805084745762712e-05,
"loss": 0.5693,
"step": 4070
},
{
"epoch": 59.13,
"learning_rate": 4.788135593220339e-05,
"loss": 0.6635,
"step": 4080
},
{
"epoch": 59.28,
"learning_rate": 4.7711864406779666e-05,
"loss": 0.6637,
"step": 4090
},
{
"epoch": 59.42,
"learning_rate": 4.754237288135593e-05,
"loss": 0.6043,
"step": 4100
},
{
"epoch": 59.57,
"learning_rate": 4.7372881355932206e-05,
"loss": 0.6564,
"step": 4110
},
{
"epoch": 59.71,
"learning_rate": 4.720338983050848e-05,
"loss": 0.5801,
"step": 4120
},
{
"epoch": 59.86,
"learning_rate": 4.703389830508475e-05,
"loss": 0.643,
"step": 4130
},
{
"epoch": 60.0,
"learning_rate": 4.686440677966102e-05,
"loss": 0.6152,
"step": 4140
},
{
"epoch": 60.14,
"learning_rate": 4.669491525423729e-05,
"loss": 0.5629,
"step": 4150
},
{
"epoch": 60.29,
"learning_rate": 4.652542372881356e-05,
"loss": 0.6618,
"step": 4160
},
{
"epoch": 60.43,
"learning_rate": 4.635593220338984e-05,
"loss": 0.6955,
"step": 4170
},
{
"epoch": 60.58,
"learning_rate": 4.6186440677966104e-05,
"loss": 0.5983,
"step": 4180
},
{
"epoch": 60.72,
"learning_rate": 4.601694915254238e-05,
"loss": 0.6647,
"step": 4190
},
{
"epoch": 60.87,
"learning_rate": 4.5847457627118644e-05,
"loss": 0.7206,
"step": 4200
},
{
"epoch": 61.01,
"learning_rate": 4.567796610169492e-05,
"loss": 0.6092,
"step": 4210
},
{
"epoch": 61.16,
"learning_rate": 4.550847457627119e-05,
"loss": 0.7258,
"step": 4220
},
{
"epoch": 61.3,
"learning_rate": 4.533898305084746e-05,
"loss": 0.5842,
"step": 4230
},
{
"epoch": 61.45,
"learning_rate": 4.516949152542373e-05,
"loss": 0.5988,
"step": 4240
},
{
"epoch": 61.59,
"learning_rate": 4.5e-05,
"loss": 0.816,
"step": 4250
},
{
"epoch": 61.74,
"learning_rate": 4.483050847457627e-05,
"loss": 0.6105,
"step": 4260
},
{
"epoch": 61.88,
"learning_rate": 4.466101694915254e-05,
"loss": 0.6099,
"step": 4270
},
{
"epoch": 62.03,
"learning_rate": 4.4491525423728816e-05,
"loss": 0.6852,
"step": 4280
},
{
"epoch": 62.17,
"learning_rate": 4.432203389830509e-05,
"loss": 0.6235,
"step": 4290
},
{
"epoch": 62.32,
"learning_rate": 4.4152542372881355e-05,
"loss": 0.5611,
"step": 4300
},
{
"epoch": 62.46,
"learning_rate": 4.398305084745763e-05,
"loss": 0.6345,
"step": 4310
},
{
"epoch": 62.61,
"learning_rate": 4.38135593220339e-05,
"loss": 0.5963,
"step": 4320
},
{
"epoch": 62.75,
"learning_rate": 4.3644067796610175e-05,
"loss": 0.6105,
"step": 4330
},
{
"epoch": 62.9,
"learning_rate": 4.347457627118644e-05,
"loss": 0.6964,
"step": 4340
},
{
"epoch": 63.04,
"learning_rate": 4.3305084745762714e-05,
"loss": 0.627,
"step": 4350
},
{
"epoch": 63.19,
"learning_rate": 4.313559322033899e-05,
"loss": 0.7093,
"step": 4360
},
{
"epoch": 63.33,
"learning_rate": 4.2966101694915254e-05,
"loss": 0.8016,
"step": 4370
},
{
"epoch": 63.48,
"learning_rate": 4.279661016949153e-05,
"loss": 0.6789,
"step": 4380
},
{
"epoch": 63.62,
"learning_rate": 4.26271186440678e-05,
"loss": 0.633,
"step": 4390
},
{
"epoch": 63.77,
"learning_rate": 4.245762711864407e-05,
"loss": 0.5965,
"step": 4400
},
{
"epoch": 63.91,
"learning_rate": 4.228813559322034e-05,
"loss": 0.6236,
"step": 4410
},
{
"epoch": 64.06,
"learning_rate": 4.211864406779661e-05,
"loss": 0.6159,
"step": 4420
},
{
"epoch": 64.2,
"learning_rate": 4.1949152542372886e-05,
"loss": 0.7223,
"step": 4430
},
{
"epoch": 64.35,
"learning_rate": 4.177966101694916e-05,
"loss": 0.6515,
"step": 4440
},
{
"epoch": 64.49,
"learning_rate": 4.1610169491525425e-05,
"loss": 0.6059,
"step": 4450
},
{
"epoch": 64.64,
"learning_rate": 4.14406779661017e-05,
"loss": 0.6254,
"step": 4460
},
{
"epoch": 64.78,
"learning_rate": 4.1271186440677965e-05,
"loss": 0.5723,
"step": 4470
},
{
"epoch": 64.93,
"learning_rate": 4.110169491525424e-05,
"loss": 0.6953,
"step": 4480
},
{
"epoch": 65.07,
"learning_rate": 4.093220338983051e-05,
"loss": 0.6141,
"step": 4490
},
{
"epoch": 65.22,
"learning_rate": 4.0762711864406784e-05,
"loss": 0.574,
"step": 4500
},
{
"epoch": 65.22,
"eval_loss": 0.18217810988426208,
"eval_runtime": 586.1117,
"eval_samples_per_second": 5.791,
"eval_steps_per_second": 0.725,
"eval_wer": 0.13701320579510062,
"step": 4500
},
{
"epoch": 65.36,
"learning_rate": 4.059322033898305e-05,
"loss": 0.6026,
"step": 4510
},
{
"epoch": 65.51,
"learning_rate": 4.0423728813559324e-05,
"loss": 0.533,
"step": 4520
},
{
"epoch": 65.65,
"learning_rate": 4.025423728813559e-05,
"loss": 0.6359,
"step": 4530
},
{
"epoch": 65.8,
"learning_rate": 4.008474576271187e-05,
"loss": 0.6229,
"step": 4540
},
{
"epoch": 65.94,
"learning_rate": 3.9915254237288136e-05,
"loss": 0.6072,
"step": 4550
},
{
"epoch": 66.09,
"learning_rate": 3.974576271186441e-05,
"loss": 0.6953,
"step": 4560
},
{
"epoch": 66.23,
"learning_rate": 3.9576271186440676e-05,
"loss": 0.6184,
"step": 4570
},
{
"epoch": 66.38,
"learning_rate": 3.940677966101695e-05,
"loss": 0.6346,
"step": 4580
},
{
"epoch": 66.52,
"learning_rate": 3.923728813559322e-05,
"loss": 0.5897,
"step": 4590
},
{
"epoch": 66.67,
"learning_rate": 3.9067796610169495e-05,
"loss": 0.6499,
"step": 4600
},
{
"epoch": 66.81,
"learning_rate": 3.889830508474576e-05,
"loss": 0.6347,
"step": 4610
},
{
"epoch": 66.96,
"learning_rate": 3.8728813559322035e-05,
"loss": 0.6815,
"step": 4620
},
{
"epoch": 67.1,
"learning_rate": 3.855932203389831e-05,
"loss": 0.6266,
"step": 4630
},
{
"epoch": 67.25,
"learning_rate": 3.838983050847458e-05,
"loss": 0.6055,
"step": 4640
},
{
"epoch": 67.39,
"learning_rate": 3.8220338983050854e-05,
"loss": 0.6518,
"step": 4650
},
{
"epoch": 67.54,
"learning_rate": 3.805084745762712e-05,
"loss": 0.6208,
"step": 4660
},
{
"epoch": 67.68,
"learning_rate": 3.7881355932203394e-05,
"loss": 0.5473,
"step": 4670
},
{
"epoch": 67.83,
"learning_rate": 3.771186440677966e-05,
"loss": 0.6044,
"step": 4680
},
{
"epoch": 67.97,
"learning_rate": 3.7542372881355934e-05,
"loss": 0.5821,
"step": 4690
},
{
"epoch": 68.12,
"learning_rate": 3.737288135593221e-05,
"loss": 0.7136,
"step": 4700
},
{
"epoch": 68.26,
"learning_rate": 3.720338983050848e-05,
"loss": 0.6163,
"step": 4710
},
{
"epoch": 68.41,
"learning_rate": 3.7033898305084746e-05,
"loss": 0.6651,
"step": 4720
},
{
"epoch": 68.55,
"learning_rate": 3.686440677966102e-05,
"loss": 0.5881,
"step": 4730
},
{
"epoch": 68.7,
"learning_rate": 3.6694915254237286e-05,
"loss": 0.6139,
"step": 4740
},
{
"epoch": 68.84,
"learning_rate": 3.6525423728813566e-05,
"loss": 0.6667,
"step": 4750
},
{
"epoch": 68.99,
"learning_rate": 3.635593220338983e-05,
"loss": 0.6623,
"step": 4760
},
{
"epoch": 69.13,
"learning_rate": 3.6186440677966105e-05,
"loss": 0.6268,
"step": 4770
},
{
"epoch": 69.28,
"learning_rate": 3.601694915254237e-05,
"loss": 0.7324,
"step": 4780
},
{
"epoch": 69.42,
"learning_rate": 3.5847457627118645e-05,
"loss": 0.6088,
"step": 4790
},
{
"epoch": 69.57,
"learning_rate": 3.567796610169492e-05,
"loss": 0.5793,
"step": 4800
},
{
"epoch": 69.71,
"learning_rate": 3.550847457627119e-05,
"loss": 0.6164,
"step": 4810
},
{
"epoch": 69.86,
"learning_rate": 3.533898305084746e-05,
"loss": 0.5903,
"step": 4820
},
{
"epoch": 70.0,
"learning_rate": 3.516949152542373e-05,
"loss": 0.5692,
"step": 4830
},
{
"epoch": 70.14,
"learning_rate": 3.5e-05,
"loss": 0.6192,
"step": 4840
},
{
"epoch": 70.29,
"learning_rate": 3.483050847457627e-05,
"loss": 0.5691,
"step": 4850
},
{
"epoch": 70.43,
"learning_rate": 3.466101694915254e-05,
"loss": 0.6212,
"step": 4860
},
{
"epoch": 70.58,
"learning_rate": 3.4491525423728816e-05,
"loss": 0.5972,
"step": 4870
},
{
"epoch": 70.72,
"learning_rate": 3.432203389830508e-05,
"loss": 0.643,
"step": 4880
},
{
"epoch": 70.87,
"learning_rate": 3.4152542372881356e-05,
"loss": 0.6119,
"step": 4890
},
{
"epoch": 71.01,
"learning_rate": 3.398305084745763e-05,
"loss": 0.6299,
"step": 4900
},
{
"epoch": 71.16,
"learning_rate": 3.38135593220339e-05,
"loss": 0.5901,
"step": 4910
},
{
"epoch": 71.3,
"learning_rate": 3.3644067796610175e-05,
"loss": 0.5598,
"step": 4920
},
{
"epoch": 71.45,
"learning_rate": 3.347457627118644e-05,
"loss": 0.6103,
"step": 4930
},
{
"epoch": 71.59,
"learning_rate": 3.3305084745762715e-05,
"loss": 0.6357,
"step": 4940
},
{
"epoch": 71.74,
"learning_rate": 3.313559322033898e-05,
"loss": 0.6267,
"step": 4950
},
{
"epoch": 71.88,
"learning_rate": 3.296610169491526e-05,
"loss": 0.6761,
"step": 4960
},
{
"epoch": 72.03,
"learning_rate": 3.279661016949153e-05,
"loss": 0.5721,
"step": 4970
},
{
"epoch": 72.17,
"learning_rate": 3.26271186440678e-05,
"loss": 0.606,
"step": 4980
},
{
"epoch": 72.32,
"learning_rate": 3.245762711864407e-05,
"loss": 0.5572,
"step": 4990
},
{
"epoch": 72.46,
"learning_rate": 3.228813559322034e-05,
"loss": 0.6211,
"step": 5000
},
{
"epoch": 72.46,
"eval_loss": 0.19374500215053558,
"eval_runtime": 586.2574,
"eval_samples_per_second": 5.789,
"eval_steps_per_second": 0.725,
"eval_wer": 0.13547650157368374,
"step": 5000
},
{
"epoch": 72.61,
"learning_rate": 3.2118644067796613e-05,
"loss": 0.5981,
"step": 5010
},
{
"epoch": 72.75,
"learning_rate": 3.1949152542372887e-05,
"loss": 0.5553,
"step": 5020
},
{
"epoch": 72.9,
"learning_rate": 3.177966101694915e-05,
"loss": 0.5557,
"step": 5030
},
{
"epoch": 73.04,
"learning_rate": 3.1610169491525426e-05,
"loss": 0.6467,
"step": 5040
},
{
"epoch": 73.19,
"learning_rate": 3.144067796610169e-05,
"loss": 0.572,
"step": 5050
},
{
"epoch": 73.33,
"learning_rate": 3.1271186440677966e-05,
"loss": 0.5939,
"step": 5060
},
{
"epoch": 73.48,
"learning_rate": 3.110169491525424e-05,
"loss": 0.6428,
"step": 5070
},
{
"epoch": 73.62,
"learning_rate": 3.093220338983051e-05,
"loss": 0.5347,
"step": 5080
},
{
"epoch": 73.77,
"learning_rate": 3.076271186440678e-05,
"loss": 0.6837,
"step": 5090
},
{
"epoch": 73.91,
"learning_rate": 3.059322033898305e-05,
"loss": 0.6088,
"step": 5100
},
{
"epoch": 74.06,
"learning_rate": 3.042372881355932e-05,
"loss": 0.6286,
"step": 5110
},
{
"epoch": 74.2,
"learning_rate": 3.0254237288135594e-05,
"loss": 0.6553,
"step": 5120
},
{
"epoch": 74.35,
"learning_rate": 3.0084745762711864e-05,
"loss": 0.6107,
"step": 5130
},
{
"epoch": 74.49,
"learning_rate": 2.9915254237288137e-05,
"loss": 0.589,
"step": 5140
},
{
"epoch": 74.64,
"learning_rate": 2.9745762711864407e-05,
"loss": 0.5846,
"step": 5150
},
{
"epoch": 74.78,
"learning_rate": 2.957627118644068e-05,
"loss": 0.5878,
"step": 5160
},
{
"epoch": 74.93,
"learning_rate": 2.9406779661016953e-05,
"loss": 0.7024,
"step": 5170
},
{
"epoch": 75.07,
"learning_rate": 2.9237288135593223e-05,
"loss": 0.6198,
"step": 5180
},
{
"epoch": 75.22,
"learning_rate": 2.9067796610169496e-05,
"loss": 0.6065,
"step": 5190
},
{
"epoch": 75.36,
"learning_rate": 2.8898305084745763e-05,
"loss": 0.5833,
"step": 5200
},
{
"epoch": 75.51,
"learning_rate": 2.8728813559322036e-05,
"loss": 0.6323,
"step": 5210
},
{
"epoch": 75.65,
"learning_rate": 2.8559322033898306e-05,
"loss": 0.5818,
"step": 5220
},
{
"epoch": 75.8,
"learning_rate": 2.838983050847458e-05,
"loss": 0.6146,
"step": 5230
},
{
"epoch": 75.94,
"learning_rate": 2.822033898305085e-05,
"loss": 0.5587,
"step": 5240
},
{
"epoch": 76.09,
"learning_rate": 2.8050847457627122e-05,
"loss": 0.5374,
"step": 5250
},
{
"epoch": 76.23,
"learning_rate": 2.788135593220339e-05,
"loss": 0.6266,
"step": 5260
},
{
"epoch": 76.38,
"learning_rate": 2.7711864406779665e-05,
"loss": 0.6307,
"step": 5270
},
{
"epoch": 76.52,
"learning_rate": 2.754237288135593e-05,
"loss": 0.586,
"step": 5280
},
{
"epoch": 76.67,
"learning_rate": 2.7372881355932208e-05,
"loss": 0.6525,
"step": 5290
},
{
"epoch": 76.81,
"learning_rate": 2.7203389830508474e-05,
"loss": 0.5892,
"step": 5300
},
{
"epoch": 76.96,
"learning_rate": 2.7033898305084747e-05,
"loss": 0.6079,
"step": 5310
},
{
"epoch": 77.1,
"learning_rate": 2.6864406779661017e-05,
"loss": 0.5914,
"step": 5320
},
{
"epoch": 77.25,
"learning_rate": 2.669491525423729e-05,
"loss": 0.5444,
"step": 5330
},
{
"epoch": 77.39,
"learning_rate": 2.652542372881356e-05,
"loss": 0.6253,
"step": 5340
},
{
"epoch": 77.54,
"learning_rate": 2.6355932203389833e-05,
"loss": 0.6257,
"step": 5350
},
{
"epoch": 77.68,
"learning_rate": 2.61864406779661e-05,
"loss": 0.5631,
"step": 5360
},
{
"epoch": 77.83,
"learning_rate": 2.6016949152542376e-05,
"loss": 0.5509,
"step": 5370
},
{
"epoch": 77.97,
"learning_rate": 2.5847457627118642e-05,
"loss": 0.6038,
"step": 5380
},
{
"epoch": 78.12,
"learning_rate": 2.5677966101694915e-05,
"loss": 0.6923,
"step": 5390
},
{
"epoch": 78.26,
"learning_rate": 2.5508474576271185e-05,
"loss": 0.6687,
"step": 5400
},
{
"epoch": 78.41,
"learning_rate": 2.5338983050847458e-05,
"loss": 0.6324,
"step": 5410
},
{
"epoch": 78.55,
"learning_rate": 2.5169491525423728e-05,
"loss": 0.584,
"step": 5420
},
{
"epoch": 78.7,
"learning_rate": 2.5e-05,
"loss": 0.5833,
"step": 5430
},
{
"epoch": 78.84,
"learning_rate": 2.483050847457627e-05,
"loss": 0.5061,
"step": 5440
},
{
"epoch": 78.99,
"learning_rate": 2.4661016949152544e-05,
"loss": 0.5788,
"step": 5450
},
{
"epoch": 79.13,
"learning_rate": 2.4491525423728814e-05,
"loss": 0.783,
"step": 5460
},
{
"epoch": 79.28,
"learning_rate": 2.4322033898305087e-05,
"loss": 0.5854,
"step": 5470
},
{
"epoch": 79.42,
"learning_rate": 2.4152542372881357e-05,
"loss": 0.6223,
"step": 5480
},
{
"epoch": 79.57,
"learning_rate": 2.3983050847457627e-05,
"loss": 0.5626,
"step": 5490
},
{
"epoch": 79.71,
"learning_rate": 2.38135593220339e-05,
"loss": 0.5883,
"step": 5500
},
{
"epoch": 79.71,
"eval_loss": 0.18716615438461304,
"eval_runtime": 588.0258,
"eval_samples_per_second": 5.772,
"eval_steps_per_second": 0.723,
"eval_wer": 0.1334960431920716,
"step": 5500
},
{
"epoch": 79.86,
"learning_rate": 2.364406779661017e-05,
"loss": 0.5428,
"step": 5510
},
{
"epoch": 80.0,
"learning_rate": 2.347457627118644e-05,
"loss": 0.6025,
"step": 5520
},
{
"epoch": 80.14,
"learning_rate": 2.3305084745762712e-05,
"loss": 0.5627,
"step": 5530
},
{
"epoch": 80.29,
"learning_rate": 2.3135593220338982e-05,
"loss": 0.5748,
"step": 5540
},
{
"epoch": 80.43,
"learning_rate": 2.2966101694915255e-05,
"loss": 0.5737,
"step": 5550
},
{
"epoch": 80.58,
"learning_rate": 2.2796610169491525e-05,
"loss": 0.5734,
"step": 5560
},
{
"epoch": 80.72,
"learning_rate": 2.2627118644067798e-05,
"loss": 0.5766,
"step": 5570
},
{
"epoch": 80.87,
"learning_rate": 2.245762711864407e-05,
"loss": 0.5437,
"step": 5580
},
{
"epoch": 81.01,
"learning_rate": 2.228813559322034e-05,
"loss": 0.6554,
"step": 5590
},
{
"epoch": 81.16,
"learning_rate": 2.211864406779661e-05,
"loss": 0.593,
"step": 5600
},
{
"epoch": 81.3,
"learning_rate": 2.1949152542372884e-05,
"loss": 0.5838,
"step": 5610
},
{
"epoch": 81.45,
"learning_rate": 2.1779661016949154e-05,
"loss": 0.6307,
"step": 5620
},
{
"epoch": 81.59,
"learning_rate": 2.1610169491525427e-05,
"loss": 0.582,
"step": 5630
},
{
"epoch": 81.74,
"learning_rate": 2.1440677966101697e-05,
"loss": 0.5314,
"step": 5640
},
{
"epoch": 81.88,
"learning_rate": 2.1271186440677967e-05,
"loss": 0.6731,
"step": 5650
},
{
"epoch": 82.03,
"learning_rate": 2.110169491525424e-05,
"loss": 0.558,
"step": 5660
},
{
"epoch": 82.17,
"learning_rate": 2.093220338983051e-05,
"loss": 0.5859,
"step": 5670
},
{
"epoch": 82.32,
"learning_rate": 2.076271186440678e-05,
"loss": 0.5677,
"step": 5680
},
{
"epoch": 82.46,
"learning_rate": 2.0593220338983052e-05,
"loss": 0.537,
"step": 5690
},
{
"epoch": 82.61,
"learning_rate": 2.0423728813559322e-05,
"loss": 0.6172,
"step": 5700
},
{
"epoch": 82.75,
"learning_rate": 2.0254237288135595e-05,
"loss": 0.592,
"step": 5710
},
{
"epoch": 82.9,
"learning_rate": 2.0084745762711865e-05,
"loss": 0.6172,
"step": 5720
},
{
"epoch": 83.04,
"learning_rate": 1.9915254237288135e-05,
"loss": 0.5772,
"step": 5730
},
{
"epoch": 83.19,
"learning_rate": 1.9745762711864408e-05,
"loss": 0.58,
"step": 5740
},
{
"epoch": 83.33,
"learning_rate": 1.9576271186440678e-05,
"loss": 0.5454,
"step": 5750
},
{
"epoch": 83.48,
"learning_rate": 1.940677966101695e-05,
"loss": 0.5959,
"step": 5760
},
{
"epoch": 83.62,
"learning_rate": 1.923728813559322e-05,
"loss": 0.5758,
"step": 5770
},
{
"epoch": 83.77,
"learning_rate": 1.906779661016949e-05,
"loss": 0.5605,
"step": 5780
},
{
"epoch": 83.91,
"learning_rate": 1.8898305084745764e-05,
"loss": 0.5798,
"step": 5790
},
{
"epoch": 84.06,
"learning_rate": 1.8728813559322033e-05,
"loss": 0.5482,
"step": 5800
},
{
"epoch": 84.2,
"learning_rate": 1.8559322033898307e-05,
"loss": 0.5288,
"step": 5810
},
{
"epoch": 84.35,
"learning_rate": 1.8389830508474576e-05,
"loss": 0.5644,
"step": 5820
},
{
"epoch": 84.49,
"learning_rate": 1.8220338983050846e-05,
"loss": 0.5637,
"step": 5830
},
{
"epoch": 84.64,
"learning_rate": 1.805084745762712e-05,
"loss": 0.6117,
"step": 5840
},
{
"epoch": 84.78,
"learning_rate": 1.7881355932203392e-05,
"loss": 0.5939,
"step": 5850
},
{
"epoch": 84.93,
"learning_rate": 1.7711864406779662e-05,
"loss": 0.6476,
"step": 5860
},
{
"epoch": 85.07,
"learning_rate": 1.7542372881355935e-05,
"loss": 0.5519,
"step": 5870
},
{
"epoch": 85.22,
"learning_rate": 1.7372881355932205e-05,
"loss": 0.5438,
"step": 5880
},
{
"epoch": 85.36,
"learning_rate": 1.7203389830508475e-05,
"loss": 0.5756,
"step": 5890
},
{
"epoch": 85.51,
"learning_rate": 1.7033898305084748e-05,
"loss": 0.507,
"step": 5900
},
{
"epoch": 85.65,
"learning_rate": 1.6864406779661018e-05,
"loss": 0.6028,
"step": 5910
},
{
"epoch": 85.8,
"learning_rate": 1.669491525423729e-05,
"loss": 0.6395,
"step": 5920
},
{
"epoch": 85.94,
"learning_rate": 1.652542372881356e-05,
"loss": 0.544,
"step": 5930
},
{
"epoch": 86.09,
"learning_rate": 1.635593220338983e-05,
"loss": 0.6061,
"step": 5940
},
{
"epoch": 86.23,
"learning_rate": 1.6186440677966104e-05,
"loss": 0.5938,
"step": 5950
},
{
"epoch": 86.38,
"learning_rate": 1.6016949152542373e-05,
"loss": 0.5194,
"step": 5960
},
{
"epoch": 86.52,
"learning_rate": 1.5847457627118646e-05,
"loss": 0.5893,
"step": 5970
},
{
"epoch": 86.67,
"learning_rate": 1.5677966101694916e-05,
"loss": 0.6398,
"step": 5980
},
{
"epoch": 86.81,
"learning_rate": 1.5508474576271186e-05,
"loss": 0.5758,
"step": 5990
},
{
"epoch": 86.96,
"learning_rate": 1.533898305084746e-05,
"loss": 0.5666,
"step": 6000
},
{
"epoch": 86.96,
"eval_loss": 0.18741416931152344,
"eval_runtime": 591.1329,
"eval_samples_per_second": 5.742,
"eval_steps_per_second": 0.719,
"eval_wer": 0.13238665779158346,
"step": 6000
},
{
"epoch": 87.1,
"learning_rate": 6.0898437500000006e-05,
"loss": 0.561,
"step": 6010
},
{
"epoch": 87.25,
"learning_rate": 6.0820312499999995e-05,
"loss": 0.6303,
"step": 6020
},
{
"epoch": 87.39,
"learning_rate": 6.0742187500000005e-05,
"loss": 0.5688,
"step": 6030
},
{
"epoch": 87.54,
"learning_rate": 6.066406250000001e-05,
"loss": 0.5866,
"step": 6040
},
{
"epoch": 87.68,
"learning_rate": 6.05859375e-05,
"loss": 0.5475,
"step": 6050
},
{
"epoch": 87.83,
"learning_rate": 6.050781250000001e-05,
"loss": 0.5986,
"step": 6060
},
{
"epoch": 87.97,
"learning_rate": 6.04296875e-05,
"loss": 0.5642,
"step": 6070
},
{
"epoch": 88.12,
"learning_rate": 6.03515625e-05,
"loss": 0.5845,
"step": 6080
},
{
"epoch": 88.26,
"learning_rate": 6.027343750000001e-05,
"loss": 0.5742,
"step": 6090
},
{
"epoch": 88.41,
"learning_rate": 6.01953125e-05,
"loss": 0.6203,
"step": 6100
},
{
"epoch": 88.55,
"learning_rate": 6.0117187500000004e-05,
"loss": 0.5252,
"step": 6110
},
{
"epoch": 88.7,
"learning_rate": 6.00390625e-05,
"loss": 0.6267,
"step": 6120
},
{
"epoch": 88.84,
"learning_rate": 5.99609375e-05,
"loss": 0.5775,
"step": 6130
},
{
"epoch": 88.99,
"learning_rate": 5.9882812500000006e-05,
"loss": 0.574,
"step": 6140
},
{
"epoch": 89.13,
"learning_rate": 5.98046875e-05,
"loss": 0.6589,
"step": 6150
},
{
"epoch": 89.28,
"learning_rate": 5.9726562500000006e-05,
"loss": 0.5757,
"step": 6160
},
{
"epoch": 89.42,
"learning_rate": 5.9648437499999996e-05,
"loss": 0.5718,
"step": 6170
},
{
"epoch": 89.57,
"learning_rate": 5.9570312500000005e-05,
"loss": 0.6193,
"step": 6180
},
{
"epoch": 89.71,
"learning_rate": 5.949218750000001e-05,
"loss": 0.5329,
"step": 6190
},
{
"epoch": 89.86,
"learning_rate": 5.94140625e-05,
"loss": 0.5745,
"step": 6200
},
{
"epoch": 90.0,
"learning_rate": 5.933593750000001e-05,
"loss": 0.5338,
"step": 6210
},
{
"epoch": 90.14,
"learning_rate": 5.92578125e-05,
"loss": 0.5571,
"step": 6220
},
{
"epoch": 90.29,
"learning_rate": 5.91796875e-05,
"loss": 0.6241,
"step": 6230
},
{
"epoch": 90.43,
"learning_rate": 5.9101562500000004e-05,
"loss": 0.5515,
"step": 6240
},
{
"epoch": 90.58,
"learning_rate": 5.90234375e-05,
"loss": 0.5821,
"step": 6250
},
{
"epoch": 90.72,
"learning_rate": 5.8945312500000004e-05,
"loss": 0.5679,
"step": 6260
},
{
"epoch": 90.87,
"learning_rate": 5.88671875e-05,
"loss": 0.5915,
"step": 6270
},
{
"epoch": 91.01,
"learning_rate": 5.8789062500000003e-05,
"loss": 0.6186,
"step": 6280
},
{
"epoch": 91.16,
"learning_rate": 5.8710937500000007e-05,
"loss": 0.607,
"step": 6290
},
{
"epoch": 91.3,
"learning_rate": 5.86328125e-05,
"loss": 0.5881,
"step": 6300
},
{
"epoch": 91.45,
"learning_rate": 5.8554687500000006e-05,
"loss": 0.59,
"step": 6310
},
{
"epoch": 91.59,
"learning_rate": 5.8476562499999996e-05,
"loss": 0.5548,
"step": 6320
},
{
"epoch": 91.74,
"learning_rate": 5.8398437500000006e-05,
"loss": 0.6262,
"step": 6330
},
{
"epoch": 91.88,
"learning_rate": 5.832031250000001e-05,
"loss": 0.5653,
"step": 6340
},
{
"epoch": 92.03,
"learning_rate": 5.82421875e-05,
"loss": 0.5882,
"step": 6350
},
{
"epoch": 92.17,
"learning_rate": 5.81640625e-05,
"loss": 0.57,
"step": 6360
},
{
"epoch": 92.32,
"learning_rate": 5.80859375e-05,
"loss": 0.5849,
"step": 6370
},
{
"epoch": 92.46,
"learning_rate": 5.80078125e-05,
"loss": 0.6534,
"step": 6380
},
{
"epoch": 92.61,
"learning_rate": 5.7929687500000005e-05,
"loss": 0.5327,
"step": 6390
},
{
"epoch": 92.75,
"learning_rate": 5.78515625e-05,
"loss": 0.5681,
"step": 6400
},
{
"epoch": 92.9,
"learning_rate": 5.7773437500000004e-05,
"loss": 0.6235,
"step": 6410
},
{
"epoch": 93.04,
"learning_rate": 5.76953125e-05,
"loss": 0.6378,
"step": 6420
},
{
"epoch": 93.19,
"learning_rate": 5.7617187500000004e-05,
"loss": 0.5202,
"step": 6430
},
{
"epoch": 93.33,
"learning_rate": 5.753906250000001e-05,
"loss": 0.5707,
"step": 6440
},
{
"epoch": 93.48,
"learning_rate": 5.74609375e-05,
"loss": 0.6858,
"step": 6450
},
{
"epoch": 93.62,
"learning_rate": 5.7382812500000007e-05,
"loss": 0.6192,
"step": 6460
},
{
"epoch": 93.77,
"learning_rate": 5.7304687499999996e-05,
"loss": 0.5724,
"step": 6470
},
{
"epoch": 93.91,
"learning_rate": 5.7226562500000006e-05,
"loss": 0.6433,
"step": 6480
},
{
"epoch": 94.06,
"learning_rate": 5.714843750000001e-05,
"loss": 0.6129,
"step": 6490
},
{
"epoch": 94.2,
"learning_rate": 5.70703125e-05,
"loss": 0.5526,
"step": 6500
},
{
"epoch": 94.2,
"eval_loss": 0.19980017840862274,
"eval_runtime": 598.6552,
"eval_samples_per_second": 5.669,
"eval_steps_per_second": 0.71,
"eval_wer": 0.13683241706316923,
"step": 6500
},
{
"epoch": 94.35,
"learning_rate": 5.69921875e-05,
"loss": 0.6583,
"step": 6510
},
{
"epoch": 94.49,
"learning_rate": 5.69140625e-05,
"loss": 0.5648,
"step": 6520
},
{
"epoch": 94.64,
"learning_rate": 5.68359375e-05,
"loss": 0.6398,
"step": 6530
},
{
"epoch": 94.78,
"learning_rate": 5.6757812500000005e-05,
"loss": 0.6695,
"step": 6540
},
{
"epoch": 94.93,
"learning_rate": 5.66796875e-05,
"loss": 0.6127,
"step": 6550
},
{
"epoch": 95.07,
"learning_rate": 5.6601562500000004e-05,
"loss": 0.594,
"step": 6560
},
{
"epoch": 95.22,
"learning_rate": 5.65234375e-05,
"loss": 0.6028,
"step": 6570
},
{
"epoch": 95.36,
"learning_rate": 5.6445312500000004e-05,
"loss": 0.5395,
"step": 6580
},
{
"epoch": 95.51,
"learning_rate": 5.636718750000001e-05,
"loss": 0.5202,
"step": 6590
},
{
"epoch": 95.65,
"learning_rate": 5.6289062500000004e-05,
"loss": 0.5872,
"step": 6600
},
{
"epoch": 95.8,
"learning_rate": 5.621093750000001e-05,
"loss": 0.6221,
"step": 6610
},
{
"epoch": 95.94,
"learning_rate": 5.6132812499999996e-05,
"loss": 0.6163,
"step": 6620
},
{
"epoch": 96.09,
"learning_rate": 5.60546875e-05,
"loss": 0.5511,
"step": 6630
},
{
"epoch": 96.23,
"learning_rate": 5.597656250000001e-05,
"loss": 0.5376,
"step": 6640
},
{
"epoch": 96.38,
"learning_rate": 5.58984375e-05,
"loss": 0.5835,
"step": 6650
},
{
"epoch": 96.52,
"learning_rate": 5.58203125e-05,
"loss": 0.5781,
"step": 6660
},
{
"epoch": 96.67,
"learning_rate": 5.57421875e-05,
"loss": 0.5538,
"step": 6670
},
{
"epoch": 96.81,
"learning_rate": 5.56640625e-05,
"loss": 0.5428,
"step": 6680
},
{
"epoch": 96.96,
"learning_rate": 5.5585937500000005e-05,
"loss": 0.7287,
"step": 6690
},
{
"epoch": 97.1,
"learning_rate": 5.55078125e-05,
"loss": 0.5216,
"step": 6700
},
{
"epoch": 97.25,
"learning_rate": 5.5429687500000005e-05,
"loss": 0.6409,
"step": 6710
},
{
"epoch": 97.39,
"learning_rate": 5.53515625e-05,
"loss": 0.6091,
"step": 6720
},
{
"epoch": 97.54,
"learning_rate": 5.5273437500000004e-05,
"loss": 0.5856,
"step": 6730
},
{
"epoch": 97.68,
"learning_rate": 5.519531250000001e-05,
"loss": 0.6319,
"step": 6740
},
{
"epoch": 97.83,
"learning_rate": 5.51171875e-05,
"loss": 0.5647,
"step": 6750
},
{
"epoch": 97.97,
"learning_rate": 5.503906250000001e-05,
"loss": 0.5605,
"step": 6760
},
{
"epoch": 98.12,
"learning_rate": 5.49609375e-05,
"loss": 0.6009,
"step": 6770
},
{
"epoch": 98.26,
"learning_rate": 5.48828125e-05,
"loss": 0.5321,
"step": 6780
},
{
"epoch": 98.41,
"learning_rate": 5.480468750000001e-05,
"loss": 0.6852,
"step": 6790
},
{
"epoch": 98.55,
"learning_rate": 5.47265625e-05,
"loss": 0.6208,
"step": 6800
},
{
"epoch": 98.7,
"learning_rate": 5.46484375e-05,
"loss": 0.649,
"step": 6810
},
{
"epoch": 98.84,
"learning_rate": 5.45703125e-05,
"loss": 0.5526,
"step": 6820
},
{
"epoch": 98.99,
"learning_rate": 5.44921875e-05,
"loss": 0.6026,
"step": 6830
},
{
"epoch": 99.13,
"learning_rate": 5.4414062500000006e-05,
"loss": 0.5693,
"step": 6840
},
{
"epoch": 99.28,
"learning_rate": 5.43359375e-05,
"loss": 0.628,
"step": 6850
},
{
"epoch": 99.42,
"learning_rate": 5.4257812500000005e-05,
"loss": 0.6071,
"step": 6860
},
{
"epoch": 99.57,
"learning_rate": 5.41796875e-05,
"loss": 0.6488,
"step": 6870
},
{
"epoch": 99.71,
"learning_rate": 5.4101562500000005e-05,
"loss": 0.6021,
"step": 6880
},
{
"epoch": 99.86,
"learning_rate": 5.402343750000001e-05,
"loss": 0.5946,
"step": 6890
},
{
"epoch": 100.0,
"learning_rate": 5.39453125e-05,
"loss": 0.544,
"step": 6900
},
{
"epoch": 100.14,
"learning_rate": 5.386718750000001e-05,
"loss": 0.6492,
"step": 6910
},
{
"epoch": 100.29,
"learning_rate": 5.37890625e-05,
"loss": 0.5121,
"step": 6920
},
{
"epoch": 100.43,
"learning_rate": 5.37109375e-05,
"loss": 0.5502,
"step": 6930
},
{
"epoch": 100.58,
"learning_rate": 5.363281250000001e-05,
"loss": 0.5826,
"step": 6940
},
{
"epoch": 100.72,
"learning_rate": 5.35546875e-05,
"loss": 0.6859,
"step": 6950
},
{
"epoch": 100.87,
"learning_rate": 5.34765625e-05,
"loss": 0.5431,
"step": 6960
},
{
"epoch": 101.01,
"learning_rate": 5.33984375e-05,
"loss": 0.5633,
"step": 6970
},
{
"epoch": 101.16,
"learning_rate": 5.33203125e-05,
"loss": 0.5923,
"step": 6980
},
{
"epoch": 101.3,
"learning_rate": 5.3242187500000006e-05,
"loss": 0.6335,
"step": 6990
},
{
"epoch": 101.45,
"learning_rate": 5.3171875e-05,
"loss": 0.5671,
"step": 7000
},
{
"epoch": 101.45,
"eval_loss": 0.205404132604599,
"eval_runtime": 576.5636,
"eval_samples_per_second": 5.887,
"eval_steps_per_second": 0.737,
"eval_wer": 0.13647905726893966,
"step": 7000
},
{
"epoch": 101.59,
"learning_rate": 5.309375e-05,
"loss": 0.5167,
"step": 7010
},
{
"epoch": 101.74,
"learning_rate": 5.3015625e-05,
"loss": 0.4972,
"step": 7020
},
{
"epoch": 101.88,
"learning_rate": 5.2937500000000006e-05,
"loss": 0.6213,
"step": 7030
},
{
"epoch": 102.03,
"learning_rate": 5.2859375e-05,
"loss": 0.5761,
"step": 7040
},
{
"epoch": 102.17,
"learning_rate": 5.2781250000000006e-05,
"loss": 0.5627,
"step": 7050
},
{
"epoch": 102.32,
"learning_rate": 5.2703124999999995e-05,
"loss": 0.5706,
"step": 7060
},
{
"epoch": 102.46,
"learning_rate": 5.2625000000000005e-05,
"loss": 0.591,
"step": 7070
},
{
"epoch": 102.61,
"learning_rate": 5.254687500000001e-05,
"loss": 0.5516,
"step": 7080
},
{
"epoch": 102.75,
"learning_rate": 5.246875e-05,
"loss": 0.5392,
"step": 7090
},
{
"epoch": 102.9,
"learning_rate": 5.2390625e-05,
"loss": 0.55,
"step": 7100
},
{
"epoch": 103.04,
"learning_rate": 5.23125e-05,
"loss": 0.4633,
"step": 7110
},
{
"epoch": 103.19,
"learning_rate": 5.2234375e-05,
"loss": 0.5683,
"step": 7120
},
{
"epoch": 103.33,
"learning_rate": 5.2156250000000004e-05,
"loss": 0.5863,
"step": 7130
},
{
"epoch": 103.48,
"learning_rate": 5.2078125e-05,
"loss": 0.5442,
"step": 7140
},
{
"epoch": 103.62,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.657,
"step": 7150
},
{
"epoch": 103.77,
"learning_rate": 5.1921875e-05,
"loss": 0.6041,
"step": 7160
},
{
"epoch": 103.91,
"learning_rate": 5.184375e-05,
"loss": 0.5324,
"step": 7170
},
{
"epoch": 104.06,
"learning_rate": 5.1765625000000006e-05,
"loss": 0.5533,
"step": 7180
},
{
"epoch": 104.2,
"learning_rate": 5.16875e-05,
"loss": 0.5202,
"step": 7190
},
{
"epoch": 104.35,
"learning_rate": 5.1609375000000006e-05,
"loss": 0.5488,
"step": 7200
},
{
"epoch": 104.49,
"learning_rate": 5.1531249999999996e-05,
"loss": 0.5089,
"step": 7210
},
{
"epoch": 104.64,
"learning_rate": 5.1453125e-05,
"loss": 0.5576,
"step": 7220
},
{
"epoch": 104.78,
"learning_rate": 5.137500000000001e-05,
"loss": 0.5677,
"step": 7230
},
{
"epoch": 104.93,
"learning_rate": 5.1296875e-05,
"loss": 0.5546,
"step": 7240
},
{
"epoch": 105.07,
"learning_rate": 5.121875e-05,
"loss": 0.5928,
"step": 7250
},
{
"epoch": 105.22,
"learning_rate": 5.1140625e-05,
"loss": 0.519,
"step": 7260
},
{
"epoch": 105.36,
"learning_rate": 5.10625e-05,
"loss": 0.5748,
"step": 7270
},
{
"epoch": 105.51,
"learning_rate": 5.0984375000000004e-05,
"loss": 0.5331,
"step": 7280
},
{
"epoch": 105.65,
"learning_rate": 5.090625e-05,
"loss": 0.6033,
"step": 7290
},
{
"epoch": 105.8,
"learning_rate": 5.0828125000000004e-05,
"loss": 0.5647,
"step": 7300
},
{
"epoch": 105.94,
"learning_rate": 5.075e-05,
"loss": 0.577,
"step": 7310
},
{
"epoch": 106.09,
"learning_rate": 5.0671875000000003e-05,
"loss": 0.5794,
"step": 7320
},
{
"epoch": 106.23,
"learning_rate": 5.059375000000001e-05,
"loss": 0.651,
"step": 7330
},
{
"epoch": 106.38,
"learning_rate": 5.0515625e-05,
"loss": 0.5955,
"step": 7340
},
{
"epoch": 106.52,
"learning_rate": 5.0437500000000006e-05,
"loss": 0.5274,
"step": 7350
},
{
"epoch": 106.67,
"learning_rate": 5.0359374999999996e-05,
"loss": 0.5578,
"step": 7360
},
{
"epoch": 106.81,
"learning_rate": 5.028125e-05,
"loss": 0.5788,
"step": 7370
},
{
"epoch": 106.96,
"learning_rate": 5.020312500000001e-05,
"loss": 0.5461,
"step": 7380
},
{
"epoch": 107.1,
"learning_rate": 5.0125e-05,
"loss": 0.6086,
"step": 7390
},
{
"epoch": 107.25,
"learning_rate": 5.0046875e-05,
"loss": 0.5851,
"step": 7400
},
{
"epoch": 107.39,
"learning_rate": 4.9968750000000005e-05,
"loss": 0.5803,
"step": 7410
},
{
"epoch": 107.54,
"learning_rate": 4.9890625e-05,
"loss": 0.5434,
"step": 7420
},
{
"epoch": 107.68,
"learning_rate": 4.98125e-05,
"loss": 0.5838,
"step": 7430
},
{
"epoch": 107.83,
"learning_rate": 4.9734375e-05,
"loss": 0.6052,
"step": 7440
},
{
"epoch": 107.97,
"learning_rate": 4.9656250000000004e-05,
"loss": 0.5245,
"step": 7450
},
{
"epoch": 108.12,
"learning_rate": 4.9578125e-05,
"loss": 0.5706,
"step": 7460
},
{
"epoch": 108.26,
"learning_rate": 4.9500000000000004e-05,
"loss": 0.593,
"step": 7470
},
{
"epoch": 108.41,
"learning_rate": 4.9421875e-05,
"loss": 0.7001,
"step": 7480
},
{
"epoch": 108.55,
"learning_rate": 4.9343749999999997e-05,
"loss": 0.5745,
"step": 7490
},
{
"epoch": 108.7,
"learning_rate": 4.9265625000000007e-05,
"loss": 0.5514,
"step": 7500
},
{
"epoch": 108.7,
"eval_loss": 0.19866465032100677,
"eval_runtime": 581.4592,
"eval_samples_per_second": 5.837,
"eval_steps_per_second": 0.731,
"eval_wer": 0.1339891033700663,
"step": 7500
},
{
"epoch": 108.84,
"learning_rate": 4.91875e-05,
"loss": 0.6588,
"step": 7510
},
{
"epoch": 108.99,
"learning_rate": 4.9109375e-05,
"loss": 0.5171,
"step": 7520
},
{
"epoch": 109.13,
"learning_rate": 4.903125e-05,
"loss": 0.6426,
"step": 7530
},
{
"epoch": 109.28,
"learning_rate": 4.8953125e-05,
"loss": 0.6569,
"step": 7540
},
{
"epoch": 109.42,
"learning_rate": 4.8875e-05,
"loss": 0.5613,
"step": 7550
},
{
"epoch": 109.57,
"learning_rate": 4.8796875000000005e-05,
"loss": 0.5761,
"step": 7560
},
{
"epoch": 109.71,
"learning_rate": 4.871875e-05,
"loss": 0.6034,
"step": 7570
},
{
"epoch": 109.86,
"learning_rate": 4.8640625e-05,
"loss": 0.579,
"step": 7580
},
{
"epoch": 110.0,
"learning_rate": 4.85625e-05,
"loss": 0.4977,
"step": 7590
},
{
"epoch": 110.14,
"learning_rate": 4.8484375000000005e-05,
"loss": 0.5772,
"step": 7600
},
{
"epoch": 110.29,
"learning_rate": 4.840625e-05,
"loss": 0.5254,
"step": 7610
},
{
"epoch": 110.43,
"learning_rate": 4.8328125000000004e-05,
"loss": 0.5754,
"step": 7620
},
{
"epoch": 110.58,
"learning_rate": 4.825e-05,
"loss": 0.5472,
"step": 7630
},
{
"epoch": 110.72,
"learning_rate": 4.8171875e-05,
"loss": 0.5534,
"step": 7640
},
{
"epoch": 110.87,
"learning_rate": 4.809375000000001e-05,
"loss": 0.5449,
"step": 7650
},
{
"epoch": 111.01,
"learning_rate": 4.8015625e-05,
"loss": 0.6021,
"step": 7660
},
{
"epoch": 111.16,
"learning_rate": 4.79375e-05,
"loss": 0.6354,
"step": 7670
},
{
"epoch": 111.3,
"learning_rate": 4.7859375e-05,
"loss": 0.6301,
"step": 7680
},
{
"epoch": 111.45,
"learning_rate": 4.778125e-05,
"loss": 0.5631,
"step": 7690
},
{
"epoch": 111.59,
"learning_rate": 4.7703125e-05,
"loss": 0.5286,
"step": 7700
},
{
"epoch": 111.74,
"learning_rate": 4.7625000000000006e-05,
"loss": 0.6512,
"step": 7710
},
{
"epoch": 111.88,
"learning_rate": 4.7546875e-05,
"loss": 0.6631,
"step": 7720
},
{
"epoch": 112.03,
"learning_rate": 4.746875e-05,
"loss": 0.5116,
"step": 7730
},
{
"epoch": 112.17,
"learning_rate": 4.7390625e-05,
"loss": 0.5235,
"step": 7740
},
{
"epoch": 112.32,
"learning_rate": 4.7312500000000005e-05,
"loss": 0.4947,
"step": 7750
},
{
"epoch": 112.46,
"learning_rate": 4.7234375e-05,
"loss": 0.6046,
"step": 7760
},
{
"epoch": 112.61,
"learning_rate": 4.7156250000000004e-05,
"loss": 0.5819,
"step": 7770
},
{
"epoch": 112.75,
"learning_rate": 4.7078125e-05,
"loss": 0.5525,
"step": 7780
},
{
"epoch": 112.9,
"learning_rate": 4.7e-05,
"loss": 0.5515,
"step": 7790
},
{
"epoch": 113.04,
"learning_rate": 4.692187500000001e-05,
"loss": 0.5442,
"step": 7800
},
{
"epoch": 113.19,
"learning_rate": 4.6843750000000004e-05,
"loss": 0.4829,
"step": 7810
},
{
"epoch": 113.33,
"learning_rate": 4.6765625e-05,
"loss": 0.6145,
"step": 7820
},
{
"epoch": 113.48,
"learning_rate": 4.66875e-05,
"loss": 0.5503,
"step": 7830
},
{
"epoch": 113.62,
"learning_rate": 4.6609375e-05,
"loss": 0.5404,
"step": 7840
},
{
"epoch": 113.77,
"learning_rate": 4.653125e-05,
"loss": 0.6141,
"step": 7850
},
{
"epoch": 113.91,
"learning_rate": 4.6453125000000006e-05,
"loss": 0.5014,
"step": 7860
},
{
"epoch": 114.06,
"learning_rate": 4.6375e-05,
"loss": 0.5724,
"step": 7870
},
{
"epoch": 114.2,
"learning_rate": 4.6296875e-05,
"loss": 0.4928,
"step": 7880
},
{
"epoch": 114.35,
"learning_rate": 4.621875e-05,
"loss": 0.6444,
"step": 7890
},
{
"epoch": 114.49,
"learning_rate": 4.6140625000000005e-05,
"loss": 0.4999,
"step": 7900
},
{
"epoch": 114.64,
"learning_rate": 4.60625e-05,
"loss": 0.5315,
"step": 7910
},
{
"epoch": 114.78,
"learning_rate": 4.5984375000000005e-05,
"loss": 0.5117,
"step": 7920
},
{
"epoch": 114.93,
"learning_rate": 4.590625e-05,
"loss": 0.4851,
"step": 7930
},
{
"epoch": 115.07,
"learning_rate": 4.5828125e-05,
"loss": 0.5751,
"step": 7940
},
{
"epoch": 115.22,
"learning_rate": 4.575e-05,
"loss": 0.5408,
"step": 7950
},
{
"epoch": 115.36,
"learning_rate": 4.5671875000000004e-05,
"loss": 0.5763,
"step": 7960
},
{
"epoch": 115.51,
"learning_rate": 4.559375e-05,
"loss": 0.6231,
"step": 7970
},
{
"epoch": 115.65,
"learning_rate": 4.5515625000000004e-05,
"loss": 0.5413,
"step": 7980
},
{
"epoch": 115.8,
"learning_rate": 4.54375e-05,
"loss": 0.5532,
"step": 7990
},
{
"epoch": 115.94,
"learning_rate": 4.5359375e-05,
"loss": 0.5382,
"step": 8000
},
{
"epoch": 115.94,
"eval_loss": 0.21039190888404846,
"eval_runtime": 576.6919,
"eval_samples_per_second": 5.885,
"eval_steps_per_second": 0.737,
"eval_wer": 0.13444107519989482,
"step": 8000
},
{
"epoch": 116.09,
"learning_rate": 4.528125e-05,
"loss": 0.5209,
"step": 8010
},
{
"epoch": 116.23,
"learning_rate": 4.5203125e-05,
"loss": 0.5247,
"step": 8020
},
{
"epoch": 116.38,
"learning_rate": 4.5125e-05,
"loss": 0.5546,
"step": 8030
},
{
"epoch": 116.52,
"learning_rate": 4.5046875e-05,
"loss": 0.5698,
"step": 8040
},
{
"epoch": 116.67,
"learning_rate": 4.4968750000000005e-05,
"loss": 0.5687,
"step": 8050
},
{
"epoch": 116.81,
"learning_rate": 4.4890625e-05,
"loss": 0.5796,
"step": 8060
},
{
"epoch": 116.96,
"learning_rate": 4.4812500000000005e-05,
"loss": 0.522,
"step": 8070
},
{
"epoch": 117.1,
"learning_rate": 4.4734375e-05,
"loss": 0.658,
"step": 8080
},
{
"epoch": 117.25,
"learning_rate": 4.465625e-05,
"loss": 0.5765,
"step": 8090
},
{
"epoch": 117.39,
"learning_rate": 4.4578125e-05,
"loss": 0.581,
"step": 8100
},
{
"epoch": 117.54,
"learning_rate": 4.4500000000000004e-05,
"loss": 0.554,
"step": 8110
},
{
"epoch": 117.68,
"learning_rate": 4.4421875e-05,
"loss": 0.5084,
"step": 8120
},
{
"epoch": 117.83,
"learning_rate": 4.4343750000000004e-05,
"loss": 0.5309,
"step": 8130
},
{
"epoch": 117.97,
"learning_rate": 4.4265625e-05,
"loss": 0.5207,
"step": 8140
},
{
"epoch": 118.12,
"learning_rate": 4.4187500000000003e-05,
"loss": 0.5846,
"step": 8150
},
{
"epoch": 118.26,
"learning_rate": 4.4109375e-05,
"loss": 0.5859,
"step": 8160
},
{
"epoch": 118.41,
"learning_rate": 4.403125e-05,
"loss": 0.528,
"step": 8170
},
{
"epoch": 118.55,
"learning_rate": 4.3953125e-05,
"loss": 0.5681,
"step": 8180
},
{
"epoch": 118.7,
"learning_rate": 4.3875e-05,
"loss": 0.6339,
"step": 8190
},
{
"epoch": 118.84,
"learning_rate": 4.3796875000000006e-05,
"loss": 0.5926,
"step": 8200
},
{
"epoch": 118.99,
"learning_rate": 4.371875e-05,
"loss": 0.5677,
"step": 8210
},
{
"epoch": 119.13,
"learning_rate": 4.3640625e-05,
"loss": 0.6357,
"step": 8220
},
{
"epoch": 119.28,
"learning_rate": 4.35625e-05,
"loss": 0.4987,
"step": 8230
},
{
"epoch": 119.42,
"learning_rate": 4.3484375e-05,
"loss": 0.5703,
"step": 8240
},
{
"epoch": 119.57,
"learning_rate": 4.340625e-05,
"loss": 0.5854,
"step": 8250
},
{
"epoch": 119.71,
"learning_rate": 4.3328125000000005e-05,
"loss": 0.5502,
"step": 8260
},
{
"epoch": 119.86,
"learning_rate": 4.325e-05,
"loss": 0.5789,
"step": 8270
},
{
"epoch": 120.0,
"learning_rate": 4.3171875e-05,
"loss": 0.5338,
"step": 8280
},
{
"epoch": 120.14,
"learning_rate": 4.309375e-05,
"loss": 0.5498,
"step": 8290
},
{
"epoch": 120.29,
"learning_rate": 4.3015625000000004e-05,
"loss": 0.6334,
"step": 8300
},
{
"epoch": 120.43,
"learning_rate": 4.29375e-05,
"loss": 0.5499,
"step": 8310
},
{
"epoch": 120.58,
"learning_rate": 4.2859375e-05,
"loss": 0.5306,
"step": 8320
},
{
"epoch": 120.72,
"learning_rate": 4.278125e-05,
"loss": 0.5019,
"step": 8330
},
{
"epoch": 120.87,
"learning_rate": 4.2703124999999996e-05,
"loss": 0.5885,
"step": 8340
},
{
"epoch": 121.01,
"learning_rate": 4.2625000000000006e-05,
"loss": 0.5798,
"step": 8350
},
{
"epoch": 121.16,
"learning_rate": 4.2546875e-05,
"loss": 0.5493,
"step": 8360
},
{
"epoch": 121.3,
"learning_rate": 4.246875e-05,
"loss": 0.5413,
"step": 8370
},
{
"epoch": 121.45,
"learning_rate": 4.2390625e-05,
"loss": 0.5363,
"step": 8380
},
{
"epoch": 121.59,
"learning_rate": 4.23125e-05,
"loss": 0.5851,
"step": 8390
},
{
"epoch": 121.74,
"learning_rate": 4.2234375e-05,
"loss": 0.5319,
"step": 8400
},
{
"epoch": 121.88,
"learning_rate": 4.2156250000000005e-05,
"loss": 0.5567,
"step": 8410
},
{
"epoch": 122.03,
"learning_rate": 4.2078125e-05,
"loss": 0.5466,
"step": 8420
},
{
"epoch": 122.17,
"learning_rate": 4.2e-05,
"loss": 0.5854,
"step": 8430
},
{
"epoch": 122.32,
"learning_rate": 4.1921875e-05,
"loss": 0.5405,
"step": 8440
},
{
"epoch": 122.46,
"learning_rate": 4.1843750000000004e-05,
"loss": 0.6046,
"step": 8450
},
{
"epoch": 122.61,
"learning_rate": 4.1765625e-05,
"loss": 0.5083,
"step": 8460
},
{
"epoch": 122.75,
"learning_rate": 4.1687500000000004e-05,
"loss": 0.5405,
"step": 8470
},
{
"epoch": 122.9,
"learning_rate": 4.1609375e-05,
"loss": 0.4826,
"step": 8480
},
{
"epoch": 123.04,
"learning_rate": 4.1531249999999996e-05,
"loss": 0.5092,
"step": 8490
},
{
"epoch": 123.19,
"learning_rate": 4.1453125000000006e-05,
"loss": 0.5819,
"step": 8500
},
{
"epoch": 123.19,
"eval_loss": 0.21253542602062225,
"eval_runtime": 579.9796,
"eval_samples_per_second": 5.852,
"eval_steps_per_second": 0.733,
"eval_wer": 0.13341386649573914,
"step": 8500
},
{
"epoch": 123.33,
"learning_rate": 4.1375e-05,
"loss": 0.5768,
"step": 8510
},
{
"epoch": 123.48,
"learning_rate": 4.1296875e-05,
"loss": 0.5244,
"step": 8520
},
{
"epoch": 123.62,
"learning_rate": 4.121875e-05,
"loss": 0.6989,
"step": 8530
},
{
"epoch": 123.77,
"learning_rate": 4.1140625e-05,
"loss": 0.5204,
"step": 8540
},
{
"epoch": 123.91,
"learning_rate": 4.10625e-05,
"loss": 0.5902,
"step": 8550
},
{
"epoch": 124.06,
"learning_rate": 4.0984375000000005e-05,
"loss": 0.4826,
"step": 8560
},
{
"epoch": 124.2,
"learning_rate": 4.090625e-05,
"loss": 0.5641,
"step": 8570
},
{
"epoch": 124.35,
"learning_rate": 4.0828125e-05,
"loss": 0.5823,
"step": 8580
},
{
"epoch": 124.49,
"learning_rate": 4.075e-05,
"loss": 0.4906,
"step": 8590
},
{
"epoch": 124.64,
"learning_rate": 4.0671875000000004e-05,
"loss": 0.5423,
"step": 8600
},
{
"epoch": 124.78,
"learning_rate": 4.059375e-05,
"loss": 0.5489,
"step": 8610
},
{
"epoch": 124.93,
"learning_rate": 4.0515625000000004e-05,
"loss": 0.56,
"step": 8620
},
{
"epoch": 125.07,
"learning_rate": 4.04375e-05,
"loss": 0.5411,
"step": 8630
},
{
"epoch": 125.22,
"learning_rate": 4.0359375e-05,
"loss": 0.5912,
"step": 8640
},
{
"epoch": 125.36,
"learning_rate": 4.028125000000001e-05,
"loss": 0.5835,
"step": 8650
},
{
"epoch": 125.51,
"learning_rate": 4.0203125e-05,
"loss": 0.5686,
"step": 8660
},
{
"epoch": 125.65,
"learning_rate": 4.0125e-05,
"loss": 0.5911,
"step": 8670
},
{
"epoch": 125.8,
"learning_rate": 4.0046875e-05,
"loss": 0.527,
"step": 8680
},
{
"epoch": 125.94,
"learning_rate": 3.996875e-05,
"loss": 0.4846,
"step": 8690
},
{
"epoch": 126.09,
"learning_rate": 3.9890625e-05,
"loss": 0.6386,
"step": 8700
},
{
"epoch": 126.23,
"learning_rate": 3.9812500000000005e-05,
"loss": 0.5246,
"step": 8710
},
{
"epoch": 126.38,
"learning_rate": 3.9734375e-05,
"loss": 0.5456,
"step": 8720
},
{
"epoch": 126.52,
"learning_rate": 3.965625e-05,
"loss": 0.5249,
"step": 8730
},
{
"epoch": 126.67,
"learning_rate": 3.9578125e-05,
"loss": 0.6076,
"step": 8740
},
{
"epoch": 126.81,
"learning_rate": 3.9500000000000005e-05,
"loss": 0.4845,
"step": 8750
},
{
"epoch": 126.96,
"learning_rate": 3.9421875e-05,
"loss": 0.5607,
"step": 8760
},
{
"epoch": 127.1,
"learning_rate": 3.9343750000000004e-05,
"loss": 0.5276,
"step": 8770
},
{
"epoch": 127.25,
"learning_rate": 3.9265625e-05,
"loss": 0.5282,
"step": 8780
},
{
"epoch": 127.39,
"learning_rate": 3.91875e-05,
"loss": 0.5207,
"step": 8790
},
{
"epoch": 127.54,
"learning_rate": 3.910937500000001e-05,
"loss": 0.5607,
"step": 8800
},
{
"epoch": 127.68,
"learning_rate": 3.9031250000000003e-05,
"loss": 0.508,
"step": 8810
},
{
"epoch": 127.83,
"learning_rate": 3.8953125e-05,
"loss": 0.5262,
"step": 8820
},
{
"epoch": 127.97,
"learning_rate": 3.8875e-05,
"loss": 0.5026,
"step": 8830
},
{
"epoch": 128.12,
"learning_rate": 3.8796875e-05,
"loss": 0.5932,
"step": 8840
},
{
"epoch": 128.26,
"learning_rate": 3.871875e-05,
"loss": 0.6483,
"step": 8850
},
{
"epoch": 128.41,
"learning_rate": 3.8640625000000006e-05,
"loss": 0.6052,
"step": 8860
},
{
"epoch": 128.55,
"learning_rate": 3.85625e-05,
"loss": 0.521,
"step": 8870
},
{
"epoch": 128.7,
"learning_rate": 3.8484375e-05,
"loss": 0.5174,
"step": 8880
},
{
"epoch": 128.84,
"learning_rate": 3.840625e-05,
"loss": 0.4972,
"step": 8890
},
{
"epoch": 128.99,
"learning_rate": 3.8328125000000005e-05,
"loss": 0.5328,
"step": 8900
},
{
"epoch": 129.13,
"learning_rate": 3.825e-05,
"loss": 0.5884,
"step": 8910
},
{
"epoch": 129.28,
"learning_rate": 3.8171875000000005e-05,
"loss": 0.5271,
"step": 8920
},
{
"epoch": 129.42,
"learning_rate": 3.809375e-05,
"loss": 0.5299,
"step": 8930
},
{
"epoch": 129.57,
"learning_rate": 3.8015625e-05,
"loss": 0.5224,
"step": 8940
},
{
"epoch": 129.71,
"learning_rate": 3.79375e-05,
"loss": 0.4938,
"step": 8950
},
{
"epoch": 129.86,
"learning_rate": 3.7859375000000004e-05,
"loss": 0.5526,
"step": 8960
},
{
"epoch": 130.0,
"learning_rate": 3.778125e-05,
"loss": 0.4794,
"step": 8970
},
{
"epoch": 130.14,
"learning_rate": 3.7703125e-05,
"loss": 0.5212,
"step": 8980
},
{
"epoch": 130.29,
"learning_rate": 3.7625e-05,
"loss": 0.4934,
"step": 8990
},
{
"epoch": 130.43,
"learning_rate": 3.7546875e-05,
"loss": 0.5277,
"step": 9000
},
{
"epoch": 130.43,
"eval_loss": 0.2063010185956955,
"eval_runtime": 579.7764,
"eval_samples_per_second": 5.854,
"eval_steps_per_second": 0.733,
"eval_wer": 0.13300298301407687,
"step": 9000
},
{
"epoch": 130.58,
"learning_rate": 3.746875e-05,
"loss": 0.5609,
"step": 9010
},
{
"epoch": 130.72,
"learning_rate": 3.7390625e-05,
"loss": 0.5351,
"step": 9020
},
{
"epoch": 130.87,
"learning_rate": 3.73125e-05,
"loss": 0.5104,
"step": 9030
},
{
"epoch": 131.01,
"learning_rate": 3.7234375e-05,
"loss": 0.5557,
"step": 9040
},
{
"epoch": 131.16,
"learning_rate": 3.7156250000000005e-05,
"loss": 0.5155,
"step": 9050
},
{
"epoch": 131.3,
"learning_rate": 3.7078125e-05,
"loss": 0.6127,
"step": 9060
},
{
"epoch": 131.45,
"learning_rate": 3.7e-05,
"loss": 0.5371,
"step": 9070
},
{
"epoch": 131.59,
"learning_rate": 3.6921875e-05,
"loss": 0.477,
"step": 9080
},
{
"epoch": 131.74,
"learning_rate": 3.684375e-05,
"loss": 0.4657,
"step": 9090
},
{
"epoch": 131.88,
"learning_rate": 3.6765625e-05,
"loss": 0.5669,
"step": 9100
},
{
"epoch": 132.03,
"learning_rate": 3.6687500000000004e-05,
"loss": 0.4683,
"step": 9110
},
{
"epoch": 132.17,
"learning_rate": 3.6609375e-05,
"loss": 0.5235,
"step": 9120
},
{
"epoch": 132.32,
"learning_rate": 3.653125e-05,
"loss": 0.5178,
"step": 9130
},
{
"epoch": 132.46,
"learning_rate": 3.6453125e-05,
"loss": 0.4834,
"step": 9140
},
{
"epoch": 132.61,
"learning_rate": 3.6375e-05,
"loss": 0.5815,
"step": 9150
},
{
"epoch": 132.75,
"learning_rate": 3.6296875e-05,
"loss": 0.5547,
"step": 9160
},
{
"epoch": 132.9,
"learning_rate": 3.621875e-05,
"loss": 0.5386,
"step": 9170
},
{
"epoch": 133.04,
"learning_rate": 3.6140625e-05,
"loss": 0.5692,
"step": 9180
},
{
"epoch": 133.19,
"learning_rate": 3.60625e-05,
"loss": 0.5479,
"step": 9190
},
{
"epoch": 133.33,
"learning_rate": 3.5984375000000006e-05,
"loss": 0.4641,
"step": 9200
},
{
"epoch": 133.48,
"learning_rate": 3.590625e-05,
"loss": 0.4902,
"step": 9210
},
{
"epoch": 133.62,
"learning_rate": 3.5828125e-05,
"loss": 0.563,
"step": 9220
},
{
"epoch": 133.77,
"learning_rate": 3.575e-05,
"loss": 0.5269,
"step": 9230
},
{
"epoch": 133.91,
"learning_rate": 3.5671875e-05,
"loss": 0.623,
"step": 9240
},
{
"epoch": 134.06,
"learning_rate": 3.559375e-05,
"loss": 0.5057,
"step": 9250
},
{
"epoch": 134.2,
"learning_rate": 3.5515625000000004e-05,
"loss": 0.5614,
"step": 9260
},
{
"epoch": 134.35,
"learning_rate": 3.54375e-05,
"loss": 0.5777,
"step": 9270
},
{
"epoch": 134.49,
"learning_rate": 3.5359375e-05,
"loss": 0.4745,
"step": 9280
},
{
"epoch": 134.64,
"learning_rate": 3.528125e-05,
"loss": 0.5169,
"step": 9290
},
{
"epoch": 134.78,
"learning_rate": 3.5203125000000004e-05,
"loss": 0.5377,
"step": 9300
},
{
"epoch": 134.93,
"learning_rate": 3.5125e-05,
"loss": 0.5445,
"step": 9310
},
{
"epoch": 135.07,
"learning_rate": 3.5046875e-05,
"loss": 0.5448,
"step": 9320
},
{
"epoch": 135.22,
"learning_rate": 3.496875e-05,
"loss": 0.4979,
"step": 9330
},
{
"epoch": 135.36,
"learning_rate": 3.4890624999999996e-05,
"loss": 0.5021,
"step": 9340
},
{
"epoch": 135.51,
"learning_rate": 3.4812500000000006e-05,
"loss": 0.5783,
"step": 9350
},
{
"epoch": 135.65,
"learning_rate": 3.4734375e-05,
"loss": 0.5397,
"step": 9360
},
{
"epoch": 135.8,
"learning_rate": 3.465625e-05,
"loss": 0.5309,
"step": 9370
},
{
"epoch": 135.94,
"learning_rate": 3.4578125e-05,
"loss": 0.5264,
"step": 9380
},
{
"epoch": 136.09,
"learning_rate": 3.45e-05,
"loss": 0.5904,
"step": 9390
},
{
"epoch": 136.23,
"learning_rate": 3.4421875e-05,
"loss": 0.5292,
"step": 9400
},
{
"epoch": 136.38,
"learning_rate": 3.4343750000000005e-05,
"loss": 0.5631,
"step": 9410
},
{
"epoch": 136.52,
"learning_rate": 3.4265625e-05,
"loss": 0.4449,
"step": 9420
},
{
"epoch": 136.67,
"learning_rate": 3.41875e-05,
"loss": 0.5252,
"step": 9430
},
{
"epoch": 136.81,
"learning_rate": 3.4109375e-05,
"loss": 0.478,
"step": 9440
},
{
"epoch": 136.96,
"learning_rate": 3.4031250000000004e-05,
"loss": 0.6081,
"step": 9450
},
{
"epoch": 137.1,
"learning_rate": 3.3953125e-05,
"loss": 0.5528,
"step": 9460
},
{
"epoch": 137.25,
"learning_rate": 3.3875000000000003e-05,
"loss": 0.6549,
"step": 9470
},
{
"epoch": 137.39,
"learning_rate": 3.3796875e-05,
"loss": 0.5099,
"step": 9480
},
{
"epoch": 137.54,
"learning_rate": 3.3718749999999996e-05,
"loss": 0.5454,
"step": 9490
},
{
"epoch": 137.68,
"learning_rate": 3.3640625000000006e-05,
"loss": 0.4626,
"step": 9500
},
{
"epoch": 137.68,
"eval_loss": 0.2105015516281128,
"eval_runtime": 578.1395,
"eval_samples_per_second": 5.871,
"eval_steps_per_second": 0.735,
"eval_wer": 0.13102252463246472,
"step": 9500
},
{
"epoch": 137.83,
"learning_rate": 3.35625e-05,
"loss": 0.5226,
"step": 9510
},
{
"epoch": 137.97,
"learning_rate": 3.3484375e-05,
"loss": 0.5355,
"step": 9520
},
{
"epoch": 138.12,
"learning_rate": 3.340625e-05,
"loss": 0.5445,
"step": 9530
},
{
"epoch": 138.26,
"learning_rate": 3.3328125e-05,
"loss": 0.4949,
"step": 9540
},
{
"epoch": 138.41,
"learning_rate": 3.325e-05,
"loss": 0.4967,
"step": 9550
},
{
"epoch": 138.55,
"learning_rate": 3.3171875000000005e-05,
"loss": 0.4727,
"step": 9560
},
{
"epoch": 138.7,
"learning_rate": 3.309375e-05,
"loss": 0.4876,
"step": 9570
},
{
"epoch": 138.84,
"learning_rate": 3.3015625e-05,
"loss": 0.5354,
"step": 9580
},
{
"epoch": 138.99,
"learning_rate": 3.29375e-05,
"loss": 0.4908,
"step": 9590
},
{
"epoch": 139.13,
"learning_rate": 3.2859375000000004e-05,
"loss": 0.5092,
"step": 9600
},
{
"epoch": 139.28,
"learning_rate": 3.278125e-05,
"loss": 0.5326,
"step": 9610
},
{
"epoch": 139.42,
"learning_rate": 3.2703125000000004e-05,
"loss": 0.5484,
"step": 9620
},
{
"epoch": 139.57,
"learning_rate": 3.2625e-05,
"loss": 0.5084,
"step": 9630
},
{
"epoch": 139.71,
"learning_rate": 3.2546874999999997e-05,
"loss": 0.5468,
"step": 9640
},
{
"epoch": 139.86,
"learning_rate": 3.2468750000000007e-05,
"loss": 0.6011,
"step": 9650
},
{
"epoch": 140.0,
"learning_rate": 3.2390625e-05,
"loss": 0.5875,
"step": 9660
},
{
"epoch": 140.14,
"learning_rate": 3.23125e-05,
"loss": 0.5429,
"step": 9670
},
{
"epoch": 140.29,
"learning_rate": 3.2234375e-05,
"loss": 0.5103,
"step": 9680
},
{
"epoch": 140.43,
"learning_rate": 3.215625e-05,
"loss": 0.4965,
"step": 9690
},
{
"epoch": 140.58,
"learning_rate": 3.2078125e-05,
"loss": 0.4877,
"step": 9700
},
{
"epoch": 140.72,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.5853,
"step": 9710
},
{
"epoch": 140.87,
"learning_rate": 3.1921875e-05,
"loss": 0.5065,
"step": 9720
},
{
"epoch": 141.01,
"learning_rate": 3.184375e-05,
"loss": 0.5068,
"step": 9730
},
{
"epoch": 141.16,
"learning_rate": 3.1765625e-05,
"loss": 0.5398,
"step": 9740
},
{
"epoch": 141.3,
"learning_rate": 3.1687500000000005e-05,
"loss": 0.4639,
"step": 9750
},
{
"epoch": 141.45,
"learning_rate": 3.1609375e-05,
"loss": 0.461,
"step": 9760
},
{
"epoch": 141.59,
"learning_rate": 3.1531250000000004e-05,
"loss": 0.4832,
"step": 9770
},
{
"epoch": 141.74,
"learning_rate": 3.1453125e-05,
"loss": 0.5249,
"step": 9780
},
{
"epoch": 141.88,
"learning_rate": 3.1375e-05,
"loss": 0.5077,
"step": 9790
},
{
"epoch": 142.03,
"learning_rate": 3.1296875e-05,
"loss": 0.5561,
"step": 9800
},
{
"epoch": 142.17,
"learning_rate": 3.121875e-05,
"loss": 0.6738,
"step": 9810
},
{
"epoch": 142.32,
"learning_rate": 3.1140625e-05,
"loss": 0.539,
"step": 9820
},
{
"epoch": 142.46,
"learning_rate": 3.10625e-05,
"loss": 0.552,
"step": 9830
},
{
"epoch": 142.61,
"learning_rate": 3.0984375e-05,
"loss": 0.5254,
"step": 9840
},
{
"epoch": 142.75,
"learning_rate": 3.090625e-05,
"loss": 0.5628,
"step": 9850
},
{
"epoch": 142.9,
"learning_rate": 3.0828125e-05,
"loss": 0.5149,
"step": 9860
},
{
"epoch": 143.04,
"learning_rate": 3.075e-05,
"loss": 0.6018,
"step": 9870
},
{
"epoch": 143.19,
"learning_rate": 3.0671875e-05,
"loss": 0.5492,
"step": 9880
},
{
"epoch": 143.33,
"learning_rate": 3.059375e-05,
"loss": 0.48,
"step": 9890
},
{
"epoch": 143.48,
"learning_rate": 3.0515625000000005e-05,
"loss": 0.5205,
"step": 9900
},
{
"epoch": 143.62,
"learning_rate": 3.04375e-05,
"loss": 0.5436,
"step": 9910
},
{
"epoch": 143.77,
"learning_rate": 3.0359375e-05,
"loss": 0.5217,
"step": 9920
},
{
"epoch": 143.91,
"learning_rate": 3.028125e-05,
"loss": 0.4908,
"step": 9930
},
{
"epoch": 144.06,
"learning_rate": 3.0203124999999997e-05,
"loss": 0.4808,
"step": 9940
},
{
"epoch": 144.2,
"learning_rate": 3.0125000000000004e-05,
"loss": 0.5112,
"step": 9950
},
{
"epoch": 144.35,
"learning_rate": 3.0046875000000004e-05,
"loss": 0.4901,
"step": 9960
},
{
"epoch": 144.49,
"learning_rate": 2.996875e-05,
"loss": 0.5146,
"step": 9970
},
{
"epoch": 144.64,
"learning_rate": 2.9890625e-05,
"loss": 0.5309,
"step": 9980
},
{
"epoch": 144.78,
"learning_rate": 2.98125e-05,
"loss": 0.4538,
"step": 9990
},
{
"epoch": 144.93,
"learning_rate": 2.9734375000000003e-05,
"loss": 0.5842,
"step": 10000
},
{
"epoch": 144.93,
"eval_loss": 0.20865434408187866,
"eval_runtime": 584.5036,
"eval_samples_per_second": 5.807,
"eval_steps_per_second": 0.727,
"eval_wer": 0.1306527294989687,
"step": 10000
},
{
"epoch": 145.07,
"learning_rate": 2.9656250000000003e-05,
"loss": 0.5556,
"step": 10010
},
{
"epoch": 145.22,
"learning_rate": 2.9578125000000002e-05,
"loss": 0.5339,
"step": 10020
},
{
"epoch": 145.36,
"learning_rate": 2.95e-05,
"loss": 0.5321,
"step": 10030
},
{
"epoch": 145.51,
"learning_rate": 2.9421875e-05,
"loss": 0.5274,
"step": 10040
},
{
"epoch": 145.65,
"learning_rate": 2.9343750000000002e-05,
"loss": 0.533,
"step": 10050
},
{
"epoch": 145.8,
"learning_rate": 2.9265625e-05,
"loss": 0.5515,
"step": 10060
},
{
"epoch": 145.94,
"learning_rate": 2.91875e-05,
"loss": 0.5548,
"step": 10070
},
{
"epoch": 146.09,
"learning_rate": 2.9109375e-05,
"loss": 0.4882,
"step": 10080
},
{
"epoch": 146.23,
"learning_rate": 2.9031249999999998e-05,
"loss": 0.4592,
"step": 10090
},
{
"epoch": 146.38,
"learning_rate": 2.8953125000000004e-05,
"loss": 0.4733,
"step": 10100
},
{
"epoch": 146.52,
"learning_rate": 2.8875e-05,
"loss": 0.4897,
"step": 10110
},
{
"epoch": 146.67,
"learning_rate": 2.8796875e-05,
"loss": 0.565,
"step": 10120
},
{
"epoch": 146.81,
"learning_rate": 2.871875e-05,
"loss": 0.5052,
"step": 10130
},
{
"epoch": 146.96,
"learning_rate": 2.8640625e-05,
"loss": 0.4882,
"step": 10140
},
{
"epoch": 147.1,
"learning_rate": 2.8562500000000003e-05,
"loss": 0.4909,
"step": 10150
},
{
"epoch": 147.25,
"learning_rate": 2.8484375000000003e-05,
"loss": 0.4724,
"step": 10160
},
{
"epoch": 147.39,
"learning_rate": 2.840625e-05,
"loss": 0.59,
"step": 10170
},
{
"epoch": 147.54,
"learning_rate": 2.8328125e-05,
"loss": 0.497,
"step": 10180
},
{
"epoch": 147.68,
"learning_rate": 2.825e-05,
"loss": 0.5728,
"step": 10190
},
{
"epoch": 147.83,
"learning_rate": 2.8171875000000002e-05,
"loss": 0.5683,
"step": 10200
},
{
"epoch": 147.97,
"learning_rate": 2.8093750000000002e-05,
"loss": 0.42,
"step": 10210
},
{
"epoch": 148.12,
"learning_rate": 2.8015625e-05,
"loss": 0.5232,
"step": 10220
},
{
"epoch": 148.26,
"learning_rate": 2.79375e-05,
"loss": 0.4986,
"step": 10230
},
{
"epoch": 148.41,
"learning_rate": 2.7859374999999998e-05,
"loss": 0.5065,
"step": 10240
},
{
"epoch": 148.55,
"learning_rate": 2.7781250000000004e-05,
"loss": 0.4544,
"step": 10250
},
{
"epoch": 148.7,
"learning_rate": 2.7703125e-05,
"loss": 0.5544,
"step": 10260
},
{
"epoch": 148.84,
"learning_rate": 2.7625e-05,
"loss": 0.4994,
"step": 10270
},
{
"epoch": 148.99,
"learning_rate": 2.7546875e-05,
"loss": 0.561,
"step": 10280
},
{
"epoch": 149.13,
"learning_rate": 2.746875e-05,
"loss": 0.5616,
"step": 10290
},
{
"epoch": 149.28,
"learning_rate": 2.7390625000000003e-05,
"loss": 0.6495,
"step": 10300
},
{
"epoch": 149.42,
"learning_rate": 2.7312500000000003e-05,
"loss": 0.4772,
"step": 10310
},
{
"epoch": 149.57,
"learning_rate": 2.7234375e-05,
"loss": 0.5294,
"step": 10320
},
{
"epoch": 149.71,
"learning_rate": 2.715625e-05,
"loss": 0.5192,
"step": 10330
},
{
"epoch": 149.86,
"learning_rate": 2.7078125e-05,
"loss": 0.592,
"step": 10340
},
{
"epoch": 150.0,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.5129,
"step": 10350
},
{
"epoch": 150.14,
"learning_rate": 2.6921875000000002e-05,
"loss": 0.5053,
"step": 10360
},
{
"epoch": 150.29,
"learning_rate": 2.6843750000000002e-05,
"loss": 0.5152,
"step": 10370
},
{
"epoch": 150.43,
"learning_rate": 2.6765625e-05,
"loss": 0.5233,
"step": 10380
},
{
"epoch": 150.58,
"learning_rate": 2.6687499999999998e-05,
"loss": 0.5546,
"step": 10390
},
{
"epoch": 150.72,
"learning_rate": 2.6609375000000005e-05,
"loss": 0.5218,
"step": 10400
},
{
"epoch": 150.87,
"learning_rate": 2.653125e-05,
"loss": 0.6183,
"step": 10410
},
{
"epoch": 151.01,
"learning_rate": 2.6453125e-05,
"loss": 0.4911,
"step": 10420
},
{
"epoch": 151.16,
"learning_rate": 2.6375e-05,
"loss": 0.5415,
"step": 10430
},
{
"epoch": 151.3,
"learning_rate": 2.6296874999999997e-05,
"loss": 0.4911,
"step": 10440
},
{
"epoch": 151.45,
"learning_rate": 2.6218750000000004e-05,
"loss": 0.4329,
"step": 10450
},
{
"epoch": 151.59,
"learning_rate": 2.6140625000000004e-05,
"loss": 0.5477,
"step": 10460
},
{
"epoch": 151.74,
"learning_rate": 2.60625e-05,
"loss": 0.5363,
"step": 10470
},
{
"epoch": 151.88,
"learning_rate": 2.5984375e-05,
"loss": 0.4905,
"step": 10480
},
{
"epoch": 152.03,
"learning_rate": 2.590625e-05,
"loss": 0.4725,
"step": 10490
},
{
"epoch": 152.17,
"learning_rate": 2.5828125000000003e-05,
"loss": 0.535,
"step": 10500
},
{
"epoch": 152.17,
"eval_loss": 0.21369755268096924,
"eval_runtime": 580.9794,
"eval_samples_per_second": 5.842,
"eval_steps_per_second": 0.732,
"eval_wer": 0.1309074772575993,
"step": 10500
},
{
"epoch": 152.32,
"learning_rate": 2.5750000000000002e-05,
"loss": 0.5524,
"step": 10510
},
{
"epoch": 152.46,
"learning_rate": 2.5671875000000002e-05,
"loss": 0.5334,
"step": 10520
},
{
"epoch": 152.61,
"learning_rate": 2.5601562500000003e-05,
"loss": 0.4802,
"step": 10530
},
{
"epoch": 152.75,
"learning_rate": 2.5523437500000003e-05,
"loss": 0.5052,
"step": 10540
},
{
"epoch": 152.9,
"learning_rate": 2.5445312500000003e-05,
"loss": 0.4649,
"step": 10550
},
{
"epoch": 153.04,
"learning_rate": 2.53671875e-05,
"loss": 0.53,
"step": 10560
},
{
"epoch": 153.19,
"learning_rate": 2.52890625e-05,
"loss": 0.476,
"step": 10570
},
{
"epoch": 153.33,
"learning_rate": 2.5210937500000002e-05,
"loss": 0.5251,
"step": 10580
},
{
"epoch": 153.48,
"learning_rate": 2.5132812500000002e-05,
"loss": 0.5359,
"step": 10590
},
{
"epoch": 153.62,
"learning_rate": 2.50546875e-05,
"loss": 0.4981,
"step": 10600
},
{
"epoch": 153.77,
"learning_rate": 2.49765625e-05,
"loss": 0.5616,
"step": 10610
},
{
"epoch": 153.91,
"learning_rate": 2.48984375e-05,
"loss": 0.5577,
"step": 10620
},
{
"epoch": 154.06,
"learning_rate": 2.48203125e-05,
"loss": 0.5518,
"step": 10630
},
{
"epoch": 154.2,
"learning_rate": 2.47421875e-05,
"loss": 0.5383,
"step": 10640
},
{
"epoch": 154.35,
"learning_rate": 2.46640625e-05,
"loss": 0.529,
"step": 10650
},
{
"epoch": 154.49,
"learning_rate": 2.45859375e-05,
"loss": 0.5187,
"step": 10660
},
{
"epoch": 154.64,
"learning_rate": 2.4507812500000004e-05,
"loss": 0.5833,
"step": 10670
},
{
"epoch": 154.78,
"learning_rate": 2.44296875e-05,
"loss": 0.4893,
"step": 10680
},
{
"epoch": 154.93,
"learning_rate": 2.4351562500000003e-05,
"loss": 0.5028,
"step": 10690
},
{
"epoch": 155.07,
"learning_rate": 2.4273437500000003e-05,
"loss": 0.4743,
"step": 10700
},
{
"epoch": 155.22,
"learning_rate": 2.41953125e-05,
"loss": 0.5569,
"step": 10710
},
{
"epoch": 155.36,
"learning_rate": 2.4117187500000003e-05,
"loss": 0.5493,
"step": 10720
},
{
"epoch": 155.51,
"learning_rate": 2.4046875000000003e-05,
"loss": 0.4621,
"step": 10730
},
{
"epoch": 155.65,
"learning_rate": 2.396875e-05,
"loss": 0.4801,
"step": 10740
},
{
"epoch": 155.8,
"learning_rate": 2.3890625e-05,
"loss": 0.514,
"step": 10750
},
{
"epoch": 155.94,
"learning_rate": 2.3812500000000003e-05,
"loss": 0.6346,
"step": 10760
},
{
"epoch": 156.09,
"learning_rate": 2.3734375e-05,
"loss": 0.4917,
"step": 10770
},
{
"epoch": 156.23,
"learning_rate": 2.3656250000000002e-05,
"loss": 0.4929,
"step": 10780
},
{
"epoch": 156.38,
"learning_rate": 2.3578125000000002e-05,
"loss": 0.5315,
"step": 10790
},
{
"epoch": 156.52,
"learning_rate": 2.35e-05,
"loss": 0.479,
"step": 10800
},
{
"epoch": 156.67,
"learning_rate": 2.3421875000000002e-05,
"loss": 0.4674,
"step": 10810
},
{
"epoch": 156.81,
"learning_rate": 2.334375e-05,
"loss": 0.521,
"step": 10820
},
{
"epoch": 156.96,
"learning_rate": 2.3265625e-05,
"loss": 0.4866,
"step": 10830
},
{
"epoch": 157.1,
"learning_rate": 2.31875e-05,
"loss": 0.472,
"step": 10840
},
{
"epoch": 157.25,
"learning_rate": 2.3109375e-05,
"loss": 0.5237,
"step": 10850
},
{
"epoch": 157.39,
"learning_rate": 2.303125e-05,
"loss": 0.5383,
"step": 10860
},
{
"epoch": 157.54,
"learning_rate": 2.2953125e-05,
"loss": 0.4599,
"step": 10870
},
{
"epoch": 157.68,
"learning_rate": 2.2875e-05,
"loss": 0.4511,
"step": 10880
},
{
"epoch": 157.83,
"learning_rate": 2.2796875e-05,
"loss": 0.5596,
"step": 10890
},
{
"epoch": 157.97,
"learning_rate": 2.271875e-05,
"loss": 0.455,
"step": 10900
},
{
"epoch": 158.12,
"learning_rate": 2.2640625e-05,
"loss": 0.5119,
"step": 10910
},
{
"epoch": 158.26,
"learning_rate": 2.25625e-05,
"loss": 0.517,
"step": 10920
},
{
"epoch": 158.41,
"learning_rate": 2.2484375000000003e-05,
"loss": 0.5602,
"step": 10930
},
{
"epoch": 158.55,
"learning_rate": 2.2406250000000003e-05,
"loss": 0.5124,
"step": 10940
},
{
"epoch": 158.7,
"learning_rate": 2.2328125e-05,
"loss": 0.5453,
"step": 10950
},
{
"epoch": 158.84,
"learning_rate": 2.2250000000000002e-05,
"loss": 0.5732,
"step": 10960
},
{
"epoch": 158.99,
"learning_rate": 2.2171875000000002e-05,
"loss": 0.4786,
"step": 10970
},
{
"epoch": 159.13,
"learning_rate": 2.2093750000000002e-05,
"loss": 0.4969,
"step": 10980
},
{
"epoch": 159.28,
"learning_rate": 2.2015625e-05,
"loss": 0.6077,
"step": 10990
},
{
"epoch": 159.42,
"learning_rate": 2.19375e-05,
"loss": 0.5081,
"step": 11000
},
{
"epoch": 159.42,
"eval_loss": 0.2215370386838913,
"eval_runtime": 583.8085,
"eval_samples_per_second": 5.814,
"eval_steps_per_second": 0.728,
"eval_wer": 0.13015966932097395,
"step": 11000
},
{
"epoch": 159.57,
"learning_rate": 2.1859375e-05,
"loss": 0.5492,
"step": 11010
},
{
"epoch": 159.71,
"learning_rate": 2.178125e-05,
"loss": 0.517,
"step": 11020
},
{
"epoch": 159.86,
"learning_rate": 2.1703125e-05,
"loss": 0.5535,
"step": 11030
},
{
"epoch": 160.0,
"learning_rate": 2.1625e-05,
"loss": 0.4738,
"step": 11040
},
{
"epoch": 160.14,
"learning_rate": 2.1546875e-05,
"loss": 0.4542,
"step": 11050
},
{
"epoch": 160.29,
"learning_rate": 2.146875e-05,
"loss": 0.5163,
"step": 11060
},
{
"epoch": 160.43,
"learning_rate": 2.1390625e-05,
"loss": 0.4631,
"step": 11070
},
{
"epoch": 160.58,
"learning_rate": 2.1312500000000003e-05,
"loss": 0.4819,
"step": 11080
},
{
"epoch": 160.72,
"learning_rate": 2.1234375e-05,
"loss": 0.5004,
"step": 11090
},
{
"epoch": 160.87,
"learning_rate": 2.115625e-05,
"loss": 0.4754,
"step": 11100
},
{
"epoch": 161.01,
"learning_rate": 2.1078125000000002e-05,
"loss": 0.6113,
"step": 11110
},
{
"epoch": 161.16,
"learning_rate": 2.1e-05,
"loss": 0.4625,
"step": 11120
},
{
"epoch": 161.3,
"learning_rate": 2.0921875000000002e-05,
"loss": 0.5523,
"step": 11130
},
{
"epoch": 161.45,
"learning_rate": 2.0843750000000002e-05,
"loss": 0.4649,
"step": 11140
},
{
"epoch": 161.59,
"learning_rate": 2.0765624999999998e-05,
"loss": 0.4813,
"step": 11150
},
{
"epoch": 161.74,
"learning_rate": 2.06875e-05,
"loss": 0.4891,
"step": 11160
},
{
"epoch": 161.88,
"learning_rate": 2.0609375e-05,
"loss": 0.6146,
"step": 11170
},
{
"epoch": 162.03,
"learning_rate": 2.053125e-05,
"loss": 0.5304,
"step": 11180
},
{
"epoch": 162.17,
"learning_rate": 2.0453125e-05,
"loss": 0.465,
"step": 11190
},
{
"epoch": 162.32,
"learning_rate": 2.0375e-05,
"loss": 0.4719,
"step": 11200
},
{
"epoch": 162.46,
"learning_rate": 2.0296875e-05,
"loss": 0.567,
"step": 11210
},
{
"epoch": 162.61,
"learning_rate": 2.021875e-05,
"loss": 0.4991,
"step": 11220
},
{
"epoch": 162.75,
"learning_rate": 2.0140625000000003e-05,
"loss": 0.4783,
"step": 11230
},
{
"epoch": 162.9,
"learning_rate": 2.00625e-05,
"loss": 0.5601,
"step": 11240
},
{
"epoch": 163.04,
"learning_rate": 1.9984375e-05,
"loss": 0.4829,
"step": 11250
},
{
"epoch": 163.19,
"learning_rate": 1.9906250000000003e-05,
"loss": 0.4697,
"step": 11260
},
{
"epoch": 163.33,
"learning_rate": 1.9828125e-05,
"loss": 0.4769,
"step": 11270
},
{
"epoch": 163.48,
"learning_rate": 1.9750000000000002e-05,
"loss": 0.4977,
"step": 11280
},
{
"epoch": 163.62,
"learning_rate": 1.9671875000000002e-05,
"loss": 0.6251,
"step": 11290
},
{
"epoch": 163.77,
"learning_rate": 1.959375e-05,
"loss": 0.4934,
"step": 11300
},
{
"epoch": 163.91,
"learning_rate": 1.9515625000000002e-05,
"loss": 0.56,
"step": 11310
},
{
"epoch": 164.06,
"learning_rate": 1.94375e-05,
"loss": 0.528,
"step": 11320
},
{
"epoch": 164.2,
"learning_rate": 1.9359375e-05,
"loss": 0.4802,
"step": 11330
},
{
"epoch": 164.35,
"learning_rate": 1.928125e-05,
"loss": 0.5461,
"step": 11340
},
{
"epoch": 164.49,
"learning_rate": 1.9203125e-05,
"loss": 0.4683,
"step": 11350
},
{
"epoch": 164.64,
"learning_rate": 1.9125e-05,
"loss": 0.513,
"step": 11360
},
{
"epoch": 164.78,
"learning_rate": 1.9046875e-05,
"loss": 0.5508,
"step": 11370
},
{
"epoch": 164.93,
"learning_rate": 1.896875e-05,
"loss": 0.5198,
"step": 11380
},
{
"epoch": 165.07,
"learning_rate": 1.8890625e-05,
"loss": 0.5817,
"step": 11390
},
{
"epoch": 165.22,
"learning_rate": 1.88125e-05,
"loss": 0.4556,
"step": 11400
},
{
"epoch": 165.36,
"learning_rate": 1.8734375e-05,
"loss": 0.5387,
"step": 11410
},
{
"epoch": 165.51,
"learning_rate": 1.865625e-05,
"loss": 0.4885,
"step": 11420
},
{
"epoch": 165.65,
"learning_rate": 1.8578125000000003e-05,
"loss": 0.5143,
"step": 11430
},
{
"epoch": 165.8,
"learning_rate": 1.85e-05,
"loss": 0.5244,
"step": 11440
},
{
"epoch": 165.94,
"learning_rate": 1.8421875e-05,
"loss": 0.5515,
"step": 11450
},
{
"epoch": 166.09,
"learning_rate": 1.8343750000000002e-05,
"loss": 0.4702,
"step": 11460
},
{
"epoch": 166.23,
"learning_rate": 1.8265625e-05,
"loss": 0.4558,
"step": 11470
},
{
"epoch": 166.38,
"learning_rate": 1.81875e-05,
"loss": 0.4932,
"step": 11480
},
{
"epoch": 166.52,
"learning_rate": 1.8109375e-05,
"loss": 0.54,
"step": 11490
},
{
"epoch": 166.67,
"learning_rate": 1.803125e-05,
"loss": 0.6033,
"step": 11500
},
{
"epoch": 166.67,
"eval_loss": 0.21619954705238342,
"eval_runtime": 579.504,
"eval_samples_per_second": 5.857,
"eval_steps_per_second": 0.733,
"eval_wer": 0.13017610466024046,
"step": 11500
},
{
"epoch": 166.81,
"learning_rate": 1.7953125e-05,
"loss": 0.4635,
"step": 11510
},
{
"epoch": 166.96,
"learning_rate": 1.7875e-05,
"loss": 0.514,
"step": 11520
},
{
"epoch": 167.1,
"learning_rate": 1.7796875e-05,
"loss": 0.4505,
"step": 11530
},
{
"epoch": 167.25,
"learning_rate": 1.771875e-05,
"loss": 0.4928,
"step": 11540
},
{
"epoch": 167.39,
"learning_rate": 1.7640625e-05,
"loss": 0.5362,
"step": 11550
},
{
"epoch": 167.54,
"learning_rate": 1.75625e-05,
"loss": 0.5496,
"step": 11560
},
{
"epoch": 167.68,
"learning_rate": 1.7484375e-05,
"loss": 0.5066,
"step": 11570
},
{
"epoch": 167.83,
"learning_rate": 1.7406250000000003e-05,
"loss": 0.4494,
"step": 11580
},
{
"epoch": 167.97,
"learning_rate": 1.7328125e-05,
"loss": 0.4754,
"step": 11590
},
{
"epoch": 168.12,
"learning_rate": 1.725e-05,
"loss": 0.5764,
"step": 11600
},
{
"epoch": 168.26,
"learning_rate": 1.7171875000000002e-05,
"loss": 0.5123,
"step": 11610
},
{
"epoch": 168.41,
"learning_rate": 1.709375e-05,
"loss": 0.51,
"step": 11620
},
{
"epoch": 168.55,
"learning_rate": 1.7015625000000002e-05,
"loss": 0.5291,
"step": 11630
},
{
"epoch": 168.7,
"learning_rate": 1.6937500000000002e-05,
"loss": 0.4547,
"step": 11640
},
{
"epoch": 168.84,
"learning_rate": 1.6859374999999998e-05,
"loss": 0.5122,
"step": 11650
},
{
"epoch": 168.99,
"learning_rate": 1.678125e-05,
"loss": 0.4678,
"step": 11660
},
{
"epoch": 169.13,
"learning_rate": 1.6703125e-05,
"loss": 0.4955,
"step": 11670
},
{
"epoch": 169.28,
"learning_rate": 1.6625e-05,
"loss": 0.4622,
"step": 11680
},
{
"epoch": 169.42,
"learning_rate": 1.6546875e-05,
"loss": 0.6592,
"step": 11690
},
{
"epoch": 169.57,
"learning_rate": 1.646875e-05,
"loss": 0.4831,
"step": 11700
},
{
"epoch": 169.71,
"learning_rate": 1.6390625e-05,
"loss": 0.5051,
"step": 11710
},
{
"epoch": 169.86,
"learning_rate": 1.63125e-05,
"loss": 0.5178,
"step": 11720
},
{
"epoch": 170.0,
"learning_rate": 1.6234375000000003e-05,
"loss": 0.4765,
"step": 11730
},
{
"epoch": 170.14,
"learning_rate": 1.615625e-05,
"loss": 0.4831,
"step": 11740
},
{
"epoch": 170.29,
"learning_rate": 1.6078125e-05,
"loss": 0.4228,
"step": 11750
},
{
"epoch": 170.43,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.4984,
"step": 11760
},
{
"epoch": 170.58,
"learning_rate": 1.5921875e-05,
"loss": 0.5128,
"step": 11770
},
{
"epoch": 170.72,
"learning_rate": 1.5843750000000002e-05,
"loss": 0.5014,
"step": 11780
},
{
"epoch": 170.87,
"learning_rate": 1.5765625000000002e-05,
"loss": 0.5092,
"step": 11790
},
{
"epoch": 171.01,
"learning_rate": 1.56875e-05,
"loss": 0.4447,
"step": 11800
},
{
"epoch": 171.16,
"learning_rate": 1.5609375e-05,
"loss": 0.4987,
"step": 11810
},
{
"epoch": 171.3,
"learning_rate": 1.553125e-05,
"loss": 0.4938,
"step": 11820
},
{
"epoch": 171.45,
"learning_rate": 1.5453125e-05,
"loss": 0.4087,
"step": 11830
},
{
"epoch": 171.59,
"learning_rate": 1.5375e-05,
"loss": 0.6079,
"step": 11840
},
{
"epoch": 171.74,
"learning_rate": 1.5296875e-05,
"loss": 0.5057,
"step": 11850
},
{
"epoch": 171.88,
"learning_rate": 1.521875e-05,
"loss": 0.4802,
"step": 11860
},
{
"epoch": 172.03,
"learning_rate": 1.5140625e-05,
"loss": 0.5016,
"step": 11870
},
{
"epoch": 172.17,
"learning_rate": 1.5062500000000002e-05,
"loss": 0.5046,
"step": 11880
},
{
"epoch": 172.32,
"learning_rate": 1.4984375e-05,
"loss": 0.5413,
"step": 11890
},
{
"epoch": 172.46,
"learning_rate": 1.490625e-05,
"loss": 0.5058,
"step": 11900
},
{
"epoch": 172.61,
"learning_rate": 1.4828125000000001e-05,
"loss": 0.5098,
"step": 11910
},
{
"epoch": 172.75,
"learning_rate": 1.475e-05,
"loss": 0.4732,
"step": 11920
},
{
"epoch": 172.9,
"learning_rate": 1.4671875000000001e-05,
"loss": 0.587,
"step": 11930
},
{
"epoch": 173.04,
"learning_rate": 1.459375e-05,
"loss": 0.5069,
"step": 11940
},
{
"epoch": 173.19,
"learning_rate": 1.4515624999999999e-05,
"loss": 0.5098,
"step": 11950
},
{
"epoch": 173.33,
"learning_rate": 1.44375e-05,
"loss": 0.5517,
"step": 11960
},
{
"epoch": 173.48,
"learning_rate": 1.4359375e-05,
"loss": 0.4534,
"step": 11970
},
{
"epoch": 173.62,
"learning_rate": 1.4281250000000002e-05,
"loss": 0.4916,
"step": 11980
},
{
"epoch": 173.77,
"learning_rate": 1.4203125e-05,
"loss": 0.5054,
"step": 11990
},
{
"epoch": 173.91,
"learning_rate": 1.4125e-05,
"loss": 0.5549,
"step": 12000
},
{
"epoch": 173.91,
"eval_loss": 0.2198302298784256,
"eval_runtime": 580.3441,
"eval_samples_per_second": 5.848,
"eval_steps_per_second": 0.732,
"eval_wer": 0.12856544141212434,
"step": 12000
},
{
"epoch": 174.06,
"learning_rate": 1.4046875000000001e-05,
"loss": 0.477,
"step": 12010
},
{
"epoch": 174.2,
"learning_rate": 1.396875e-05,
"loss": 0.472,
"step": 12020
},
{
"epoch": 174.35,
"learning_rate": 1.3890625000000002e-05,
"loss": 0.5718,
"step": 12030
},
{
"epoch": 174.49,
"learning_rate": 1.38125e-05,
"loss": 0.4869,
"step": 12040
},
{
"epoch": 174.64,
"learning_rate": 1.3734375e-05,
"loss": 0.5412,
"step": 12050
},
{
"epoch": 174.78,
"learning_rate": 1.3656250000000002e-05,
"loss": 0.4829,
"step": 12060
},
{
"epoch": 174.93,
"learning_rate": 1.3578125e-05,
"loss": 0.5476,
"step": 12070
},
{
"epoch": 175.07,
"learning_rate": 1.3500000000000001e-05,
"loss": 0.5515,
"step": 12080
},
{
"epoch": 175.22,
"learning_rate": 1.3421875000000001e-05,
"loss": 0.5623,
"step": 12090
},
{
"epoch": 175.36,
"learning_rate": 1.3343749999999999e-05,
"loss": 0.5047,
"step": 12100
},
{
"epoch": 175.51,
"learning_rate": 1.3265625e-05,
"loss": 0.4376,
"step": 12110
},
{
"epoch": 175.65,
"learning_rate": 1.31875e-05,
"loss": 0.4785,
"step": 12120
},
{
"epoch": 175.8,
"learning_rate": 1.3109375000000002e-05,
"loss": 0.4634,
"step": 12130
},
{
"epoch": 175.94,
"learning_rate": 1.303125e-05,
"loss": 0.5694,
"step": 12140
},
{
"epoch": 176.09,
"learning_rate": 1.2953125e-05,
"loss": 0.4439,
"step": 12150
},
{
"epoch": 176.23,
"learning_rate": 1.2875000000000001e-05,
"loss": 0.4977,
"step": 12160
},
{
"epoch": 176.38,
"learning_rate": 1.2796875e-05,
"loss": 0.4528,
"step": 12170
},
{
"epoch": 176.52,
"learning_rate": 1.271875e-05,
"loss": 0.487,
"step": 12180
},
{
"epoch": 176.67,
"learning_rate": 1.2640625e-05,
"loss": 0.4883,
"step": 12190
},
{
"epoch": 176.81,
"learning_rate": 1.2562499999999999e-05,
"loss": 0.5206,
"step": 12200
},
{
"epoch": 176.96,
"learning_rate": 1.2484375e-05,
"loss": 0.4211,
"step": 12210
},
{
"epoch": 177.1,
"learning_rate": 1.2406250000000002e-05,
"loss": 0.4504,
"step": 12220
},
{
"epoch": 177.25,
"learning_rate": 1.2328125e-05,
"loss": 0.4702,
"step": 12230
},
{
"epoch": 177.39,
"learning_rate": 1.225e-05,
"loss": 0.4387,
"step": 12240
},
{
"epoch": 177.54,
"learning_rate": 1.2171875000000001e-05,
"loss": 0.4792,
"step": 12250
},
{
"epoch": 177.68,
"learning_rate": 1.2093750000000001e-05,
"loss": 0.5884,
"step": 12260
},
{
"epoch": 177.83,
"learning_rate": 1.2015625e-05,
"loss": 0.5292,
"step": 12270
},
{
"epoch": 177.97,
"learning_rate": 1.19375e-05,
"loss": 0.4682,
"step": 12280
},
{
"epoch": 178.12,
"learning_rate": 1.1859375e-05,
"loss": 0.5257,
"step": 12290
},
{
"epoch": 178.26,
"learning_rate": 1.178125e-05,
"loss": 0.4924,
"step": 12300
},
{
"epoch": 178.41,
"learning_rate": 1.1703125000000002e-05,
"loss": 0.526,
"step": 12310
},
{
"epoch": 178.55,
"learning_rate": 1.1625000000000001e-05,
"loss": 0.4996,
"step": 12320
},
{
"epoch": 178.7,
"learning_rate": 1.1546875e-05,
"loss": 0.5087,
"step": 12330
},
{
"epoch": 178.84,
"learning_rate": 1.1468750000000001e-05,
"loss": 0.5062,
"step": 12340
},
{
"epoch": 178.99,
"learning_rate": 1.1390625e-05,
"loss": 0.4584,
"step": 12350
},
{
"epoch": 179.13,
"learning_rate": 1.13125e-05,
"loss": 0.5106,
"step": 12360
},
{
"epoch": 179.28,
"learning_rate": 1.1234375e-05,
"loss": 0.474,
"step": 12370
},
{
"epoch": 179.42,
"learning_rate": 1.115625e-05,
"loss": 0.4744,
"step": 12380
},
{
"epoch": 179.57,
"learning_rate": 1.1078125e-05,
"loss": 0.5273,
"step": 12390
},
{
"epoch": 179.71,
"learning_rate": 1.1000000000000001e-05,
"loss": 0.5007,
"step": 12400
},
{
"epoch": 179.86,
"learning_rate": 1.0921875000000001e-05,
"loss": 0.5867,
"step": 12410
},
{
"epoch": 180.0,
"learning_rate": 1.0843750000000001e-05,
"loss": 0.4109,
"step": 12420
},
{
"epoch": 180.14,
"learning_rate": 1.0765625e-05,
"loss": 0.4856,
"step": 12430
},
{
"epoch": 180.29,
"learning_rate": 1.06875e-05,
"loss": 0.5211,
"step": 12440
},
{
"epoch": 180.43,
"learning_rate": 1.0609375e-05,
"loss": 0.5894,
"step": 12450
},
{
"epoch": 180.58,
"learning_rate": 1.053125e-05,
"loss": 0.5188,
"step": 12460
},
{
"epoch": 180.72,
"learning_rate": 1.0453125000000002e-05,
"loss": 0.4689,
"step": 12470
},
{
"epoch": 180.87,
"learning_rate": 1.0375e-05,
"loss": 0.5239,
"step": 12480
},
{
"epoch": 181.01,
"learning_rate": 1.0296875e-05,
"loss": 0.4538,
"step": 12490
},
{
"epoch": 181.16,
"learning_rate": 1.0218750000000001e-05,
"loss": 0.5389,
"step": 12500
},
{
"epoch": 181.16,
"eval_loss": 0.22414202988147736,
"eval_runtime": 582.1464,
"eval_samples_per_second": 5.83,
"eval_steps_per_second": 0.73,
"eval_wer": 0.1293132493487497,
"step": 12500
},
{
"epoch": 181.3,
"learning_rate": 1.0140625000000001e-05,
"loss": 0.5074,
"step": 12510
},
{
"epoch": 181.45,
"learning_rate": 1.00625e-05,
"loss": 0.4644,
"step": 12520
},
{
"epoch": 181.59,
"learning_rate": 9.984375e-06,
"loss": 0.4589,
"step": 12530
},
{
"epoch": 181.74,
"learning_rate": 9.90625e-06,
"loss": 0.4653,
"step": 12540
},
{
"epoch": 181.88,
"learning_rate": 9.828125e-06,
"loss": 0.5025,
"step": 12550
},
{
"epoch": 182.03,
"learning_rate": 9.750000000000002e-06,
"loss": 0.4598,
"step": 12560
},
{
"epoch": 182.17,
"learning_rate": 9.671875000000001e-06,
"loss": 0.4409,
"step": 12570
},
{
"epoch": 182.32,
"learning_rate": 9.59375e-06,
"loss": 0.4618,
"step": 12580
},
{
"epoch": 182.46,
"learning_rate": 9.515625000000001e-06,
"loss": 0.4892,
"step": 12590
},
{
"epoch": 182.61,
"learning_rate": 9.4375e-06,
"loss": 0.5488,
"step": 12600
},
{
"epoch": 182.75,
"learning_rate": 9.359375e-06,
"loss": 0.4365,
"step": 12610
},
{
"epoch": 182.9,
"learning_rate": 9.28125e-06,
"loss": 0.5411,
"step": 12620
},
{
"epoch": 183.04,
"learning_rate": 9.203125e-06,
"loss": 0.5429,
"step": 12630
},
{
"epoch": 183.19,
"learning_rate": 9.125e-06,
"loss": 0.4526,
"step": 12640
},
{
"epoch": 183.33,
"learning_rate": 9.046875e-06,
"loss": 0.4557,
"step": 12650
},
{
"epoch": 183.48,
"learning_rate": 8.968750000000001e-06,
"loss": 0.4643,
"step": 12660
},
{
"epoch": 183.62,
"learning_rate": 8.890625000000001e-06,
"loss": 0.4796,
"step": 12670
},
{
"epoch": 183.77,
"learning_rate": 8.8125e-06,
"loss": 0.4711,
"step": 12680
},
{
"epoch": 183.91,
"learning_rate": 8.734375e-06,
"loss": 0.4718,
"step": 12690
},
{
"epoch": 184.06,
"learning_rate": 8.65625e-06,
"loss": 0.5175,
"step": 12700
},
{
"epoch": 184.2,
"learning_rate": 8.578125e-06,
"loss": 0.4741,
"step": 12710
},
{
"epoch": 184.35,
"learning_rate": 8.500000000000002e-06,
"loss": 0.4344,
"step": 12720
},
{
"epoch": 184.49,
"learning_rate": 8.421875e-06,
"loss": 0.4871,
"step": 12730
},
{
"epoch": 184.64,
"learning_rate": 8.34375e-06,
"loss": 0.5269,
"step": 12740
},
{
"epoch": 184.78,
"learning_rate": 8.265625000000001e-06,
"loss": 0.5285,
"step": 12750
},
{
"epoch": 184.93,
"learning_rate": 8.1875e-06,
"loss": 0.474,
"step": 12760
},
{
"epoch": 185.07,
"learning_rate": 8.109375e-06,
"loss": 0.5291,
"step": 12770
},
{
"epoch": 185.22,
"learning_rate": 8.03125e-06,
"loss": 0.4532,
"step": 12780
},
{
"epoch": 185.36,
"learning_rate": 7.953125e-06,
"loss": 0.5102,
"step": 12790
},
{
"epoch": 185.51,
"learning_rate": 7.875e-06,
"loss": 0.4729,
"step": 12800
},
{
"epoch": 185.65,
"learning_rate": 7.796875000000001e-06,
"loss": 0.4955,
"step": 12810
},
{
"epoch": 185.8,
"learning_rate": 7.718750000000001e-06,
"loss": 0.4854,
"step": 12820
},
{
"epoch": 185.94,
"learning_rate": 7.640625e-06,
"loss": 0.4601,
"step": 12830
},
{
"epoch": 186.09,
"learning_rate": 7.5625e-06,
"loss": 0.5263,
"step": 12840
},
{
"epoch": 186.23,
"learning_rate": 7.484375000000001e-06,
"loss": 0.4531,
"step": 12850
},
{
"epoch": 186.38,
"learning_rate": 7.4062500000000005e-06,
"loss": 0.5865,
"step": 12860
},
{
"epoch": 186.52,
"learning_rate": 7.328125000000001e-06,
"loss": 0.4925,
"step": 12870
},
{
"epoch": 186.67,
"learning_rate": 7.25e-06,
"loss": 0.5284,
"step": 12880
},
{
"epoch": 186.81,
"learning_rate": 7.171875e-06,
"loss": 0.4878,
"step": 12890
},
{
"epoch": 186.96,
"learning_rate": 7.0937500000000005e-06,
"loss": 0.4905,
"step": 12900
},
{
"epoch": 187.1,
"learning_rate": 7.015625e-06,
"loss": 0.5737,
"step": 12910
},
{
"epoch": 187.25,
"learning_rate": 6.937500000000001e-06,
"loss": 0.4844,
"step": 12920
},
{
"epoch": 187.39,
"learning_rate": 6.859375e-06,
"loss": 0.4853,
"step": 12930
},
{
"epoch": 187.54,
"learning_rate": 6.7812500000000005e-06,
"loss": 0.477,
"step": 12940
},
{
"epoch": 187.68,
"learning_rate": 6.703125e-06,
"loss": 0.5496,
"step": 12950
},
{
"epoch": 187.83,
"learning_rate": 6.625000000000001e-06,
"loss": 0.5362,
"step": 12960
},
{
"epoch": 187.97,
"learning_rate": 6.546875000000001e-06,
"loss": 0.492,
"step": 12970
},
{
"epoch": 188.12,
"learning_rate": 6.46875e-06,
"loss": 0.4541,
"step": 12980
},
{
"epoch": 188.26,
"learning_rate": 6.390625e-06,
"loss": 0.4969,
"step": 12990
},
{
"epoch": 188.41,
"learning_rate": 6.3125e-06,
"loss": 0.4912,
"step": 13000
},
{
"epoch": 188.41,
"eval_loss": 0.21899156272411346,
"eval_runtime": 580.6731,
"eval_samples_per_second": 5.845,
"eval_steps_per_second": 0.732,
"eval_wer": 0.12903384858121933,
"step": 13000
},
{
"epoch": 188.55,
"learning_rate": 6.234375000000001e-06,
"loss": 0.4943,
"step": 13010
},
{
"epoch": 188.7,
"learning_rate": 6.1562500000000006e-06,
"loss": 0.4609,
"step": 13020
},
{
"epoch": 188.84,
"learning_rate": 6.078125e-06,
"loss": 0.5267,
"step": 13030
},
{
"epoch": 188.99,
"learning_rate": 6e-06,
"loss": 0.4918,
"step": 13040
},
{
"epoch": 189.13,
"learning_rate": 5.921875e-06,
"loss": 0.4454,
"step": 13050
},
{
"epoch": 189.28,
"learning_rate": 5.843750000000001e-06,
"loss": 0.4695,
"step": 13060
},
{
"epoch": 189.42,
"learning_rate": 5.765625e-06,
"loss": 0.5225,
"step": 13070
},
{
"epoch": 189.57,
"learning_rate": 5.6875e-06,
"loss": 0.5047,
"step": 13080
},
{
"epoch": 189.71,
"learning_rate": 5.609375e-06,
"loss": 0.498,
"step": 13090
},
{
"epoch": 189.86,
"learning_rate": 5.531250000000001e-06,
"loss": 0.5432,
"step": 13100
},
{
"epoch": 190.0,
"learning_rate": 5.453125e-06,
"loss": 0.5632,
"step": 13110
},
{
"epoch": 190.14,
"learning_rate": 5.375e-06,
"loss": 0.5208,
"step": 13120
},
{
"epoch": 190.29,
"learning_rate": 5.296875000000001e-06,
"loss": 0.4989,
"step": 13130
},
{
"epoch": 190.43,
"learning_rate": 5.21875e-06,
"loss": 0.4867,
"step": 13140
},
{
"epoch": 190.58,
"learning_rate": 5.1406250000000004e-06,
"loss": 0.5129,
"step": 13150
},
{
"epoch": 190.72,
"learning_rate": 5.0625e-06,
"loss": 0.4555,
"step": 13160
},
{
"epoch": 190.87,
"learning_rate": 4.984375e-06,
"loss": 0.5945,
"step": 13170
},
{
"epoch": 191.01,
"learning_rate": 4.906250000000001e-06,
"loss": 0.4669,
"step": 13180
},
{
"epoch": 191.16,
"learning_rate": 4.835937500000001e-06,
"loss": 0.4884,
"step": 13190
},
{
"epoch": 191.3,
"learning_rate": 4.7578125000000005e-06,
"loss": 0.479,
"step": 13200
},
{
"epoch": 191.45,
"learning_rate": 4.6796875e-06,
"loss": 0.4985,
"step": 13210
},
{
"epoch": 191.59,
"learning_rate": 4.6015625e-06,
"loss": 0.4734,
"step": 13220
},
{
"epoch": 191.74,
"learning_rate": 4.5234375e-06,
"loss": 0.4611,
"step": 13230
},
{
"epoch": 191.88,
"learning_rate": 4.4453125000000005e-06,
"loss": 0.5031,
"step": 13240
},
{
"epoch": 192.03,
"learning_rate": 4.3671875e-06,
"loss": 0.438,
"step": 13250
},
{
"epoch": 192.17,
"learning_rate": 4.2890625e-06,
"loss": 0.5531,
"step": 13260
},
{
"epoch": 192.32,
"learning_rate": 4.2109375e-06,
"loss": 0.4612,
"step": 13270
},
{
"epoch": 192.46,
"learning_rate": 4.1328125000000005e-06,
"loss": 0.4861,
"step": 13280
},
{
"epoch": 192.61,
"learning_rate": 4.0546875e-06,
"loss": 0.5104,
"step": 13290
},
{
"epoch": 192.75,
"learning_rate": 3.9765625e-06,
"loss": 0.5779,
"step": 13300
},
{
"epoch": 192.9,
"learning_rate": 3.898437500000001e-06,
"loss": 0.4896,
"step": 13310
},
{
"epoch": 193.04,
"learning_rate": 3.8203125e-06,
"loss": 0.5076,
"step": 13320
},
{
"epoch": 193.19,
"learning_rate": 3.7421875000000003e-06,
"loss": 0.55,
"step": 13330
},
{
"epoch": 193.33,
"learning_rate": 3.6640625000000005e-06,
"loss": 0.4505,
"step": 13340
},
{
"epoch": 193.48,
"learning_rate": 3.5859375e-06,
"loss": 0.4858,
"step": 13350
},
{
"epoch": 193.62,
"learning_rate": 3.5078125e-06,
"loss": 0.4736,
"step": 13360
},
{
"epoch": 193.77,
"learning_rate": 3.4296875e-06,
"loss": 0.4495,
"step": 13370
},
{
"epoch": 193.91,
"learning_rate": 3.3515625e-06,
"loss": 0.5031,
"step": 13380
},
{
"epoch": 194.06,
"learning_rate": 3.2734375000000004e-06,
"loss": 0.4883,
"step": 13390
},
{
"epoch": 194.2,
"learning_rate": 3.1953125e-06,
"loss": 0.5101,
"step": 13400
},
{
"epoch": 194.35,
"learning_rate": 3.1171875000000004e-06,
"loss": 0.4354,
"step": 13410
},
{
"epoch": 194.49,
"learning_rate": 3.0390625e-06,
"loss": 0.5526,
"step": 13420
},
{
"epoch": 194.64,
"learning_rate": 2.9609375e-06,
"loss": 0.5359,
"step": 13430
},
{
"epoch": 194.78,
"learning_rate": 2.8828125e-06,
"loss": 0.4603,
"step": 13440
},
{
"epoch": 194.93,
"learning_rate": 2.8046875e-06,
"loss": 0.527,
"step": 13450
},
{
"epoch": 195.07,
"learning_rate": 2.7265625e-06,
"loss": 0.4818,
"step": 13460
},
{
"epoch": 195.22,
"learning_rate": 2.6484375000000004e-06,
"loss": 0.4948,
"step": 13470
},
{
"epoch": 195.36,
"learning_rate": 2.5703125000000002e-06,
"loss": 0.4553,
"step": 13480
},
{
"epoch": 195.51,
"learning_rate": 2.4921875e-06,
"loss": 0.4966,
"step": 13490
},
{
"epoch": 195.65,
"learning_rate": 2.4140625e-06,
"loss": 0.4671,
"step": 13500
},
{
"epoch": 195.65,
"eval_loss": 0.2217516154050827,
"eval_runtime": 595.0791,
"eval_samples_per_second": 5.703,
"eval_steps_per_second": 0.714,
"eval_wer": 0.12895167188488688,
"step": 13500
},
{
"epoch": 195.8,
"learning_rate": 2.3359375000000005e-06,
"loss": 0.465,
"step": 13510
},
{
"epoch": 195.94,
"learning_rate": 2.2578125000000002e-06,
"loss": 0.4578,
"step": 13520
},
{
"epoch": 196.09,
"learning_rate": 2.1796875e-06,
"loss": 0.4644,
"step": 13530
},
{
"epoch": 196.23,
"learning_rate": 2.1015625000000003e-06,
"loss": 0.5305,
"step": 13540
},
{
"epoch": 196.38,
"learning_rate": 2.0234375e-06,
"loss": 0.4801,
"step": 13550
},
{
"epoch": 196.52,
"learning_rate": 1.9453125000000003e-06,
"loss": 0.484,
"step": 13560
},
{
"epoch": 196.67,
"learning_rate": 1.8671875000000003e-06,
"loss": 0.533,
"step": 13570
},
{
"epoch": 196.81,
"learning_rate": 1.7890625e-06,
"loss": 0.5135,
"step": 13580
},
{
"epoch": 196.96,
"learning_rate": 1.7109375e-06,
"loss": 0.5307,
"step": 13590
},
{
"epoch": 197.1,
"learning_rate": 1.6328124999999999e-06,
"loss": 0.5312,
"step": 13600
},
{
"epoch": 197.25,
"learning_rate": 1.5546875e-06,
"loss": 0.5042,
"step": 13610
},
{
"epoch": 197.39,
"learning_rate": 1.4765624999999999e-06,
"loss": 0.4879,
"step": 13620
},
{
"epoch": 197.54,
"learning_rate": 1.3984375000000001e-06,
"loss": 0.4124,
"step": 13630
},
{
"epoch": 197.68,
"learning_rate": 1.3203125000000001e-06,
"loss": 0.5127,
"step": 13640
},
{
"epoch": 197.83,
"learning_rate": 1.2421875000000001e-06,
"loss": 0.4804,
"step": 13650
},
{
"epoch": 197.97,
"learning_rate": 1.1640625000000001e-06,
"loss": 0.4548,
"step": 13660
},
{
"epoch": 198.12,
"learning_rate": 1.0859375e-06,
"loss": 0.5534,
"step": 13670
},
{
"epoch": 198.26,
"learning_rate": 1.0078125000000001e-06,
"loss": 0.5236,
"step": 13680
},
{
"epoch": 198.41,
"learning_rate": 9.296875e-07,
"loss": 0.4812,
"step": 13690
},
{
"epoch": 198.55,
"learning_rate": 8.515625e-07,
"loss": 0.5428,
"step": 13700
},
{
"epoch": 198.7,
"learning_rate": 7.734375000000001e-07,
"loss": 0.5043,
"step": 13710
},
{
"epoch": 198.84,
"learning_rate": 6.953125000000001e-07,
"loss": 0.4774,
"step": 13720
},
{
"epoch": 198.99,
"learning_rate": 6.171875000000001e-07,
"loss": 0.5232,
"step": 13730
},
{
"epoch": 199.13,
"learning_rate": 5.390625e-07,
"loss": 0.4748,
"step": 13740
},
{
"epoch": 199.28,
"learning_rate": 4.609375e-07,
"loss": 0.4954,
"step": 13750
},
{
"epoch": 199.42,
"learning_rate": 3.828125e-07,
"loss": 0.5045,
"step": 13760
},
{
"epoch": 199.57,
"learning_rate": 3.0468750000000005e-07,
"loss": 0.5149,
"step": 13770
},
{
"epoch": 199.71,
"learning_rate": 2.265625e-07,
"loss": 0.4863,
"step": 13780
},
{
"epoch": 199.86,
"learning_rate": 1.484375e-07,
"loss": 0.4924,
"step": 13790
},
{
"epoch": 200.0,
"learning_rate": 7.03125e-08,
"loss": 0.4795,
"step": 13800
},
{
"epoch": 200.0,
"step": 13800,
"total_flos": 2.0051176394243018e+20,
"train_loss": 0.3013287970985191,
"train_runtime": 60590.9255,
"train_samples_per_second": 7.242,
"train_steps_per_second": 0.228
}
],
"max_steps": 13800,
"num_train_epochs": 200,
"total_flos": 2.0051176394243018e+20,
"trial_name": null,
"trial_params": null
}