{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"global_step": 6900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14,
"learning_rate": 7.000000000000001e-07,
"loss": 11.2514,
"step": 10
},
{
"epoch": 0.29,
"learning_rate": 1.7000000000000002e-06,
"loss": 12.1614,
"step": 20
},
{
"epoch": 0.43,
"learning_rate": 2.7e-06,
"loss": 12.0394,
"step": 30
},
{
"epoch": 0.58,
"learning_rate": 3.7e-06,
"loss": 11.8399,
"step": 40
},
{
"epoch": 0.72,
"learning_rate": 4.7e-06,
"loss": 12.1568,
"step": 50
},
{
"epoch": 0.87,
"learning_rate": 5.7000000000000005e-06,
"loss": 11.1578,
"step": 60
},
{
"epoch": 1.01,
"learning_rate": 6.700000000000001e-06,
"loss": 12.6691,
"step": 70
},
{
"epoch": 1.16,
"learning_rate": 7.7e-06,
"loss": 10.8955,
"step": 80
},
{
"epoch": 1.3,
"learning_rate": 8.7e-06,
"loss": 12.1493,
"step": 90
},
{
"epoch": 1.45,
"learning_rate": 9.7e-06,
"loss": 11.5678,
"step": 100
},
{
"epoch": 1.59,
"learning_rate": 1.0700000000000001e-05,
"loss": 10.1412,
"step": 110
},
{
"epoch": 1.74,
"learning_rate": 1.1700000000000001e-05,
"loss": 10.6275,
"step": 120
},
{
"epoch": 1.88,
"learning_rate": 1.27e-05,
"loss": 9.1186,
"step": 130
},
{
"epoch": 2.03,
"learning_rate": 1.3700000000000001e-05,
"loss": 7.4903,
"step": 140
},
{
"epoch": 2.17,
"learning_rate": 1.47e-05,
"loss": 6.6567,
"step": 150
},
{
"epoch": 2.32,
"learning_rate": 1.5700000000000002e-05,
"loss": 4.9907,
"step": 160
},
{
"epoch": 2.46,
"learning_rate": 1.6700000000000003e-05,
"loss": 5.5332,
"step": 170
},
{
"epoch": 2.61,
"learning_rate": 1.77e-05,
"loss": 4.5061,
"step": 180
},
{
"epoch": 2.75,
"learning_rate": 1.87e-05,
"loss": 4.2245,
"step": 190
},
{
"epoch": 2.9,
"learning_rate": 1.97e-05,
"loss": 4.057,
"step": 200
},
{
"epoch": 3.04,
"learning_rate": 2.07e-05,
"loss": 3.588,
"step": 210
},
{
"epoch": 3.19,
"learning_rate": 2.1700000000000002e-05,
"loss": 3.5628,
"step": 220
},
{
"epoch": 3.33,
"learning_rate": 2.2700000000000003e-05,
"loss": 3.4737,
"step": 230
},
{
"epoch": 3.48,
"learning_rate": 2.37e-05,
"loss": 3.3541,
"step": 240
},
{
"epoch": 3.62,
"learning_rate": 2.47e-05,
"loss": 3.2015,
"step": 250
},
{
"epoch": 3.77,
"learning_rate": 2.57e-05,
"loss": 3.2003,
"step": 260
},
{
"epoch": 3.91,
"learning_rate": 2.6700000000000002e-05,
"loss": 3.4131,
"step": 270
},
{
"epoch": 4.06,
"learning_rate": 2.7700000000000002e-05,
"loss": 3.0602,
"step": 280
},
{
"epoch": 4.2,
"learning_rate": 2.87e-05,
"loss": 3.27,
"step": 290
},
{
"epoch": 4.35,
"learning_rate": 2.97e-05,
"loss": 3.0795,
"step": 300
},
{
"epoch": 4.49,
"learning_rate": 3.06e-05,
"loss": 3.0245,
"step": 310
},
{
"epoch": 4.64,
"learning_rate": 3.16e-05,
"loss": 2.9565,
"step": 320
},
{
"epoch": 4.78,
"learning_rate": 3.26e-05,
"loss": 2.9714,
"step": 330
},
{
"epoch": 4.93,
"learning_rate": 3.3600000000000004e-05,
"loss": 3.06,
"step": 340
},
{
"epoch": 5.07,
"learning_rate": 3.46e-05,
"loss": 2.9985,
"step": 350
},
{
"epoch": 5.22,
"learning_rate": 3.56e-05,
"loss": 3.0004,
"step": 360
},
{
"epoch": 5.36,
"learning_rate": 3.66e-05,
"loss": 2.9545,
"step": 370
},
{
"epoch": 5.51,
"learning_rate": 3.76e-05,
"loss": 2.9668,
"step": 380
},
{
"epoch": 5.65,
"learning_rate": 3.86e-05,
"loss": 2.995,
"step": 390
},
{
"epoch": 5.8,
"learning_rate": 3.960000000000001e-05,
"loss": 3.0165,
"step": 400
},
{
"epoch": 5.94,
"learning_rate": 4.0600000000000004e-05,
"loss": 2.9506,
"step": 410
},
{
"epoch": 6.09,
"learning_rate": 4.16e-05,
"loss": 2.9652,
"step": 420
},
{
"epoch": 6.23,
"learning_rate": 4.26e-05,
"loss": 3.011,
"step": 430
},
{
"epoch": 6.38,
"learning_rate": 4.36e-05,
"loss": 2.9217,
"step": 440
},
{
"epoch": 6.52,
"learning_rate": 4.46e-05,
"loss": 3.0549,
"step": 450
},
{
"epoch": 6.67,
"learning_rate": 4.5600000000000004e-05,
"loss": 2.9966,
"step": 460
},
{
"epoch": 6.81,
"learning_rate": 4.660000000000001e-05,
"loss": 2.9123,
"step": 470
},
{
"epoch": 6.96,
"learning_rate": 4.76e-05,
"loss": 2.9838,
"step": 480
},
{
"epoch": 7.1,
"learning_rate": 4.86e-05,
"loss": 2.9364,
"step": 490
},
{
"epoch": 7.25,
"learning_rate": 4.96e-05,
"loss": 2.9888,
"step": 500
},
{
"epoch": 7.25,
"eval_loss": 2.9192206859588623,
"eval_runtime": 601.3365,
"eval_samples_per_second": 5.644,
"eval_steps_per_second": 0.707,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 7.39,
"learning_rate": 5.0600000000000003e-05,
"loss": 2.8978,
"step": 510
},
{
"epoch": 7.54,
"learning_rate": 5.16e-05,
"loss": 2.914,
"step": 520
},
{
"epoch": 7.68,
"learning_rate": 5.2600000000000005e-05,
"loss": 3.0331,
"step": 530
},
{
"epoch": 7.83,
"learning_rate": 5.360000000000001e-05,
"loss": 2.9362,
"step": 540
},
{
"epoch": 7.97,
"learning_rate": 5.4600000000000006e-05,
"loss": 2.9437,
"step": 550
},
{
"epoch": 8.12,
"learning_rate": 5.560000000000001e-05,
"loss": 2.9124,
"step": 560
},
{
"epoch": 8.26,
"learning_rate": 5.66e-05,
"loss": 2.9353,
"step": 570
},
{
"epoch": 8.41,
"learning_rate": 5.76e-05,
"loss": 2.9313,
"step": 580
},
{
"epoch": 8.55,
"learning_rate": 5.86e-05,
"loss": 2.954,
"step": 590
},
{
"epoch": 8.7,
"learning_rate": 5.96e-05,
"loss": 2.9495,
"step": 600
},
{
"epoch": 8.84,
"learning_rate": 6.06e-05,
"loss": 2.9209,
"step": 610
},
{
"epoch": 8.99,
"learning_rate": 6.16e-05,
"loss": 2.9983,
"step": 620
},
{
"epoch": 9.13,
"learning_rate": 6.26e-05,
"loss": 2.8978,
"step": 630
},
{
"epoch": 9.28,
"learning_rate": 6.35e-05,
"loss": 2.921,
"step": 640
},
{
"epoch": 9.42,
"learning_rate": 6.450000000000001e-05,
"loss": 2.9351,
"step": 650
},
{
"epoch": 9.57,
"learning_rate": 6.55e-05,
"loss": 2.9152,
"step": 660
},
{
"epoch": 9.71,
"learning_rate": 6.65e-05,
"loss": 2.984,
"step": 670
},
{
"epoch": 9.86,
"learning_rate": 6.750000000000001e-05,
"loss": 2.907,
"step": 680
},
{
"epoch": 10.0,
"learning_rate": 6.850000000000001e-05,
"loss": 2.9913,
"step": 690
},
{
"epoch": 10.14,
"learning_rate": 6.95e-05,
"loss": 2.9054,
"step": 700
},
{
"epoch": 10.29,
"learning_rate": 7.05e-05,
"loss": 2.9958,
"step": 710
},
{
"epoch": 10.43,
"learning_rate": 7.15e-05,
"loss": 2.91,
"step": 720
},
{
"epoch": 10.58,
"learning_rate": 7.25e-05,
"loss": 2.904,
"step": 730
},
{
"epoch": 10.72,
"learning_rate": 7.35e-05,
"loss": 2.9181,
"step": 740
},
{
"epoch": 10.87,
"learning_rate": 7.450000000000001e-05,
"loss": 2.8968,
"step": 750
},
{
"epoch": 11.01,
"learning_rate": 7.55e-05,
"loss": 2.9142,
"step": 760
},
{
"epoch": 11.16,
"learning_rate": 7.65e-05,
"loss": 2.9257,
"step": 770
},
{
"epoch": 11.3,
"learning_rate": 7.75e-05,
"loss": 2.958,
"step": 780
},
{
"epoch": 11.45,
"learning_rate": 7.850000000000001e-05,
"loss": 2.9208,
"step": 790
},
{
"epoch": 11.59,
"learning_rate": 7.950000000000001e-05,
"loss": 2.9139,
"step": 800
},
{
"epoch": 11.74,
"learning_rate": 8.05e-05,
"loss": 2.9225,
"step": 810
},
{
"epoch": 11.88,
"learning_rate": 8.15e-05,
"loss": 2.8919,
"step": 820
},
{
"epoch": 12.03,
"learning_rate": 8.25e-05,
"loss": 2.9232,
"step": 830
},
{
"epoch": 12.17,
"learning_rate": 8.35e-05,
"loss": 2.92,
"step": 840
},
{
"epoch": 12.32,
"learning_rate": 8.450000000000001e-05,
"loss": 2.8916,
"step": 850
},
{
"epoch": 12.46,
"learning_rate": 8.55e-05,
"loss": 2.9289,
"step": 860
},
{
"epoch": 12.61,
"learning_rate": 8.65e-05,
"loss": 2.8714,
"step": 870
},
{
"epoch": 12.75,
"learning_rate": 8.75e-05,
"loss": 2.9255,
"step": 880
},
{
"epoch": 12.9,
"learning_rate": 8.850000000000001e-05,
"loss": 2.9071,
"step": 890
},
{
"epoch": 13.04,
"learning_rate": 8.950000000000001e-05,
"loss": 2.8934,
"step": 900
},
{
"epoch": 13.19,
"learning_rate": 9.05e-05,
"loss": 2.9482,
"step": 910
},
{
"epoch": 13.33,
"learning_rate": 9.15e-05,
"loss": 2.8965,
"step": 920
},
{
"epoch": 13.48,
"learning_rate": 9.250000000000001e-05,
"loss": 2.8906,
"step": 930
},
{
"epoch": 13.62,
"learning_rate": 9.350000000000001e-05,
"loss": 2.8707,
"step": 940
},
{
"epoch": 13.77,
"learning_rate": 9.449999999999999e-05,
"loss": 2.935,
"step": 950
},
{
"epoch": 13.91,
"learning_rate": 9.55e-05,
"loss": 2.9169,
"step": 960
},
{
"epoch": 14.06,
"learning_rate": 9.65e-05,
"loss": 2.8785,
"step": 970
},
{
"epoch": 14.2,
"learning_rate": 9.75e-05,
"loss": 2.914,
"step": 980
},
{
"epoch": 14.35,
"learning_rate": 9.850000000000001e-05,
"loss": 2.9282,
"step": 990
},
{
"epoch": 14.49,
"learning_rate": 9.95e-05,
"loss": 2.9313,
"step": 1000
},
{
"epoch": 14.49,
"eval_loss": 2.869788646697998,
"eval_runtime": 583.4175,
"eval_samples_per_second": 5.817,
"eval_steps_per_second": 0.728,
"eval_wer": 1.0,
"step": 1000
},
{
"epoch": 14.64,
"learning_rate": 9.991525423728813e-05,
"loss": 2.8698,
"step": 1010
},
{
"epoch": 14.78,
"learning_rate": 9.974576271186441e-05,
"loss": 2.9056,
"step": 1020
},
{
"epoch": 14.93,
"learning_rate": 9.957627118644068e-05,
"loss": 2.8838,
"step": 1030
},
{
"epoch": 15.07,
"learning_rate": 9.940677966101696e-05,
"loss": 2.8688,
"step": 1040
},
{
"epoch": 15.22,
"learning_rate": 9.923728813559322e-05,
"loss": 2.9192,
"step": 1050
},
{
"epoch": 15.36,
"learning_rate": 9.906779661016949e-05,
"loss": 2.8884,
"step": 1060
},
{
"epoch": 15.51,
"learning_rate": 9.889830508474577e-05,
"loss": 2.9437,
"step": 1070
},
{
"epoch": 15.65,
"learning_rate": 9.872881355932204e-05,
"loss": 2.8941,
"step": 1080
},
{
"epoch": 15.8,
"learning_rate": 9.85593220338983e-05,
"loss": 2.8788,
"step": 1090
},
{
"epoch": 15.94,
"learning_rate": 9.838983050847458e-05,
"loss": 2.8942,
"step": 1100
},
{
"epoch": 16.09,
"learning_rate": 9.822033898305085e-05,
"loss": 2.9077,
"step": 1110
},
{
"epoch": 16.23,
"learning_rate": 9.805084745762713e-05,
"loss": 2.9044,
"step": 1120
},
{
"epoch": 16.38,
"learning_rate": 9.78813559322034e-05,
"loss": 2.8415,
"step": 1130
},
{
"epoch": 16.52,
"learning_rate": 9.771186440677966e-05,
"loss": 2.8204,
"step": 1140
},
{
"epoch": 16.67,
"learning_rate": 9.754237288135594e-05,
"loss": 2.7944,
"step": 1150
},
{
"epoch": 16.81,
"learning_rate": 9.737288135593221e-05,
"loss": 2.7636,
"step": 1160
},
{
"epoch": 16.96,
"learning_rate": 9.720338983050849e-05,
"loss": 2.7104,
"step": 1170
},
{
"epoch": 17.1,
"learning_rate": 9.703389830508474e-05,
"loss": 2.529,
"step": 1180
},
{
"epoch": 17.25,
"learning_rate": 9.686440677966102e-05,
"loss": 2.4256,
"step": 1190
},
{
"epoch": 17.39,
"learning_rate": 9.66949152542373e-05,
"loss": 2.3963,
"step": 1200
},
{
"epoch": 17.54,
"learning_rate": 9.652542372881357e-05,
"loss": 2.1997,
"step": 1210
},
{
"epoch": 17.68,
"learning_rate": 9.635593220338983e-05,
"loss": 2.1107,
"step": 1220
},
{
"epoch": 17.83,
"learning_rate": 9.61864406779661e-05,
"loss": 1.9865,
"step": 1230
},
{
"epoch": 17.97,
"learning_rate": 9.601694915254238e-05,
"loss": 1.7979,
"step": 1240
},
{
"epoch": 18.12,
"learning_rate": 9.584745762711866e-05,
"loss": 1.6753,
"step": 1250
},
{
"epoch": 18.26,
"learning_rate": 9.567796610169491e-05,
"loss": 1.6191,
"step": 1260
},
{
"epoch": 18.41,
"learning_rate": 9.550847457627119e-05,
"loss": 1.6077,
"step": 1270
},
{
"epoch": 18.55,
"learning_rate": 9.533898305084746e-05,
"loss": 1.5907,
"step": 1280
},
{
"epoch": 18.7,
"learning_rate": 9.516949152542374e-05,
"loss": 1.4164,
"step": 1290
},
{
"epoch": 18.84,
"learning_rate": 9.5e-05,
"loss": 1.596,
"step": 1300
},
{
"epoch": 18.99,
"learning_rate": 9.483050847457627e-05,
"loss": 1.3845,
"step": 1310
},
{
"epoch": 19.13,
"learning_rate": 9.466101694915255e-05,
"loss": 1.4502,
"step": 1320
},
{
"epoch": 19.28,
"learning_rate": 9.449152542372882e-05,
"loss": 1.3023,
"step": 1330
},
{
"epoch": 19.42,
"learning_rate": 9.432203389830508e-05,
"loss": 1.2154,
"step": 1340
},
{
"epoch": 19.57,
"learning_rate": 9.415254237288136e-05,
"loss": 1.1851,
"step": 1350
},
{
"epoch": 19.71,
"learning_rate": 9.398305084745763e-05,
"loss": 1.1953,
"step": 1360
},
{
"epoch": 19.86,
"learning_rate": 9.381355932203391e-05,
"loss": 1.1922,
"step": 1370
},
{
"epoch": 20.0,
"learning_rate": 9.364406779661016e-05,
"loss": 1.2421,
"step": 1380
},
{
"epoch": 20.14,
"learning_rate": 9.347457627118644e-05,
"loss": 1.3635,
"step": 1390
},
{
"epoch": 20.29,
"learning_rate": 9.330508474576271e-05,
"loss": 1.1689,
"step": 1400
},
{
"epoch": 20.43,
"learning_rate": 9.313559322033899e-05,
"loss": 1.2484,
"step": 1410
},
{
"epoch": 20.58,
"learning_rate": 9.296610169491527e-05,
"loss": 1.1633,
"step": 1420
},
{
"epoch": 20.72,
"learning_rate": 9.279661016949152e-05,
"loss": 1.2084,
"step": 1430
},
{
"epoch": 20.87,
"learning_rate": 9.26271186440678e-05,
"loss": 1.1117,
"step": 1440
},
{
"epoch": 21.01,
"learning_rate": 9.245762711864407e-05,
"loss": 1.0455,
"step": 1450
},
{
"epoch": 21.16,
"learning_rate": 9.228813559322035e-05,
"loss": 1.0456,
"step": 1460
},
{
"epoch": 21.3,
"learning_rate": 9.211864406779661e-05,
"loss": 1.1641,
"step": 1470
},
{
"epoch": 21.45,
"learning_rate": 9.194915254237288e-05,
"loss": 1.0116,
"step": 1480
},
{
"epoch": 21.59,
"learning_rate": 9.177966101694916e-05,
"loss": 1.0744,
"step": 1490
},
{
"epoch": 21.74,
"learning_rate": 9.161016949152543e-05,
"loss": 1.068,
"step": 1500
},
{
"epoch": 21.74,
"eval_loss": 0.2646801173686981,
"eval_runtime": 584.2239,
"eval_samples_per_second": 5.809,
"eval_steps_per_second": 0.727,
"eval_wer": 0.2564981222624888,
"step": 1500
},
{
"epoch": 21.88,
"learning_rate": 9.14406779661017e-05,
"loss": 0.949,
"step": 1510
},
{
"epoch": 22.03,
"learning_rate": 9.127118644067797e-05,
"loss": 1.0756,
"step": 1520
},
{
"epoch": 22.17,
"learning_rate": 9.110169491525424e-05,
"loss": 1.0889,
"step": 1530
},
{
"epoch": 22.32,
"learning_rate": 9.093220338983052e-05,
"loss": 1.0893,
"step": 1540
},
{
"epoch": 22.46,
"learning_rate": 9.076271186440677e-05,
"loss": 0.9964,
"step": 1550
},
{
"epoch": 22.61,
"learning_rate": 9.059322033898305e-05,
"loss": 0.9238,
"step": 1560
},
{
"epoch": 22.75,
"learning_rate": 9.042372881355933e-05,
"loss": 0.9582,
"step": 1570
},
{
"epoch": 22.9,
"learning_rate": 9.02542372881356e-05,
"loss": 0.9654,
"step": 1580
},
{
"epoch": 23.04,
"learning_rate": 9.008474576271187e-05,
"loss": 0.8715,
"step": 1590
},
{
"epoch": 23.19,
"learning_rate": 8.991525423728813e-05,
"loss": 0.9353,
"step": 1600
},
{
"epoch": 23.33,
"learning_rate": 8.974576271186441e-05,
"loss": 0.9148,
"step": 1610
},
{
"epoch": 23.48,
"learning_rate": 8.957627118644069e-05,
"loss": 0.9696,
"step": 1620
},
{
"epoch": 23.62,
"learning_rate": 8.940677966101694e-05,
"loss": 0.9165,
"step": 1630
},
{
"epoch": 23.77,
"learning_rate": 8.923728813559322e-05,
"loss": 0.9927,
"step": 1640
},
{
"epoch": 23.91,
"learning_rate": 8.906779661016949e-05,
"loss": 0.9624,
"step": 1650
},
{
"epoch": 24.06,
"learning_rate": 8.889830508474577e-05,
"loss": 0.9503,
"step": 1660
},
{
"epoch": 24.2,
"learning_rate": 8.872881355932204e-05,
"loss": 0.9891,
"step": 1670
},
{
"epoch": 24.35,
"learning_rate": 8.85593220338983e-05,
"loss": 0.9596,
"step": 1680
},
{
"epoch": 24.49,
"learning_rate": 8.838983050847458e-05,
"loss": 0.9499,
"step": 1690
},
{
"epoch": 24.64,
"learning_rate": 8.822033898305085e-05,
"loss": 0.8957,
"step": 1700
},
{
"epoch": 24.78,
"learning_rate": 8.805084745762713e-05,
"loss": 0.862,
"step": 1710
},
{
"epoch": 24.93,
"learning_rate": 8.78813559322034e-05,
"loss": 0.9045,
"step": 1720
},
{
"epoch": 25.07,
"learning_rate": 8.771186440677966e-05,
"loss": 1.0047,
"step": 1730
},
{
"epoch": 25.22,
"learning_rate": 8.754237288135594e-05,
"loss": 1.0456,
"step": 1740
},
{
"epoch": 25.36,
"learning_rate": 8.737288135593221e-05,
"loss": 1.2944,
"step": 1750
},
{
"epoch": 25.51,
"learning_rate": 8.720338983050848e-05,
"loss": 0.9649,
"step": 1760
},
{
"epoch": 25.65,
"learning_rate": 8.703389830508476e-05,
"loss": 0.903,
"step": 1770
},
{
"epoch": 25.8,
"learning_rate": 8.686440677966102e-05,
"loss": 0.8972,
"step": 1780
},
{
"epoch": 25.94,
"learning_rate": 8.66949152542373e-05,
"loss": 0.9095,
"step": 1790
},
{
"epoch": 26.09,
"learning_rate": 8.652542372881355e-05,
"loss": 0.8686,
"step": 1800
},
{
"epoch": 26.23,
"learning_rate": 8.635593220338983e-05,
"loss": 0.7924,
"step": 1810
},
{
"epoch": 26.38,
"learning_rate": 8.61864406779661e-05,
"loss": 0.8395,
"step": 1820
},
{
"epoch": 26.52,
"learning_rate": 8.601694915254238e-05,
"loss": 0.8397,
"step": 1830
},
{
"epoch": 26.67,
"learning_rate": 8.584745762711865e-05,
"loss": 0.9085,
"step": 1840
},
{
"epoch": 26.81,
"learning_rate": 8.567796610169491e-05,
"loss": 0.9359,
"step": 1850
},
{
"epoch": 26.96,
"learning_rate": 8.55084745762712e-05,
"loss": 0.9281,
"step": 1860
},
{
"epoch": 27.1,
"learning_rate": 8.533898305084746e-05,
"loss": 0.8757,
"step": 1870
},
{
"epoch": 27.25,
"learning_rate": 8.516949152542373e-05,
"loss": 0.8233,
"step": 1880
},
{
"epoch": 27.39,
"learning_rate": 8.5e-05,
"loss": 0.9204,
"step": 1890
},
{
"epoch": 27.54,
"learning_rate": 8.483050847457627e-05,
"loss": 0.8401,
"step": 1900
},
{
"epoch": 27.68,
"learning_rate": 8.466101694915255e-05,
"loss": 0.8236,
"step": 1910
},
{
"epoch": 27.83,
"learning_rate": 8.449152542372882e-05,
"loss": 0.8152,
"step": 1920
},
{
"epoch": 27.97,
"learning_rate": 8.432203389830509e-05,
"loss": 0.8678,
"step": 1930
},
{
"epoch": 28.12,
"learning_rate": 8.415254237288137e-05,
"loss": 0.8236,
"step": 1940
},
{
"epoch": 28.26,
"learning_rate": 8.398305084745763e-05,
"loss": 0.8069,
"step": 1950
},
{
"epoch": 28.41,
"learning_rate": 8.381355932203391e-05,
"loss": 0.849,
"step": 1960
},
{
"epoch": 28.55,
"learning_rate": 8.364406779661016e-05,
"loss": 0.8725,
"step": 1970
},
{
"epoch": 28.7,
"learning_rate": 8.347457627118644e-05,
"loss": 1.1043,
"step": 1980
},
{
"epoch": 28.84,
"learning_rate": 8.330508474576272e-05,
"loss": 0.8797,
"step": 1990
},
{
"epoch": 28.99,
"learning_rate": 8.313559322033899e-05,
"loss": 0.8151,
"step": 2000
},
{
"epoch": 28.99,
"eval_loss": 0.2066880613565445,
"eval_runtime": 586.2698,
"eval_samples_per_second": 5.789,
"eval_steps_per_second": 0.725,
"eval_wer": 0.17191364872749385,
"step": 2000
},
{
"epoch": 29.13,
"learning_rate": 8.296610169491526e-05,
"loss": 0.7783,
"step": 2010
},
{
"epoch": 29.28,
"learning_rate": 8.279661016949152e-05,
"loss": 0.9059,
"step": 2020
},
{
"epoch": 29.42,
"learning_rate": 8.26271186440678e-05,
"loss": 0.7881,
"step": 2030
},
{
"epoch": 29.57,
"learning_rate": 8.245762711864407e-05,
"loss": 0.8869,
"step": 2040
},
{
"epoch": 29.71,
"learning_rate": 8.228813559322034e-05,
"loss": 0.93,
"step": 2050
},
{
"epoch": 29.86,
"learning_rate": 8.211864406779662e-05,
"loss": 0.83,
"step": 2060
},
{
"epoch": 30.0,
"learning_rate": 8.194915254237288e-05,
"loss": 0.7013,
"step": 2070
},
{
"epoch": 30.14,
"learning_rate": 8.177966101694916e-05,
"loss": 0.7469,
"step": 2080
},
{
"epoch": 30.29,
"learning_rate": 8.161016949152543e-05,
"loss": 0.8841,
"step": 2090
},
{
"epoch": 30.43,
"learning_rate": 8.14406779661017e-05,
"loss": 0.7999,
"step": 2100
},
{
"epoch": 30.58,
"learning_rate": 8.127118644067797e-05,
"loss": 0.875,
"step": 2110
},
{
"epoch": 30.72,
"learning_rate": 8.110169491525424e-05,
"loss": 0.7745,
"step": 2120
},
{
"epoch": 30.87,
"learning_rate": 8.093220338983051e-05,
"loss": 0.8997,
"step": 2130
},
{
"epoch": 31.01,
"learning_rate": 8.076271186440679e-05,
"loss": 0.8064,
"step": 2140
},
{
"epoch": 31.16,
"learning_rate": 8.059322033898305e-05,
"loss": 0.8057,
"step": 2150
},
{
"epoch": 31.3,
"learning_rate": 8.042372881355933e-05,
"loss": 0.7763,
"step": 2160
},
{
"epoch": 31.45,
"learning_rate": 8.025423728813559e-05,
"loss": 0.7625,
"step": 2170
},
{
"epoch": 31.59,
"learning_rate": 8.008474576271187e-05,
"loss": 0.7622,
"step": 2180
},
{
"epoch": 31.74,
"learning_rate": 7.991525423728813e-05,
"loss": 0.8415,
"step": 2190
},
{
"epoch": 31.88,
"learning_rate": 7.974576271186441e-05,
"loss": 0.7549,
"step": 2200
},
{
"epoch": 32.03,
"learning_rate": 7.957627118644068e-05,
"loss": 0.8678,
"step": 2210
},
{
"epoch": 32.17,
"learning_rate": 7.940677966101695e-05,
"loss": 0.7861,
"step": 2220
},
{
"epoch": 32.32,
"learning_rate": 7.923728813559323e-05,
"loss": 0.7887,
"step": 2230
},
{
"epoch": 32.46,
"learning_rate": 7.906779661016949e-05,
"loss": 0.6937,
"step": 2240
},
{
"epoch": 32.61,
"learning_rate": 7.889830508474577e-05,
"loss": 0.746,
"step": 2250
},
{
"epoch": 32.75,
"learning_rate": 7.872881355932204e-05,
"loss": 0.7307,
"step": 2260
},
{
"epoch": 32.9,
"learning_rate": 7.85593220338983e-05,
"loss": 0.758,
"step": 2270
},
{
"epoch": 33.04,
"learning_rate": 7.838983050847458e-05,
"loss": 0.7016,
"step": 2280
},
{
"epoch": 33.19,
"learning_rate": 7.822033898305085e-05,
"loss": 0.7936,
"step": 2290
},
{
"epoch": 33.33,
"learning_rate": 7.805084745762712e-05,
"loss": 0.7138,
"step": 2300
},
{
"epoch": 33.48,
"learning_rate": 7.78813559322034e-05,
"loss": 0.8223,
"step": 2310
},
{
"epoch": 33.62,
"learning_rate": 7.771186440677966e-05,
"loss": 0.8025,
"step": 2320
},
{
"epoch": 33.77,
"learning_rate": 7.754237288135594e-05,
"loss": 0.9172,
"step": 2330
},
{
"epoch": 33.91,
"learning_rate": 7.73728813559322e-05,
"loss": 0.914,
"step": 2340
},
{
"epoch": 34.06,
"learning_rate": 7.720338983050848e-05,
"loss": 0.7792,
"step": 2350
},
{
"epoch": 34.2,
"learning_rate": 7.703389830508476e-05,
"loss": 0.7518,
"step": 2360
},
{
"epoch": 34.35,
"learning_rate": 7.686440677966102e-05,
"loss": 0.7291,
"step": 2370
},
{
"epoch": 34.49,
"learning_rate": 7.669491525423729e-05,
"loss": 0.7946,
"step": 2380
},
{
"epoch": 34.64,
"learning_rate": 7.652542372881356e-05,
"loss": 0.721,
"step": 2390
},
{
"epoch": 34.78,
"learning_rate": 7.635593220338984e-05,
"loss": 0.725,
"step": 2400
},
{
"epoch": 34.93,
"learning_rate": 7.618644067796612e-05,
"loss": 0.7771,
"step": 2410
},
{
"epoch": 35.07,
"learning_rate": 7.601694915254237e-05,
"loss": 0.7069,
"step": 2420
},
{
"epoch": 35.22,
"learning_rate": 7.584745762711865e-05,
"loss": 0.7194,
"step": 2430
},
{
"epoch": 35.36,
"learning_rate": 7.567796610169491e-05,
"loss": 0.7174,
"step": 2440
},
{
"epoch": 35.51,
"learning_rate": 7.55084745762712e-05,
"loss": 0.752,
"step": 2450
},
{
"epoch": 35.65,
"learning_rate": 7.533898305084746e-05,
"loss": 0.7291,
"step": 2460
},
{
"epoch": 35.8,
"learning_rate": 7.516949152542373e-05,
"loss": 0.8244,
"step": 2470
},
{
"epoch": 35.94,
"learning_rate": 7.500000000000001e-05,
"loss": 0.7068,
"step": 2480
},
{
"epoch": 36.09,
"learning_rate": 7.483050847457627e-05,
"loss": 0.8703,
"step": 2490
},
{
"epoch": 36.23,
"learning_rate": 7.466101694915255e-05,
"loss": 0.764,
"step": 2500
},
{
"epoch": 36.23,
"eval_loss": 0.1974831521511078,
"eval_runtime": 581.603,
"eval_samples_per_second": 5.836,
"eval_steps_per_second": 0.731,
"eval_wer": 0.1567931366023223,
"step": 2500
},
{
"epoch": 36.38,
"learning_rate": 7.449152542372882e-05,
"loss": 0.7373,
"step": 2510
},
{
"epoch": 36.52,
"learning_rate": 7.432203389830509e-05,
"loss": 0.7291,
"step": 2520
},
{
"epoch": 36.67,
"learning_rate": 7.415254237288137e-05,
"loss": 0.8097,
"step": 2530
},
{
"epoch": 36.81,
"learning_rate": 7.398305084745763e-05,
"loss": 0.738,
"step": 2540
},
{
"epoch": 36.96,
"learning_rate": 7.38135593220339e-05,
"loss": 0.7335,
"step": 2550
},
{
"epoch": 37.1,
"learning_rate": 7.364406779661018e-05,
"loss": 0.7293,
"step": 2560
},
{
"epoch": 37.25,
"learning_rate": 7.347457627118645e-05,
"loss": 0.7483,
"step": 2570
},
{
"epoch": 37.39,
"learning_rate": 7.330508474576272e-05,
"loss": 0.6923,
"step": 2580
},
{
"epoch": 37.54,
"learning_rate": 7.313559322033898e-05,
"loss": 0.663,
"step": 2590
},
{
"epoch": 37.68,
"learning_rate": 7.296610169491526e-05,
"loss": 0.7579,
"step": 2600
},
{
"epoch": 37.83,
"learning_rate": 7.279661016949152e-05,
"loss": 0.6966,
"step": 2610
},
{
"epoch": 37.97,
"learning_rate": 7.26271186440678e-05,
"loss": 0.7184,
"step": 2620
},
{
"epoch": 38.12,
"learning_rate": 7.245762711864407e-05,
"loss": 0.7068,
"step": 2630
},
{
"epoch": 38.26,
"learning_rate": 7.228813559322034e-05,
"loss": 0.6998,
"step": 2640
},
{
"epoch": 38.41,
"learning_rate": 7.211864406779662e-05,
"loss": 0.7044,
"step": 2650
},
{
"epoch": 38.55,
"learning_rate": 7.194915254237288e-05,
"loss": 0.6947,
"step": 2660
},
{
"epoch": 38.7,
"learning_rate": 7.177966101694915e-05,
"loss": 0.6872,
"step": 2670
},
{
"epoch": 38.84,
"learning_rate": 7.161016949152543e-05,
"loss": 0.7555,
"step": 2680
},
{
"epoch": 38.99,
"learning_rate": 7.14406779661017e-05,
"loss": 0.7157,
"step": 2690
},
{
"epoch": 39.13,
"learning_rate": 7.127118644067798e-05,
"loss": 0.751,
"step": 2700
},
{
"epoch": 39.28,
"learning_rate": 7.110169491525424e-05,
"loss": 0.7603,
"step": 2710
},
{
"epoch": 39.42,
"learning_rate": 7.093220338983051e-05,
"loss": 0.7847,
"step": 2720
},
{
"epoch": 39.57,
"learning_rate": 7.076271186440679e-05,
"loss": 0.7842,
"step": 2730
},
{
"epoch": 39.71,
"learning_rate": 7.059322033898305e-05,
"loss": 0.7265,
"step": 2740
},
{
"epoch": 39.86,
"learning_rate": 7.042372881355933e-05,
"loss": 0.7328,
"step": 2750
},
{
"epoch": 40.0,
"learning_rate": 7.025423728813559e-05,
"loss": 0.6276,
"step": 2760
},
{
"epoch": 40.14,
"learning_rate": 7.008474576271187e-05,
"loss": 0.6662,
"step": 2770
},
{
"epoch": 40.29,
"learning_rate": 6.991525423728815e-05,
"loss": 0.6845,
"step": 2780
},
{
"epoch": 40.43,
"learning_rate": 6.974576271186441e-05,
"loss": 0.76,
"step": 2790
},
{
"epoch": 40.58,
"learning_rate": 6.957627118644068e-05,
"loss": 0.7495,
"step": 2800
},
{
"epoch": 40.72,
"learning_rate": 6.940677966101695e-05,
"loss": 0.631,
"step": 2810
},
{
"epoch": 40.87,
"learning_rate": 6.923728813559323e-05,
"loss": 0.7702,
"step": 2820
},
{
"epoch": 41.01,
"learning_rate": 6.906779661016949e-05,
"loss": 0.7912,
"step": 2830
},
{
"epoch": 41.16,
"learning_rate": 6.889830508474576e-05,
"loss": 0.6516,
"step": 2840
},
{
"epoch": 41.3,
"learning_rate": 6.872881355932204e-05,
"loss": 0.8212,
"step": 2850
},
{
"epoch": 41.45,
"learning_rate": 6.85593220338983e-05,
"loss": 0.7247,
"step": 2860
},
{
"epoch": 41.59,
"learning_rate": 6.838983050847459e-05,
"loss": 0.7021,
"step": 2870
},
{
"epoch": 41.74,
"learning_rate": 6.822033898305085e-05,
"loss": 0.7079,
"step": 2880
},
{
"epoch": 41.88,
"learning_rate": 6.805084745762712e-05,
"loss": 0.7184,
"step": 2890
},
{
"epoch": 42.03,
"learning_rate": 6.78813559322034e-05,
"loss": 0.8249,
"step": 2900
},
{
"epoch": 42.17,
"learning_rate": 6.771186440677966e-05,
"loss": 0.6589,
"step": 2910
},
{
"epoch": 42.32,
"learning_rate": 6.754237288135593e-05,
"loss": 0.6485,
"step": 2920
},
{
"epoch": 42.46,
"learning_rate": 6.737288135593221e-05,
"loss": 0.7682,
"step": 2930
},
{
"epoch": 42.61,
"learning_rate": 6.720338983050848e-05,
"loss": 0.7569,
"step": 2940
},
{
"epoch": 42.75,
"learning_rate": 6.703389830508476e-05,
"loss": 0.6338,
"step": 2950
},
{
"epoch": 42.9,
"learning_rate": 6.686440677966101e-05,
"loss": 0.731,
"step": 2960
},
{
"epoch": 43.04,
"learning_rate": 6.669491525423729e-05,
"loss": 0.6278,
"step": 2970
},
{
"epoch": 43.19,
"learning_rate": 6.652542372881356e-05,
"loss": 0.6538,
"step": 2980
},
{
"epoch": 43.33,
"learning_rate": 6.635593220338984e-05,
"loss": 0.7682,
"step": 2990
},
{
"epoch": 43.48,
"learning_rate": 6.61864406779661e-05,
"loss": 0.7332,
"step": 3000
},
{
"epoch": 43.48,
"eval_loss": 0.18117694556713104,
"eval_runtime": 588.8092,
"eval_samples_per_second": 5.764,
"eval_steps_per_second": 0.722,
"eval_wer": 0.14625808413250171,
"step": 3000
},
{
"epoch": 43.62,
"learning_rate": 6.601694915254237e-05,
"loss": 0.6994,
"step": 3010
},
{
"epoch": 43.77,
"learning_rate": 6.584745762711865e-05,
"loss": 0.6738,
"step": 3020
},
{
"epoch": 43.91,
"learning_rate": 6.567796610169492e-05,
"loss": 0.6887,
"step": 3030
},
{
"epoch": 44.06,
"learning_rate": 6.55084745762712e-05,
"loss": 0.6816,
"step": 3040
},
{
"epoch": 44.2,
"learning_rate": 6.533898305084746e-05,
"loss": 0.7649,
"step": 3050
},
{
"epoch": 44.35,
"learning_rate": 6.516949152542373e-05,
"loss": 0.6712,
"step": 3060
},
{
"epoch": 44.49,
"learning_rate": 6.500000000000001e-05,
"loss": 0.6683,
"step": 3070
},
{
"epoch": 44.64,
"learning_rate": 6.483050847457627e-05,
"loss": 0.696,
"step": 3080
},
{
"epoch": 44.78,
"learning_rate": 6.466101694915254e-05,
"loss": 0.8165,
"step": 3090
},
{
"epoch": 44.93,
"learning_rate": 6.449152542372882e-05,
"loss": 0.7633,
"step": 3100
},
{
"epoch": 45.07,
"learning_rate": 6.432203389830509e-05,
"loss": 0.6813,
"step": 3110
},
{
"epoch": 45.22,
"learning_rate": 6.415254237288137e-05,
"loss": 0.6951,
"step": 3120
},
{
"epoch": 45.36,
"learning_rate": 6.398305084745762e-05,
"loss": 0.7011,
"step": 3130
},
{
"epoch": 45.51,
"learning_rate": 6.38135593220339e-05,
"loss": 0.6952,
"step": 3140
},
{
"epoch": 45.65,
"learning_rate": 6.364406779661018e-05,
"loss": 0.6875,
"step": 3150
},
{
"epoch": 45.8,
"learning_rate": 6.347457627118645e-05,
"loss": 0.689,
"step": 3160
},
{
"epoch": 45.94,
"learning_rate": 6.330508474576271e-05,
"loss": 0.7438,
"step": 3170
},
{
"epoch": 46.09,
"learning_rate": 6.313559322033898e-05,
"loss": 0.7675,
"step": 3180
},
{
"epoch": 46.23,
"learning_rate": 6.296610169491526e-05,
"loss": 0.6308,
"step": 3190
},
{
"epoch": 46.38,
"learning_rate": 6.279661016949154e-05,
"loss": 0.6733,
"step": 3200
},
{
"epoch": 46.52,
"learning_rate": 6.262711864406779e-05,
"loss": 0.7072,
"step": 3210
},
{
"epoch": 46.67,
"learning_rate": 6.245762711864407e-05,
"loss": 0.6429,
"step": 3220
},
{
"epoch": 46.81,
"learning_rate": 6.228813559322034e-05,
"loss": 0.7316,
"step": 3230
},
{
"epoch": 46.96,
"learning_rate": 6.211864406779662e-05,
"loss": 0.6912,
"step": 3240
},
{
"epoch": 47.1,
"learning_rate": 6.194915254237288e-05,
"loss": 0.7583,
"step": 3250
},
{
"epoch": 47.25,
"learning_rate": 6.177966101694915e-05,
"loss": 0.6429,
"step": 3260
},
{
"epoch": 47.39,
"learning_rate": 6.161016949152543e-05,
"loss": 0.7152,
"step": 3270
},
{
"epoch": 47.54,
"learning_rate": 6.14406779661017e-05,
"loss": 0.8123,
"step": 3280
},
{
"epoch": 47.68,
"learning_rate": 6.127118644067798e-05,
"loss": 0.737,
"step": 3290
},
{
"epoch": 47.83,
"learning_rate": 6.110169491525424e-05,
"loss": 0.6072,
"step": 3300
},
{
"epoch": 47.97,
"learning_rate": 6.093220338983051e-05,
"loss": 0.681,
"step": 3310
},
{
"epoch": 48.12,
"learning_rate": 6.076271186440678e-05,
"loss": 0.6064,
"step": 3320
},
{
"epoch": 48.26,
"learning_rate": 6.0593220338983056e-05,
"loss": 0.6178,
"step": 3330
},
{
"epoch": 48.41,
"learning_rate": 6.042372881355932e-05,
"loss": 0.7126,
"step": 3340
},
{
"epoch": 48.55,
"learning_rate": 6.0254237288135595e-05,
"loss": 0.7283,
"step": 3350
},
{
"epoch": 48.7,
"learning_rate": 6.008474576271187e-05,
"loss": 0.6796,
"step": 3360
},
{
"epoch": 48.84,
"learning_rate": 5.991525423728814e-05,
"loss": 0.7529,
"step": 3370
},
{
"epoch": 48.99,
"learning_rate": 5.974576271186441e-05,
"loss": 0.6973,
"step": 3380
},
{
"epoch": 49.13,
"learning_rate": 5.957627118644068e-05,
"loss": 0.6489,
"step": 3390
},
{
"epoch": 49.28,
"learning_rate": 5.9406779661016954e-05,
"loss": 0.7316,
"step": 3400
},
{
"epoch": 49.42,
"learning_rate": 5.923728813559323e-05,
"loss": 0.7525,
"step": 3410
},
{
"epoch": 49.57,
"learning_rate": 5.906779661016949e-05,
"loss": 0.6554,
"step": 3420
},
{
"epoch": 49.71,
"learning_rate": 5.889830508474577e-05,
"loss": 0.6275,
"step": 3430
},
{
"epoch": 49.86,
"learning_rate": 5.872881355932204e-05,
"loss": 0.6468,
"step": 3440
},
{
"epoch": 50.0,
"learning_rate": 5.855932203389831e-05,
"loss": 0.6908,
"step": 3450
},
{
"epoch": 50.14,
"learning_rate": 5.838983050847457e-05,
"loss": 0.6363,
"step": 3460
},
{
"epoch": 50.29,
"learning_rate": 5.8220338983050846e-05,
"loss": 0.7075,
"step": 3470
},
{
"epoch": 50.43,
"learning_rate": 5.805084745762712e-05,
"loss": 0.6981,
"step": 3480
},
{
"epoch": 50.58,
"learning_rate": 5.78813559322034e-05,
"loss": 0.6621,
"step": 3490
},
{
"epoch": 50.72,
"learning_rate": 5.771186440677966e-05,
"loss": 0.5952,
"step": 3500
},
{
"epoch": 50.72,
"eval_loss": 0.1922890692949295,
"eval_runtime": 587.7417,
"eval_samples_per_second": 5.775,
"eval_steps_per_second": 0.723,
"eval_wer": 0.1427820098776389,
"step": 3500
},
{
"epoch": 50.87,
"learning_rate": 5.754237288135593e-05,
"loss": 0.6752,
"step": 3510
},
{
"epoch": 51.01,
"learning_rate": 5.7372881355932205e-05,
"loss": 0.6024,
"step": 3520
},
{
"epoch": 51.16,
"learning_rate": 5.720338983050848e-05,
"loss": 0.7204,
"step": 3530
},
{
"epoch": 51.3,
"learning_rate": 5.7033898305084744e-05,
"loss": 0.7309,
"step": 3540
},
{
"epoch": 51.45,
"learning_rate": 5.686440677966102e-05,
"loss": 0.6726,
"step": 3550
},
{
"epoch": 51.59,
"learning_rate": 5.669491525423729e-05,
"loss": 0.6414,
"step": 3560
},
{
"epoch": 51.74,
"learning_rate": 5.6525423728813564e-05,
"loss": 0.6915,
"step": 3570
},
{
"epoch": 51.88,
"learning_rate": 5.635593220338984e-05,
"loss": 0.6821,
"step": 3580
},
{
"epoch": 52.03,
"learning_rate": 5.6186440677966103e-05,
"loss": 0.6443,
"step": 3590
},
{
"epoch": 52.17,
"learning_rate": 5.6016949152542377e-05,
"loss": 0.6501,
"step": 3600
},
{
"epoch": 52.32,
"learning_rate": 5.584745762711865e-05,
"loss": 0.7031,
"step": 3610
},
{
"epoch": 52.46,
"learning_rate": 5.567796610169492e-05,
"loss": 0.7238,
"step": 3620
},
{
"epoch": 52.61,
"learning_rate": 5.550847457627118e-05,
"loss": 0.6266,
"step": 3630
},
{
"epoch": 52.75,
"learning_rate": 5.533898305084746e-05,
"loss": 0.6443,
"step": 3640
},
{
"epoch": 52.9,
"learning_rate": 5.5169491525423736e-05,
"loss": 0.7794,
"step": 3650
},
{
"epoch": 53.04,
"learning_rate": 5.500000000000001e-05,
"loss": 0.629,
"step": 3660
},
{
"epoch": 53.19,
"learning_rate": 5.483050847457627e-05,
"loss": 0.67,
"step": 3670
},
{
"epoch": 53.33,
"learning_rate": 5.466101694915254e-05,
"loss": 0.6931,
"step": 3680
},
{
"epoch": 53.48,
"learning_rate": 5.4491525423728815e-05,
"loss": 0.5592,
"step": 3690
},
{
"epoch": 53.62,
"learning_rate": 5.4322033898305095e-05,
"loss": 0.6047,
"step": 3700
},
{
"epoch": 53.77,
"learning_rate": 5.4152542372881354e-05,
"loss": 0.6677,
"step": 3710
},
{
"epoch": 53.91,
"learning_rate": 5.398305084745763e-05,
"loss": 0.6204,
"step": 3720
},
{
"epoch": 54.06,
"learning_rate": 5.38135593220339e-05,
"loss": 0.6107,
"step": 3730
},
{
"epoch": 54.2,
"learning_rate": 5.3644067796610174e-05,
"loss": 0.6793,
"step": 3740
},
{
"epoch": 54.35,
"learning_rate": 5.347457627118644e-05,
"loss": 0.6442,
"step": 3750
},
{
"epoch": 54.49,
"learning_rate": 5.330508474576271e-05,
"loss": 0.618,
"step": 3760
},
{
"epoch": 54.64,
"learning_rate": 5.3135593220338986e-05,
"loss": 0.691,
"step": 3770
},
{
"epoch": 54.78,
"learning_rate": 5.296610169491526e-05,
"loss": 0.6762,
"step": 3780
},
{
"epoch": 54.93,
"learning_rate": 5.2796610169491526e-05,
"loss": 0.6501,
"step": 3790
},
{
"epoch": 55.07,
"learning_rate": 5.26271186440678e-05,
"loss": 0.6203,
"step": 3800
},
{
"epoch": 55.22,
"learning_rate": 5.245762711864407e-05,
"loss": 0.6739,
"step": 3810
},
{
"epoch": 55.36,
"learning_rate": 5.2288135593220345e-05,
"loss": 0.6109,
"step": 3820
},
{
"epoch": 55.51,
"learning_rate": 5.211864406779662e-05,
"loss": 0.6775,
"step": 3830
},
{
"epoch": 55.65,
"learning_rate": 5.194915254237288e-05,
"loss": 0.6144,
"step": 3840
},
{
"epoch": 55.8,
"learning_rate": 5.177966101694916e-05,
"loss": 0.6182,
"step": 3850
},
{
"epoch": 55.94,
"learning_rate": 5.161016949152543e-05,
"loss": 0.6425,
"step": 3860
},
{
"epoch": 56.09,
"learning_rate": 5.1440677966101704e-05,
"loss": 0.6439,
"step": 3870
},
{
"epoch": 56.23,
"learning_rate": 5.1271186440677964e-05,
"loss": 0.6336,
"step": 3880
},
{
"epoch": 56.38,
"learning_rate": 5.110169491525424e-05,
"loss": 0.6612,
"step": 3890
},
{
"epoch": 56.52,
"learning_rate": 5.093220338983051e-05,
"loss": 0.6779,
"step": 3900
},
{
"epoch": 56.67,
"learning_rate": 5.076271186440678e-05,
"loss": 0.6551,
"step": 3910
},
{
"epoch": 56.81,
"learning_rate": 5.059322033898305e-05,
"loss": 0.7081,
"step": 3920
},
{
"epoch": 56.96,
"learning_rate": 5.042372881355932e-05,
"loss": 0.5887,
"step": 3930
},
{
"epoch": 57.1,
"learning_rate": 5.0254237288135596e-05,
"loss": 0.5609,
"step": 3940
},
{
"epoch": 57.25,
"learning_rate": 5.008474576271187e-05,
"loss": 0.6164,
"step": 3950
},
{
"epoch": 57.39,
"learning_rate": 4.991525423728814e-05,
"loss": 0.6744,
"step": 3960
},
{
"epoch": 57.54,
"learning_rate": 4.974576271186441e-05,
"loss": 0.6463,
"step": 3970
},
{
"epoch": 57.68,
"learning_rate": 4.957627118644068e-05,
"loss": 0.5984,
"step": 3980
},
{
"epoch": 57.83,
"learning_rate": 4.940677966101695e-05,
"loss": 0.6503,
"step": 3990
},
{
"epoch": 57.97,
"learning_rate": 4.923728813559322e-05,
"loss": 0.6655,
"step": 4000
},
{
"epoch": 57.97,
"eval_loss": 0.18997956812381744,
"eval_runtime": 588.4214,
"eval_samples_per_second": 5.768,
"eval_steps_per_second": 0.722,
"eval_wer": 0.14038245034473124,
"step": 4000
},
{
"epoch": 58.12,
"learning_rate": 4.9067796610169495e-05,
"loss": 0.6322,
"step": 4010
},
{
"epoch": 58.26,
"learning_rate": 4.889830508474577e-05,
"loss": 0.62,
"step": 4020
},
{
"epoch": 58.41,
"learning_rate": 4.8728813559322034e-05,
"loss": 0.6211,
"step": 4030
},
{
"epoch": 58.55,
"learning_rate": 4.855932203389831e-05,
"loss": 0.5958,
"step": 4040
},
{
"epoch": 58.7,
"learning_rate": 4.8389830508474574e-05,
"loss": 0.6592,
"step": 4050
},
{
"epoch": 58.84,
"learning_rate": 4.822033898305085e-05,
"loss": 0.6164,
"step": 4060
},
{
"epoch": 58.99,
"learning_rate": 4.805084745762712e-05,
"loss": 0.5693,
"step": 4070
},
{
"epoch": 59.13,
"learning_rate": 4.788135593220339e-05,
"loss": 0.6635,
"step": 4080
},
{
"epoch": 59.28,
"learning_rate": 4.7711864406779666e-05,
"loss": 0.6637,
"step": 4090
},
{
"epoch": 59.42,
"learning_rate": 4.754237288135593e-05,
"loss": 0.6043,
"step": 4100
},
{
"epoch": 59.57,
"learning_rate": 4.7372881355932206e-05,
"loss": 0.6564,
"step": 4110
},
{
"epoch": 59.71,
"learning_rate": 4.720338983050848e-05,
"loss": 0.5801,
"step": 4120
},
{
"epoch": 59.86,
"learning_rate": 4.703389830508475e-05,
"loss": 0.643,
"step": 4130
},
{
"epoch": 60.0,
"learning_rate": 4.686440677966102e-05,
"loss": 0.6152,
"step": 4140
},
{
"epoch": 60.14,
"learning_rate": 4.669491525423729e-05,
"loss": 0.5629,
"step": 4150
},
{
"epoch": 60.29,
"learning_rate": 4.652542372881356e-05,
"loss": 0.6618,
"step": 4160
},
{
"epoch": 60.43,
"learning_rate": 4.635593220338984e-05,
"loss": 0.6955,
"step": 4170
},
{
"epoch": 60.58,
"learning_rate": 4.6186440677966104e-05,
"loss": 0.5983,
"step": 4180
},
{
"epoch": 60.72,
"learning_rate": 4.601694915254238e-05,
"loss": 0.6647,
"step": 4190
},
{
"epoch": 60.87,
"learning_rate": 4.5847457627118644e-05,
"loss": 0.7206,
"step": 4200
},
{
"epoch": 61.01,
"learning_rate": 4.567796610169492e-05,
"loss": 0.6092,
"step": 4210
},
{
"epoch": 61.16,
"learning_rate": 4.550847457627119e-05,
"loss": 0.7258,
"step": 4220
},
{
"epoch": 61.3,
"learning_rate": 4.533898305084746e-05,
"loss": 0.5842,
"step": 4230
},
{
"epoch": 61.45,
"learning_rate": 4.516949152542373e-05,
"loss": 0.5988,
"step": 4240
},
{
"epoch": 61.59,
"learning_rate": 4.5e-05,
"loss": 0.816,
"step": 4250
},
{
"epoch": 61.74,
"learning_rate": 4.483050847457627e-05,
"loss": 0.6105,
"step": 4260
},
{
"epoch": 61.88,
"learning_rate": 4.466101694915254e-05,
"loss": 0.6099,
"step": 4270
},
{
"epoch": 62.03,
"learning_rate": 4.4491525423728816e-05,
"loss": 0.6852,
"step": 4280
},
{
"epoch": 62.17,
"learning_rate": 4.432203389830509e-05,
"loss": 0.6235,
"step": 4290
},
{
"epoch": 62.32,
"learning_rate": 4.4152542372881355e-05,
"loss": 0.5611,
"step": 4300
},
{
"epoch": 62.46,
"learning_rate": 4.398305084745763e-05,
"loss": 0.6345,
"step": 4310
},
{
"epoch": 62.61,
"learning_rate": 4.38135593220339e-05,
"loss": 0.5963,
"step": 4320
},
{
"epoch": 62.75,
"learning_rate": 4.3644067796610175e-05,
"loss": 0.6105,
"step": 4330
},
{
"epoch": 62.9,
"learning_rate": 4.347457627118644e-05,
"loss": 0.6964,
"step": 4340
},
{
"epoch": 63.04,
"learning_rate": 4.3305084745762714e-05,
"loss": 0.627,
"step": 4350
},
{
"epoch": 63.19,
"learning_rate": 4.313559322033899e-05,
"loss": 0.7093,
"step": 4360
},
{
"epoch": 63.33,
"learning_rate": 4.2966101694915254e-05,
"loss": 0.8016,
"step": 4370
},
{
"epoch": 63.48,
"learning_rate": 4.279661016949153e-05,
"loss": 0.6789,
"step": 4380
},
{
"epoch": 63.62,
"learning_rate": 4.26271186440678e-05,
"loss": 0.633,
"step": 4390
},
{
"epoch": 63.77,
"learning_rate": 4.245762711864407e-05,
"loss": 0.5965,
"step": 4400
},
{
"epoch": 63.91,
"learning_rate": 4.228813559322034e-05,
"loss": 0.6236,
"step": 4410
},
{
"epoch": 64.06,
"learning_rate": 4.211864406779661e-05,
"loss": 0.6159,
"step": 4420
},
{
"epoch": 64.2,
"learning_rate": 4.1949152542372886e-05,
"loss": 0.7223,
"step": 4430
},
{
"epoch": 64.35,
"learning_rate": 4.177966101694916e-05,
"loss": 0.6515,
"step": 4440
},
{
"epoch": 64.49,
"learning_rate": 4.1610169491525425e-05,
"loss": 0.6059,
"step": 4450
},
{
"epoch": 64.64,
"learning_rate": 4.14406779661017e-05,
"loss": 0.6254,
"step": 4460
},
{
"epoch": 64.78,
"learning_rate": 4.1271186440677965e-05,
"loss": 0.5723,
"step": 4470
},
{
"epoch": 64.93,
"learning_rate": 4.110169491525424e-05,
"loss": 0.6953,
"step": 4480
},
{
"epoch": 65.07,
"learning_rate": 4.093220338983051e-05,
"loss": 0.6141,
"step": 4490
},
{
"epoch": 65.22,
"learning_rate": 4.0762711864406784e-05,
"loss": 0.574,
"step": 4500
},
{
"epoch": 65.22,
"eval_loss": 0.18217810988426208,
"eval_runtime": 586.1117,
"eval_samples_per_second": 5.791,
"eval_steps_per_second": 0.725,
"eval_wer": 0.13701320579510062,
"step": 4500
},
{
"epoch": 65.36,
"learning_rate": 4.059322033898305e-05,
"loss": 0.6026,
"step": 4510
},
{
"epoch": 65.51,
"learning_rate": 4.0423728813559324e-05,
"loss": 0.533,
"step": 4520
},
{
"epoch": 65.65,
"learning_rate": 4.025423728813559e-05,
"loss": 0.6359,
"step": 4530
},
{
"epoch": 65.8,
"learning_rate": 4.008474576271187e-05,
"loss": 0.6229,
"step": 4540
},
{
"epoch": 65.94,
"learning_rate": 3.9915254237288136e-05,
"loss": 0.6072,
"step": 4550
},
{
"epoch": 66.09,
"learning_rate": 3.974576271186441e-05,
"loss": 0.6953,
"step": 4560
},
{
"epoch": 66.23,
"learning_rate": 3.9576271186440676e-05,
"loss": 0.6184,
"step": 4570
},
{
"epoch": 66.38,
"learning_rate": 3.940677966101695e-05,
"loss": 0.6346,
"step": 4580
},
{
"epoch": 66.52,
"learning_rate": 3.923728813559322e-05,
"loss": 0.5897,
"step": 4590
},
{
"epoch": 66.67,
"learning_rate": 3.9067796610169495e-05,
"loss": 0.6499,
"step": 4600
},
{
"epoch": 66.81,
"learning_rate": 3.889830508474576e-05,
"loss": 0.6347,
"step": 4610
},
{
"epoch": 66.96,
"learning_rate": 3.8728813559322035e-05,
"loss": 0.6815,
"step": 4620
},
{
"epoch": 67.1,
"learning_rate": 3.855932203389831e-05,
"loss": 0.6266,
"step": 4630
},
{
"epoch": 67.25,
"learning_rate": 3.838983050847458e-05,
"loss": 0.6055,
"step": 4640
},
{
"epoch": 67.39,
"learning_rate": 3.8220338983050854e-05,
"loss": 0.6518,
"step": 4650
},
{
"epoch": 67.54,
"learning_rate": 3.805084745762712e-05,
"loss": 0.6208,
"step": 4660
},
{
"epoch": 67.68,
"learning_rate": 3.7881355932203394e-05,
"loss": 0.5473,
"step": 4670
},
{
"epoch": 67.83,
"learning_rate": 3.771186440677966e-05,
"loss": 0.6044,
"step": 4680
},
{
"epoch": 67.97,
"learning_rate": 3.7542372881355934e-05,
"loss": 0.5821,
"step": 4690
},
{
"epoch": 68.12,
"learning_rate": 3.737288135593221e-05,
"loss": 0.7136,
"step": 4700
},
{
"epoch": 68.26,
"learning_rate": 3.720338983050848e-05,
"loss": 0.6163,
"step": 4710
},
{
"epoch": 68.41,
"learning_rate": 3.7033898305084746e-05,
"loss": 0.6651,
"step": 4720
},
{
"epoch": 68.55,
"learning_rate": 3.686440677966102e-05,
"loss": 0.5881,
"step": 4730
},
{
"epoch": 68.7,
"learning_rate": 3.6694915254237286e-05,
"loss": 0.6139,
"step": 4740
},
{
"epoch": 68.84,
"learning_rate": 3.6525423728813566e-05,
"loss": 0.6667,
"step": 4750
},
{
"epoch": 68.99,
"learning_rate": 3.635593220338983e-05,
"loss": 0.6623,
"step": 4760
},
{
"epoch": 69.13,
"learning_rate": 3.6186440677966105e-05,
"loss": 0.6268,
"step": 4770
},
{
"epoch": 69.28,
"learning_rate": 3.601694915254237e-05,
"loss": 0.7324,
"step": 4780
},
{
"epoch": 69.42,
"learning_rate": 3.5847457627118645e-05,
"loss": 0.6088,
"step": 4790
},
{
"epoch": 69.57,
"learning_rate": 3.567796610169492e-05,
"loss": 0.5793,
"step": 4800
},
{
"epoch": 69.71,
"learning_rate": 3.550847457627119e-05,
"loss": 0.6164,
"step": 4810
},
{
"epoch": 69.86,
"learning_rate": 3.533898305084746e-05,
"loss": 0.5903,
"step": 4820
},
{
"epoch": 70.0,
"learning_rate": 3.516949152542373e-05,
"loss": 0.5692,
"step": 4830
},
{
"epoch": 70.14,
"learning_rate": 3.5e-05,
"loss": 0.6192,
"step": 4840
},
{
"epoch": 70.29,
"learning_rate": 3.483050847457627e-05,
"loss": 0.5691,
"step": 4850
},
{
"epoch": 70.43,
"learning_rate": 3.466101694915254e-05,
"loss": 0.6212,
"step": 4860
},
{
"epoch": 70.58,
"learning_rate": 3.4491525423728816e-05,
"loss": 0.5972,
"step": 4870
},
{
"epoch": 70.72,
"learning_rate": 3.432203389830508e-05,
"loss": 0.643,
"step": 4880
},
{
"epoch": 70.87,
"learning_rate": 3.4152542372881356e-05,
"loss": 0.6119,
"step": 4890
},
{
"epoch": 71.01,
"learning_rate": 3.398305084745763e-05,
"loss": 0.6299,
"step": 4900
},
{
"epoch": 71.16,
"learning_rate": 3.38135593220339e-05,
"loss": 0.5901,
"step": 4910
},
{
"epoch": 71.3,
"learning_rate": 3.3644067796610175e-05,
"loss": 0.5598,
"step": 4920
},
{
"epoch": 71.45,
"learning_rate": 3.347457627118644e-05,
"loss": 0.6103,
"step": 4930
},
{
"epoch": 71.59,
"learning_rate": 3.3305084745762715e-05,
"loss": 0.6357,
"step": 4940
},
{
"epoch": 71.74,
"learning_rate": 3.313559322033898e-05,
"loss": 0.6267,
"step": 4950
},
{
"epoch": 71.88,
"learning_rate": 3.296610169491526e-05,
"loss": 0.6761,
"step": 4960
},
{
"epoch": 72.03,
"learning_rate": 3.279661016949153e-05,
"loss": 0.5721,
"step": 4970
},
{
"epoch": 72.17,
"learning_rate": 3.26271186440678e-05,
"loss": 0.606,
"step": 4980
},
{
"epoch": 72.32,
"learning_rate": 3.245762711864407e-05,
"loss": 0.5572,
"step": 4990
},
{
"epoch": 72.46,
"learning_rate": 3.228813559322034e-05,
"loss": 0.6211,
"step": 5000
},
{
"epoch": 72.46,
"eval_loss": 0.19374500215053558,
"eval_runtime": 586.2574,
"eval_samples_per_second": 5.789,
"eval_steps_per_second": 0.725,
"eval_wer": 0.13547650157368374,
"step": 5000
},
{
"epoch": 72.61,
"learning_rate": 3.2118644067796613e-05,
"loss": 0.5981,
"step": 5010
},
{
"epoch": 72.75,
"learning_rate": 3.1949152542372887e-05,
"loss": 0.5553,
"step": 5020
},
{
"epoch": 72.9,
"learning_rate": 3.177966101694915e-05,
"loss": 0.5557,
"step": 5030
},
{
"epoch": 73.04,
"learning_rate": 3.1610169491525426e-05,
"loss": 0.6467,
"step": 5040
},
{
"epoch": 73.19,
"learning_rate": 3.144067796610169e-05,
"loss": 0.572,
"step": 5050
},
{
"epoch": 73.33,
"learning_rate": 3.1271186440677966e-05,
"loss": 0.5939,
"step": 5060
},
{
"epoch": 73.48,
"learning_rate": 3.110169491525424e-05,
"loss": 0.6428,
"step": 5070
},
{
"epoch": 73.62,
"learning_rate": 3.093220338983051e-05,
"loss": 0.5347,
"step": 5080
},
{
"epoch": 73.77,
"learning_rate": 3.076271186440678e-05,
"loss": 0.6837,
"step": 5090
},
{
"epoch": 73.91,
"learning_rate": 3.059322033898305e-05,
"loss": 0.6088,
"step": 5100
},
{
"epoch": 74.06,
"learning_rate": 3.042372881355932e-05,
"loss": 0.6286,
"step": 5110
},
{
"epoch": 74.2,
"learning_rate": 3.0254237288135594e-05,
"loss": 0.6553,
"step": 5120
},
{
"epoch": 74.35,
"learning_rate": 3.0084745762711864e-05,
"loss": 0.6107,
"step": 5130
},
{
"epoch": 74.49,
"learning_rate": 2.9915254237288137e-05,
"loss": 0.589,
"step": 5140
},
{
"epoch": 74.64,
"learning_rate": 2.9745762711864407e-05,
"loss": 0.5846,
"step": 5150
},
{
"epoch": 74.78,
"learning_rate": 2.957627118644068e-05,
"loss": 0.5878,
"step": 5160
},
{
"epoch": 74.93,
"learning_rate": 2.9406779661016953e-05,
"loss": 0.7024,
"step": 5170
},
{
"epoch": 75.07,
"learning_rate": 2.9237288135593223e-05,
"loss": 0.6198,
"step": 5180
},
{
"epoch": 75.22,
"learning_rate": 2.9067796610169496e-05,
"loss": 0.6065,
"step": 5190
},
{
"epoch": 75.36,
"learning_rate": 2.8898305084745763e-05,
"loss": 0.5833,
"step": 5200
},
{
"epoch": 75.51,
"learning_rate": 2.8728813559322036e-05,
"loss": 0.6323,
"step": 5210
},
{
"epoch": 75.65,
"learning_rate": 2.8559322033898306e-05,
"loss": 0.5818,
"step": 5220
},
{
"epoch": 75.8,
"learning_rate": 2.838983050847458e-05,
"loss": 0.6146,
"step": 5230
},
{
"epoch": 75.94,
"learning_rate": 2.822033898305085e-05,
"loss": 0.5587,
"step": 5240
},
{
"epoch": 76.09,
"learning_rate": 2.8050847457627122e-05,
"loss": 0.5374,
"step": 5250
},
{
"epoch": 76.23,
"learning_rate": 2.788135593220339e-05,
"loss": 0.6266,
"step": 5260
},
{
"epoch": 76.38,
"learning_rate": 2.7711864406779665e-05,
"loss": 0.6307,
"step": 5270
},
{
"epoch": 76.52,
"learning_rate": 2.754237288135593e-05,
"loss": 0.586,
"step": 5280
},
{
"epoch": 76.67,
"learning_rate": 2.7372881355932208e-05,
"loss": 0.6525,
"step": 5290
},
{
"epoch": 76.81,
"learning_rate": 2.7203389830508474e-05,
"loss": 0.5892,
"step": 5300
},
{
"epoch": 76.96,
"learning_rate": 2.7033898305084747e-05,
"loss": 0.6079,
"step": 5310
},
{
"epoch": 77.1,
"learning_rate": 2.6864406779661017e-05,
"loss": 0.5914,
"step": 5320
},
{
"epoch": 77.25,
"learning_rate": 2.669491525423729e-05,
"loss": 0.5444,
"step": 5330
},
{
"epoch": 77.39,
"learning_rate": 2.652542372881356e-05,
"loss": 0.6253,
"step": 5340
},
{
"epoch": 77.54,
"learning_rate": 2.6355932203389833e-05,
"loss": 0.6257,
"step": 5350
},
{
"epoch": 77.68,
"learning_rate": 2.61864406779661e-05,
"loss": 0.5631,
"step": 5360
},
{
"epoch": 77.83,
"learning_rate": 2.6016949152542376e-05,
"loss": 0.5509,
"step": 5370
},
{
"epoch": 77.97,
"learning_rate": 2.5847457627118642e-05,
"loss": 0.6038,
"step": 5380
},
{
"epoch": 78.12,
"learning_rate": 2.5677966101694915e-05,
"loss": 0.6923,
"step": 5390
},
{
"epoch": 78.26,
"learning_rate": 2.5508474576271185e-05,
"loss": 0.6687,
"step": 5400
},
{
"epoch": 78.41,
"learning_rate": 2.5338983050847458e-05,
"loss": 0.6324,
"step": 5410
},
{
"epoch": 78.55,
"learning_rate": 2.5169491525423728e-05,
"loss": 0.584,
"step": 5420
},
{
"epoch": 78.7,
"learning_rate": 2.5e-05,
"loss": 0.5833,
"step": 5430
},
{
"epoch": 78.84,
"learning_rate": 2.483050847457627e-05,
"loss": 0.5061,
"step": 5440
},
{
"epoch": 78.99,
"learning_rate": 2.4661016949152544e-05,
"loss": 0.5788,
"step": 5450
},
{
"epoch": 79.13,
"learning_rate": 2.4491525423728814e-05,
"loss": 0.783,
"step": 5460
},
{
"epoch": 79.28,
"learning_rate": 2.4322033898305087e-05,
"loss": 0.5854,
"step": 5470
},
{
"epoch": 79.42,
"learning_rate": 2.4152542372881357e-05,
"loss": 0.6223,
"step": 5480
},
{
"epoch": 79.57,
"learning_rate": 2.3983050847457627e-05,
"loss": 0.5626,
"step": 5490
},
{
"epoch": 79.71,
"learning_rate": 2.38135593220339e-05,
"loss": 0.5883,
"step": 5500
},
{
"epoch": 79.71,
"eval_loss": 0.18716615438461304,
"eval_runtime": 588.0258,
"eval_samples_per_second": 5.772,
"eval_steps_per_second": 0.723,
"eval_wer": 0.1334960431920716,
"step": 5500
},
{
"epoch": 79.86,
"learning_rate": 2.364406779661017e-05,
"loss": 0.5428,
"step": 5510
},
{
"epoch": 80.0,
"learning_rate": 2.347457627118644e-05,
"loss": 0.6025,
"step": 5520
},
{
"epoch": 80.14,
"learning_rate": 2.3305084745762712e-05,
"loss": 0.5627,
"step": 5530
},
{
"epoch": 80.29,
"learning_rate": 2.3135593220338982e-05,
"loss": 0.5748,
"step": 5540
},
{
"epoch": 80.43,
"learning_rate": 2.2966101694915255e-05,
"loss": 0.5737,
"step": 5550
},
{
"epoch": 80.58,
"learning_rate": 2.2796610169491525e-05,
"loss": 0.5734,
"step": 5560
},
{
"epoch": 80.72,
"learning_rate": 2.2627118644067798e-05,
"loss": 0.5766,
"step": 5570
},
{
"epoch": 80.87,
"learning_rate": 2.245762711864407e-05,
"loss": 0.5437,
"step": 5580
},
{
"epoch": 81.01,
"learning_rate": 2.228813559322034e-05,
"loss": 0.6554,
"step": 5590
},
{
"epoch": 81.16,
"learning_rate": 2.211864406779661e-05,
"loss": 0.593,
"step": 5600
},
{
"epoch": 81.3,
"learning_rate": 2.1949152542372884e-05,
"loss": 0.5838,
"step": 5610
},
{
"epoch": 81.45,
"learning_rate": 2.1779661016949154e-05,
"loss": 0.6307,
"step": 5620
},
{
"epoch": 81.59,
"learning_rate": 2.1610169491525427e-05,
"loss": 0.582,
"step": 5630
},
{
"epoch": 81.74,
"learning_rate": 2.1440677966101697e-05,
"loss": 0.5314,
"step": 5640
},
{
"epoch": 81.88,
"learning_rate": 2.1271186440677967e-05,
"loss": 0.6731,
"step": 5650
},
{
"epoch": 82.03,
"learning_rate": 2.110169491525424e-05,
"loss": 0.558,
"step": 5660
},
{
"epoch": 82.17,
"learning_rate": 2.093220338983051e-05,
"loss": 0.5859,
"step": 5670
},
{
"epoch": 82.32,
"learning_rate": 2.076271186440678e-05,
"loss": 0.5677,
"step": 5680
},
{
"epoch": 82.46,
"learning_rate": 2.0593220338983052e-05,
"loss": 0.537,
"step": 5690
},
{
"epoch": 82.61,
"learning_rate": 2.0423728813559322e-05,
"loss": 0.6172,
"step": 5700
},
{
"epoch": 82.75,
"learning_rate": 2.0254237288135595e-05,
"loss": 0.592,
"step": 5710
},
{
"epoch": 82.9,
"learning_rate": 2.0084745762711865e-05,
"loss": 0.6172,
"step": 5720
},
{
"epoch": 83.04,
"learning_rate": 1.9915254237288135e-05,
"loss": 0.5772,
"step": 5730
},
{
"epoch": 83.19,
"learning_rate": 1.9745762711864408e-05,
"loss": 0.58,
"step": 5740
},
{
"epoch": 83.33,
"learning_rate": 1.9576271186440678e-05,
"loss": 0.5454,
"step": 5750
},
{
"epoch": 83.48,
"learning_rate": 1.940677966101695e-05,
"loss": 0.5959,
"step": 5760
},
{
"epoch": 83.62,
"learning_rate": 1.923728813559322e-05,
"loss": 0.5758,
"step": 5770
},
{
"epoch": 83.77,
"learning_rate": 1.906779661016949e-05,
"loss": 0.5605,
"step": 5780
},
{
"epoch": 83.91,
"learning_rate": 1.8898305084745764e-05,
"loss": 0.5798,
"step": 5790
},
{
"epoch": 84.06,
"learning_rate": 1.8728813559322033e-05,
"loss": 0.5482,
"step": 5800
},
{
"epoch": 84.2,
"learning_rate": 1.8559322033898307e-05,
"loss": 0.5288,
"step": 5810
},
{
"epoch": 84.35,
"learning_rate": 1.8389830508474576e-05,
"loss": 0.5644,
"step": 5820
},
{
"epoch": 84.49,
"learning_rate": 1.8220338983050846e-05,
"loss": 0.5637,
"step": 5830
},
{
"epoch": 84.64,
"learning_rate": 1.805084745762712e-05,
"loss": 0.6117,
"step": 5840
},
{
"epoch": 84.78,
"learning_rate": 1.7881355932203392e-05,
"loss": 0.5939,
"step": 5850
},
{
"epoch": 84.93,
"learning_rate": 1.7711864406779662e-05,
"loss": 0.6476,
"step": 5860
},
{
"epoch": 85.07,
"learning_rate": 1.7542372881355935e-05,
"loss": 0.5519,
"step": 5870
},
{
"epoch": 85.22,
"learning_rate": 1.7372881355932205e-05,
"loss": 0.5438,
"step": 5880
},
{
"epoch": 85.36,
"learning_rate": 1.7203389830508475e-05,
"loss": 0.5756,
"step": 5890
},
{
"epoch": 85.51,
"learning_rate": 1.7033898305084748e-05,
"loss": 0.507,
"step": 5900
},
{
"epoch": 85.65,
"learning_rate": 1.6864406779661018e-05,
"loss": 0.6028,
"step": 5910
},
{
"epoch": 85.8,
"learning_rate": 1.669491525423729e-05,
"loss": 0.6395,
"step": 5920
},
{
"epoch": 85.94,
"learning_rate": 1.652542372881356e-05,
"loss": 0.544,
"step": 5930
},
{
"epoch": 86.09,
"learning_rate": 1.635593220338983e-05,
"loss": 0.6061,
"step": 5940
},
{
"epoch": 86.23,
"learning_rate": 1.6186440677966104e-05,
"loss": 0.5938,
"step": 5950
},
{
"epoch": 86.38,
"learning_rate": 1.6016949152542373e-05,
"loss": 0.5194,
"step": 5960
},
{
"epoch": 86.52,
"learning_rate": 1.5847457627118646e-05,
"loss": 0.5893,
"step": 5970
},
{
"epoch": 86.67,
"learning_rate": 1.5677966101694916e-05,
"loss": 0.6398,
"step": 5980
},
{
"epoch": 86.81,
"learning_rate": 1.5508474576271186e-05,
"loss": 0.5758,
"step": 5990
},
{
"epoch": 86.96,
"learning_rate": 1.533898305084746e-05,
"loss": 0.5666,
"step": 6000
},
{
"epoch": 86.96,
"eval_loss": 0.18741416931152344,
"eval_runtime": 591.1329,
"eval_samples_per_second": 5.742,
"eval_steps_per_second": 0.719,
"eval_wer": 0.13238665779158346,
"step": 6000
},
{
"epoch": 87.1,
"learning_rate": 1.5169491525423729e-05,
"loss": 0.5598,
"step": 6010
},
{
"epoch": 87.25,
"learning_rate": 1.5e-05,
"loss": 0.6275,
"step": 6020
},
{
"epoch": 87.39,
"learning_rate": 1.4830508474576272e-05,
"loss": 0.5636,
"step": 6030
},
{
"epoch": 87.54,
"learning_rate": 1.4661016949152542e-05,
"loss": 0.5811,
"step": 6040
},
{
"epoch": 87.68,
"learning_rate": 1.4491525423728813e-05,
"loss": 0.5395,
"step": 6050
},
{
"epoch": 87.83,
"learning_rate": 1.4322033898305085e-05,
"loss": 0.5891,
"step": 6060
},
{
"epoch": 87.97,
"learning_rate": 1.4152542372881356e-05,
"loss": 0.5548,
"step": 6070
},
{
"epoch": 88.12,
"learning_rate": 1.3983050847457627e-05,
"loss": 0.5743,
"step": 6080
},
{
"epoch": 88.26,
"learning_rate": 1.3813559322033897e-05,
"loss": 0.5601,
"step": 6090
},
{
"epoch": 88.41,
"learning_rate": 1.3644067796610169e-05,
"loss": 0.609,
"step": 6100
},
{
"epoch": 88.55,
"learning_rate": 1.3474576271186442e-05,
"loss": 0.5156,
"step": 6110
},
{
"epoch": 88.7,
"learning_rate": 1.3305084745762713e-05,
"loss": 0.6113,
"step": 6120
},
{
"epoch": 88.84,
"learning_rate": 1.3135593220338985e-05,
"loss": 0.5672,
"step": 6130
},
{
"epoch": 88.99,
"learning_rate": 1.2966101694915256e-05,
"loss": 0.5586,
"step": 6140
},
{
"epoch": 89.13,
"learning_rate": 1.2796610169491528e-05,
"loss": 0.6413,
"step": 6150
},
{
"epoch": 89.28,
"learning_rate": 1.2627118644067797e-05,
"loss": 0.5624,
"step": 6160
},
{
"epoch": 89.42,
"learning_rate": 1.2457627118644069e-05,
"loss": 0.557,
"step": 6170
},
{
"epoch": 89.57,
"learning_rate": 1.228813559322034e-05,
"loss": 0.6058,
"step": 6180
},
{
"epoch": 89.71,
"learning_rate": 1.2118644067796612e-05,
"loss": 0.5162,
"step": 6190
},
{
"epoch": 89.86,
"learning_rate": 1.1949152542372882e-05,
"loss": 0.558,
"step": 6200
},
{
"epoch": 90.0,
"learning_rate": 1.1779661016949153e-05,
"loss": 0.5162,
"step": 6210
},
{
"epoch": 90.14,
"learning_rate": 1.1610169491525424e-05,
"loss": 0.5429,
"step": 6220
},
{
"epoch": 90.29,
"learning_rate": 1.1440677966101696e-05,
"loss": 0.6122,
"step": 6230
},
{
"epoch": 90.43,
"learning_rate": 1.1271186440677967e-05,
"loss": 0.5315,
"step": 6240
},
{
"epoch": 90.58,
"learning_rate": 1.1101694915254237e-05,
"loss": 0.5651,
"step": 6250
},
{
"epoch": 90.72,
"learning_rate": 1.0932203389830509e-05,
"loss": 0.5548,
"step": 6260
},
{
"epoch": 90.87,
"learning_rate": 1.076271186440678e-05,
"loss": 0.5796,
"step": 6270
},
{
"epoch": 91.01,
"learning_rate": 1.0593220338983052e-05,
"loss": 0.6074,
"step": 6280
},
{
"epoch": 91.16,
"learning_rate": 1.0423728813559321e-05,
"loss": 0.5952,
"step": 6290
},
{
"epoch": 91.3,
"learning_rate": 1.0254237288135593e-05,
"loss": 0.5801,
"step": 6300
},
{
"epoch": 91.45,
"learning_rate": 1.0084745762711866e-05,
"loss": 0.578,
"step": 6310
},
{
"epoch": 91.59,
"learning_rate": 9.915254237288137e-06,
"loss": 0.5429,
"step": 6320
},
{
"epoch": 91.74,
"learning_rate": 9.745762711864407e-06,
"loss": 0.606,
"step": 6330
},
{
"epoch": 91.88,
"learning_rate": 9.576271186440679e-06,
"loss": 0.5513,
"step": 6340
},
{
"epoch": 92.03,
"learning_rate": 9.40677966101695e-06,
"loss": 0.5766,
"step": 6350
},
{
"epoch": 92.17,
"learning_rate": 9.237288135593222e-06,
"loss": 0.5609,
"step": 6360
},
{
"epoch": 92.32,
"learning_rate": 9.067796610169491e-06,
"loss": 0.5741,
"step": 6370
},
{
"epoch": 92.46,
"learning_rate": 8.898305084745763e-06,
"loss": 0.6328,
"step": 6380
},
{
"epoch": 92.61,
"learning_rate": 8.728813559322034e-06,
"loss": 0.5216,
"step": 6390
},
{
"epoch": 92.75,
"learning_rate": 8.559322033898306e-06,
"loss": 0.5547,
"step": 6400
},
{
"epoch": 92.9,
"learning_rate": 8.389830508474575e-06,
"loss": 0.6096,
"step": 6410
},
{
"epoch": 93.04,
"learning_rate": 8.220338983050847e-06,
"loss": 0.626,
"step": 6420
},
{
"epoch": 93.19,
"learning_rate": 8.050847457627118e-06,
"loss": 0.5131,
"step": 6430
},
{
"epoch": 93.33,
"learning_rate": 7.881355932203392e-06,
"loss": 0.5603,
"step": 6440
},
{
"epoch": 93.48,
"learning_rate": 7.711864406779661e-06,
"loss": 0.6756,
"step": 6450
},
{
"epoch": 93.62,
"learning_rate": 7.542372881355933e-06,
"loss": 0.6087,
"step": 6460
},
{
"epoch": 93.77,
"learning_rate": 7.372881355932204e-06,
"loss": 0.5605,
"step": 6470
},
{
"epoch": 93.91,
"learning_rate": 7.203389830508475e-06,
"loss": 0.6238,
"step": 6480
},
{
"epoch": 94.06,
"learning_rate": 7.033898305084746e-06,
"loss": 0.6002,
"step": 6490
},
{
"epoch": 94.2,
"learning_rate": 6.864406779661017e-06,
"loss": 0.5436,
"step": 6500
},
{
"epoch": 94.2,
"eval_loss": 0.19244608283042908,
"eval_runtime": 586.7241,
"eval_samples_per_second": 5.785,
"eval_steps_per_second": 0.724,
"eval_wer": 0.1310882659895307,
"step": 6500
},
{
"epoch": 94.35,
"learning_rate": 6.694915254237288e-06,
"loss": 0.6487,
"step": 6510
},
{
"epoch": 94.49,
"learning_rate": 6.52542372881356e-06,
"loss": 0.5546,
"step": 6520
},
{
"epoch": 94.64,
"learning_rate": 6.3559322033898304e-06,
"loss": 0.6267,
"step": 6530
},
{
"epoch": 94.78,
"learning_rate": 6.186440677966102e-06,
"loss": 0.6587,
"step": 6540
},
{
"epoch": 94.93,
"learning_rate": 6.016949152542373e-06,
"loss": 0.6001,
"step": 6550
},
{
"epoch": 95.07,
"learning_rate": 5.847457627118645e-06,
"loss": 0.582,
"step": 6560
},
{
"epoch": 95.22,
"learning_rate": 5.677966101694915e-06,
"loss": 0.5926,
"step": 6570
},
{
"epoch": 95.36,
"learning_rate": 5.508474576271187e-06,
"loss": 0.5256,
"step": 6580
},
{
"epoch": 95.51,
"learning_rate": 5.3389830508474575e-06,
"loss": 0.5113,
"step": 6590
},
{
"epoch": 95.65,
"learning_rate": 5.169491525423729e-06,
"loss": 0.5731,
"step": 6600
},
{
"epoch": 95.8,
"learning_rate": 5e-06,
"loss": 0.609,
"step": 6610
},
{
"epoch": 95.94,
"learning_rate": 4.830508474576272e-06,
"loss": 0.6031,
"step": 6620
},
{
"epoch": 96.09,
"learning_rate": 4.6610169491525425e-06,
"loss": 0.5478,
"step": 6630
},
{
"epoch": 96.23,
"learning_rate": 4.491525423728814e-06,
"loss": 0.5287,
"step": 6640
},
{
"epoch": 96.38,
"learning_rate": 4.3220338983050846e-06,
"loss": 0.5772,
"step": 6650
},
{
"epoch": 96.52,
"learning_rate": 4.152542372881356e-06,
"loss": 0.572,
"step": 6660
},
{
"epoch": 96.67,
"learning_rate": 3.983050847457627e-06,
"loss": 0.5447,
"step": 6670
},
{
"epoch": 96.81,
"learning_rate": 3.813559322033899e-06,
"loss": 0.5333,
"step": 6680
},
{
"epoch": 96.96,
"learning_rate": 3.64406779661017e-06,
"loss": 0.7215,
"step": 6690
},
{
"epoch": 97.1,
"learning_rate": 3.474576271186441e-06,
"loss": 0.5152,
"step": 6700
},
{
"epoch": 97.25,
"learning_rate": 3.305084745762712e-06,
"loss": 0.6345,
"step": 6710
},
{
"epoch": 97.39,
"learning_rate": 3.135593220338983e-06,
"loss": 0.6032,
"step": 6720
},
{
"epoch": 97.54,
"learning_rate": 2.9661016949152545e-06,
"loss": 0.5735,
"step": 6730
},
{
"epoch": 97.68,
"learning_rate": 2.7966101694915256e-06,
"loss": 0.6241,
"step": 6740
},
{
"epoch": 97.83,
"learning_rate": 2.6271186440677966e-06,
"loss": 0.5547,
"step": 6750
},
{
"epoch": 97.97,
"learning_rate": 2.457627118644068e-06,
"loss": 0.5475,
"step": 6760
},
{
"epoch": 98.12,
"learning_rate": 2.288135593220339e-06,
"loss": 0.5953,
"step": 6770
},
{
"epoch": 98.26,
"learning_rate": 2.11864406779661e-06,
"loss": 0.528,
"step": 6780
},
{
"epoch": 98.41,
"learning_rate": 1.9491525423728816e-06,
"loss": 0.6749,
"step": 6790
},
{
"epoch": 98.55,
"learning_rate": 1.7796610169491526e-06,
"loss": 0.6135,
"step": 6800
},
{
"epoch": 98.7,
"learning_rate": 1.6101694915254237e-06,
"loss": 0.6421,
"step": 6810
},
{
"epoch": 98.84,
"learning_rate": 1.440677966101695e-06,
"loss": 0.5446,
"step": 6820
},
{
"epoch": 98.99,
"learning_rate": 1.2711864406779662e-06,
"loss": 0.5912,
"step": 6830
},
{
"epoch": 99.13,
"learning_rate": 1.1016949152542374e-06,
"loss": 0.5671,
"step": 6840
},
{
"epoch": 99.28,
"learning_rate": 9.322033898305086e-07,
"loss": 0.6215,
"step": 6850
},
{
"epoch": 99.42,
"learning_rate": 7.627118644067797e-07,
"loss": 0.6059,
"step": 6860
},
{
"epoch": 99.57,
"learning_rate": 5.932203389830508e-07,
"loss": 0.6379,
"step": 6870
},
{
"epoch": 99.71,
"learning_rate": 4.2372881355932204e-07,
"loss": 0.5951,
"step": 6880
},
{
"epoch": 99.86,
"learning_rate": 2.5423728813559323e-07,
"loss": 0.59,
"step": 6890
},
{
"epoch": 100.0,
"learning_rate": 8.47457627118644e-08,
"loss": 0.5327,
"step": 6900
},
{
"epoch": 100.0,
"step": 6900,
"total_flos": 1.0025325448199992e+20,
"train_loss": 1.2831729147399682,
"train_runtime": 53575.6745,
"train_samples_per_second": 4.095,
"train_steps_per_second": 0.129
}
],
"max_steps": 6900,
"num_train_epochs": 100,
"total_flos": 1.0025325448199992e+20,
"trial_name": null,
"trial_params": null
}