{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 1450,
"global_step": 2900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 1.0000000000000002e-06,
"loss": 8.9192,
"step": 10
},
{
"epoch": 0.14,
"learning_rate": 2.0000000000000003e-06,
"loss": 8.5138,
"step": 20
},
{
"epoch": 0.21,
"learning_rate": 3e-06,
"loss": 7.895,
"step": 30
},
{
"epoch": 0.28,
"learning_rate": 4.000000000000001e-06,
"loss": 8.2316,
"step": 40
},
{
"epoch": 0.34,
"learning_rate": 5e-06,
"loss": 8.3274,
"step": 50
},
{
"epoch": 0.41,
"learning_rate": 6e-06,
"loss": 7.7786,
"step": 60
},
{
"epoch": 0.48,
"learning_rate": 7.000000000000001e-06,
"loss": 7.1157,
"step": 70
},
{
"epoch": 0.55,
"learning_rate": 8.000000000000001e-06,
"loss": 7.5486,
"step": 80
},
{
"epoch": 0.62,
"learning_rate": 9e-06,
"loss": 6.633,
"step": 90
},
{
"epoch": 0.69,
"learning_rate": 1e-05,
"loss": 5.4894,
"step": 100
},
{
"epoch": 0.76,
"learning_rate": 1.1000000000000001e-05,
"loss": 4.5307,
"step": 110
},
{
"epoch": 0.83,
"learning_rate": 1.2e-05,
"loss": 4.1082,
"step": 120
},
{
"epoch": 0.9,
"learning_rate": 1.3000000000000001e-05,
"loss": 3.8578,
"step": 130
},
{
"epoch": 0.97,
"learning_rate": 1.4000000000000001e-05,
"loss": 3.675,
"step": 140
},
{
"epoch": 1.03,
"learning_rate": 1.5e-05,
"loss": 3.6452,
"step": 150
},
{
"epoch": 1.1,
"learning_rate": 1.6000000000000003e-05,
"loss": 3.5126,
"step": 160
},
{
"epoch": 1.17,
"learning_rate": 1.7000000000000003e-05,
"loss": 3.3975,
"step": 170
},
{
"epoch": 1.24,
"learning_rate": 1.8e-05,
"loss": 3.3037,
"step": 180
},
{
"epoch": 1.31,
"learning_rate": 1.9e-05,
"loss": 3.2924,
"step": 190
},
{
"epoch": 1.38,
"learning_rate": 2e-05,
"loss": 3.1584,
"step": 200
},
{
"epoch": 1.45,
"learning_rate": 2.1e-05,
"loss": 3.1086,
"step": 210
},
{
"epoch": 1.52,
"learning_rate": 2.2000000000000003e-05,
"loss": 3.1207,
"step": 220
},
{
"epoch": 1.59,
"learning_rate": 2.3000000000000003e-05,
"loss": 3.0599,
"step": 230
},
{
"epoch": 1.66,
"learning_rate": 2.4e-05,
"loss": 3.0037,
"step": 240
},
{
"epoch": 1.72,
"learning_rate": 2.5e-05,
"loss": 2.9957,
"step": 250
},
{
"epoch": 1.79,
"learning_rate": 2.6000000000000002e-05,
"loss": 3.0302,
"step": 260
},
{
"epoch": 1.86,
"learning_rate": 2.7000000000000002e-05,
"loss": 2.9769,
"step": 270
},
{
"epoch": 1.93,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.9324,
"step": 280
},
{
"epoch": 2.0,
"learning_rate": 2.9e-05,
"loss": 2.9486,
"step": 290
},
{
"epoch": 2.07,
"learning_rate": 3e-05,
"loss": 2.9842,
"step": 300
},
{
"epoch": 2.14,
"learning_rate": 3.1e-05,
"loss": 2.8971,
"step": 310
},
{
"epoch": 2.21,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.9036,
"step": 320
},
{
"epoch": 2.28,
"learning_rate": 3.3e-05,
"loss": 2.9475,
"step": 330
},
{
"epoch": 2.34,
"learning_rate": 3.4000000000000007e-05,
"loss": 2.8944,
"step": 340
},
{
"epoch": 2.41,
"learning_rate": 3.5e-05,
"loss": 2.8786,
"step": 350
},
{
"epoch": 2.48,
"learning_rate": 3.6e-05,
"loss": 2.8823,
"step": 360
},
{
"epoch": 2.55,
"learning_rate": 3.7e-05,
"loss": 2.9207,
"step": 370
},
{
"epoch": 2.62,
"learning_rate": 3.8e-05,
"loss": 2.8595,
"step": 380
},
{
"epoch": 2.69,
"learning_rate": 3.9000000000000006e-05,
"loss": 2.8531,
"step": 390
},
{
"epoch": 2.76,
"learning_rate": 4e-05,
"loss": 2.8828,
"step": 400
},
{
"epoch": 2.83,
"learning_rate": 4.1e-05,
"loss": 2.8601,
"step": 410
},
{
"epoch": 2.9,
"learning_rate": 4.2e-05,
"loss": 2.8315,
"step": 420
},
{
"epoch": 2.97,
"learning_rate": 4.3e-05,
"loss": 2.8174,
"step": 430
},
{
"epoch": 3.03,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.89,
"step": 440
},
{
"epoch": 3.1,
"learning_rate": 4.5e-05,
"loss": 2.815,
"step": 450
},
{
"epoch": 3.17,
"learning_rate": 4.600000000000001e-05,
"loss": 2.7842,
"step": 460
},
{
"epoch": 3.24,
"learning_rate": 4.7e-05,
"loss": 2.8152,
"step": 470
},
{
"epoch": 3.31,
"learning_rate": 4.8e-05,
"loss": 2.8418,
"step": 480
},
{
"epoch": 3.38,
"learning_rate": 4.9e-05,
"loss": 2.7557,
"step": 490
},
{
"epoch": 3.45,
"learning_rate": 5e-05,
"loss": 2.7455,
"step": 500
},
{
"epoch": 3.52,
"learning_rate": 5.1000000000000006e-05,
"loss": 2.8095,
"step": 510
},
{
"epoch": 3.59,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.7294,
"step": 520
},
{
"epoch": 3.66,
"learning_rate": 5.300000000000001e-05,
"loss": 2.6936,
"step": 530
},
{
"epoch": 3.72,
"learning_rate": 5.4000000000000005e-05,
"loss": 2.6999,
"step": 540
},
{
"epoch": 3.79,
"learning_rate": 5.500000000000001e-05,
"loss": 2.7664,
"step": 550
},
{
"epoch": 3.86,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.6373,
"step": 560
},
{
"epoch": 3.93,
"learning_rate": 5.6999999999999996e-05,
"loss": 2.6569,
"step": 570
},
{
"epoch": 4.0,
"learning_rate": 5.8e-05,
"loss": 2.7333,
"step": 580
},
{
"epoch": 4.07,
"learning_rate": 5.9e-05,
"loss": 2.6905,
"step": 590
},
{
"epoch": 4.14,
"learning_rate": 6e-05,
"loss": 2.5422,
"step": 600
},
{
"epoch": 4.21,
"learning_rate": 6.1e-05,
"loss": 2.5804,
"step": 610
},
{
"epoch": 4.28,
"learning_rate": 6.2e-05,
"loss": 2.7211,
"step": 620
},
{
"epoch": 4.34,
"learning_rate": 6.3e-05,
"loss": 2.518,
"step": 630
},
{
"epoch": 4.41,
"learning_rate": 6.400000000000001e-05,
"loss": 2.4777,
"step": 640
},
{
"epoch": 4.48,
"learning_rate": 6.500000000000001e-05,
"loss": 2.5562,
"step": 650
},
{
"epoch": 4.55,
"learning_rate": 6.6e-05,
"loss": 2.5929,
"step": 660
},
{
"epoch": 4.62,
"learning_rate": 6.7e-05,
"loss": 2.3472,
"step": 670
},
{
"epoch": 4.69,
"learning_rate": 6.800000000000001e-05,
"loss": 2.4409,
"step": 680
},
{
"epoch": 4.76,
"learning_rate": 6.9e-05,
"loss": 2.5433,
"step": 690
},
{
"epoch": 4.83,
"learning_rate": 7e-05,
"loss": 2.3576,
"step": 700
},
{
"epoch": 4.9,
"learning_rate": 7.1e-05,
"loss": 2.2553,
"step": 710
},
{
"epoch": 4.97,
"learning_rate": 7.2e-05,
"loss": 2.3377,
"step": 720
},
{
"epoch": 5.03,
"learning_rate": 7.3e-05,
"loss": 2.4641,
"step": 730
},
{
"epoch": 5.1,
"learning_rate": 7.4e-05,
"loss": 2.1024,
"step": 740
},
{
"epoch": 5.17,
"learning_rate": 7.500000000000001e-05,
"loss": 2.1396,
"step": 750
},
{
"epoch": 5.24,
"learning_rate": 7.6e-05,
"loss": 2.2905,
"step": 760
},
{
"epoch": 5.31,
"learning_rate": 7.7e-05,
"loss": 2.1351,
"step": 770
},
{
"epoch": 5.38,
"learning_rate": 7.800000000000001e-05,
"loss": 1.8236,
"step": 780
},
{
"epoch": 5.45,
"learning_rate": 7.900000000000001e-05,
"loss": 1.9193,
"step": 790
},
{
"epoch": 5.52,
"learning_rate": 8e-05,
"loss": 2.1076,
"step": 800
},
{
"epoch": 5.59,
"learning_rate": 8.1e-05,
"loss": 1.7412,
"step": 810
},
{
"epoch": 5.66,
"learning_rate": 8.2e-05,
"loss": 1.6121,
"step": 820
},
{
"epoch": 5.72,
"learning_rate": 8.3e-05,
"loss": 1.6504,
"step": 830
},
{
"epoch": 5.79,
"learning_rate": 8.4e-05,
"loss": 1.7832,
"step": 840
},
{
"epoch": 5.86,
"learning_rate": 8.5e-05,
"loss": 1.3229,
"step": 850
},
{
"epoch": 5.93,
"learning_rate": 8.6e-05,
"loss": 1.4531,
"step": 860
},
{
"epoch": 6.0,
"learning_rate": 8.7e-05,
"loss": 1.5507,
"step": 870
},
{
"epoch": 6.07,
"learning_rate": 8.800000000000001e-05,
"loss": 1.3791,
"step": 880
},
{
"epoch": 6.14,
"learning_rate": 8.900000000000001e-05,
"loss": 1.1911,
"step": 890
},
{
"epoch": 6.21,
"learning_rate": 9e-05,
"loss": 1.2356,
"step": 900
},
{
"epoch": 6.28,
"learning_rate": 9.1e-05,
"loss": 1.3927,
"step": 910
},
{
"epoch": 6.34,
"learning_rate": 9.200000000000001e-05,
"loss": 1.0738,
"step": 920
},
{
"epoch": 6.41,
"learning_rate": 9.300000000000001e-05,
"loss": 1.0313,
"step": 930
},
{
"epoch": 6.48,
"learning_rate": 9.4e-05,
"loss": 1.1769,
"step": 940
},
{
"epoch": 6.55,
"learning_rate": 9.5e-05,
"loss": 1.2078,
"step": 950
},
{
"epoch": 6.62,
"learning_rate": 9.6e-05,
"loss": 0.9346,
"step": 960
},
{
"epoch": 6.69,
"learning_rate": 9.7e-05,
"loss": 0.9883,
"step": 970
},
{
"epoch": 6.76,
"learning_rate": 9.8e-05,
"loss": 1.1853,
"step": 980
},
{
"epoch": 6.83,
"learning_rate": 9.900000000000001e-05,
"loss": 0.9751,
"step": 990
},
{
"epoch": 6.9,
"learning_rate": 0.0001,
"loss": 0.8268,
"step": 1000
},
{
"epoch": 6.97,
"learning_rate": 9.947368421052632e-05,
"loss": 0.9507,
"step": 1010
},
{
"epoch": 7.03,
"learning_rate": 9.894736842105263e-05,
"loss": 1.0724,
"step": 1020
},
{
"epoch": 7.1,
"learning_rate": 9.842105263157894e-05,
"loss": 0.7477,
"step": 1030
},
{
"epoch": 7.17,
"learning_rate": 9.789473684210527e-05,
"loss": 0.7514,
"step": 1040
},
{
"epoch": 7.24,
"learning_rate": 9.736842105263158e-05,
"loss": 0.8923,
"step": 1050
},
{
"epoch": 7.31,
"learning_rate": 9.68421052631579e-05,
"loss": 0.9013,
"step": 1060
},
{
"epoch": 7.38,
"learning_rate": 9.631578947368421e-05,
"loss": 0.6789,
"step": 1070
},
{
"epoch": 7.45,
"learning_rate": 9.578947368421052e-05,
"loss": 0.7158,
"step": 1080
},
{
"epoch": 7.52,
"learning_rate": 9.526315789473685e-05,
"loss": 0.9585,
"step": 1090
},
{
"epoch": 7.59,
"learning_rate": 9.473684210526316e-05,
"loss": 0.7256,
"step": 1100
},
{
"epoch": 7.66,
"learning_rate": 9.421052631578949e-05,
"loss": 0.6444,
"step": 1110
},
{
"epoch": 7.72,
"learning_rate": 9.36842105263158e-05,
"loss": 0.7693,
"step": 1120
},
{
"epoch": 7.79,
"learning_rate": 9.315789473684211e-05,
"loss": 0.8643,
"step": 1130
},
{
"epoch": 7.86,
"learning_rate": 9.263157894736843e-05,
"loss": 0.6321,
"step": 1140
},
{
"epoch": 7.93,
"learning_rate": 9.210526315789474e-05,
"loss": 0.6692,
"step": 1150
},
{
"epoch": 8.0,
"learning_rate": 9.157894736842105e-05,
"loss": 0.8402,
"step": 1160
},
{
"epoch": 8.07,
"learning_rate": 9.105263157894738e-05,
"loss": 0.6585,
"step": 1170
},
{
"epoch": 8.14,
"learning_rate": 9.052631578947369e-05,
"loss": 0.5458,
"step": 1180
},
{
"epoch": 8.21,
"learning_rate": 9e-05,
"loss": 0.6021,
"step": 1190
},
{
"epoch": 8.28,
"learning_rate": 8.947368421052632e-05,
"loss": 0.7951,
"step": 1200
},
{
"epoch": 8.34,
"learning_rate": 8.894736842105263e-05,
"loss": 0.5468,
"step": 1210
},
{
"epoch": 8.41,
"learning_rate": 8.842105263157894e-05,
"loss": 0.512,
"step": 1220
},
{
"epoch": 8.48,
"learning_rate": 8.789473684210526e-05,
"loss": 0.5986,
"step": 1230
},
{
"epoch": 8.55,
"learning_rate": 8.736842105263158e-05,
"loss": 0.7063,
"step": 1240
},
{
"epoch": 8.62,
"learning_rate": 8.68421052631579e-05,
"loss": 0.4963,
"step": 1250
},
{
"epoch": 8.69,
"learning_rate": 8.631578947368421e-05,
"loss": 0.5244,
"step": 1260
},
{
"epoch": 8.76,
"learning_rate": 8.578947368421054e-05,
"loss": 0.7563,
"step": 1270
},
{
"epoch": 8.83,
"learning_rate": 8.526315789473685e-05,
"loss": 0.5817,
"step": 1280
},
{
"epoch": 8.9,
"learning_rate": 8.473684210526316e-05,
"loss": 0.4977,
"step": 1290
},
{
"epoch": 8.97,
"learning_rate": 8.421052631578948e-05,
"loss": 0.5366,
"step": 1300
},
{
"epoch": 9.03,
"learning_rate": 8.36842105263158e-05,
"loss": 0.6796,
"step": 1310
},
{
"epoch": 9.1,
"learning_rate": 8.315789473684212e-05,
"loss": 0.4394,
"step": 1320
},
{
"epoch": 9.17,
"learning_rate": 8.263157894736843e-05,
"loss": 0.4338,
"step": 1330
},
{
"epoch": 9.24,
"learning_rate": 8.210526315789474e-05,
"loss": 0.553,
"step": 1340
},
{
"epoch": 9.31,
"learning_rate": 8.157894736842105e-05,
"loss": 0.5857,
"step": 1350
},
{
"epoch": 9.38,
"learning_rate": 8.105263157894737e-05,
"loss": 0.4039,
"step": 1360
},
{
"epoch": 9.45,
"learning_rate": 8.052631578947368e-05,
"loss": 0.4663,
"step": 1370
},
{
"epoch": 9.52,
"learning_rate": 8e-05,
"loss": 0.6259,
"step": 1380
},
{
"epoch": 9.59,
"learning_rate": 7.947368421052632e-05,
"loss": 0.4401,
"step": 1390
},
{
"epoch": 9.66,
"learning_rate": 7.894736842105263e-05,
"loss": 0.4078,
"step": 1400
},
{
"epoch": 9.72,
"learning_rate": 7.842105263157895e-05,
"loss": 0.4573,
"step": 1410
},
{
"epoch": 9.79,
"learning_rate": 7.789473684210526e-05,
"loss": 0.6243,
"step": 1420
},
{
"epoch": 9.86,
"learning_rate": 7.736842105263159e-05,
"loss": 0.4176,
"step": 1430
},
{
"epoch": 9.93,
"learning_rate": 7.68421052631579e-05,
"loss": 0.4041,
"step": 1440
},
{
"epoch": 10.0,
"learning_rate": 7.631578947368422e-05,
"loss": 0.6057,
"step": 1450
},
{
"epoch": 10.0,
"eval_loss": 0.6450413465499878,
"eval_runtime": 9.7845,
"eval_samples_per_second": 171.701,
"eval_steps_per_second": 5.417,
"eval_wer": 0.6166356557094618,
"step": 1450
},
{
"epoch": 10.07,
"learning_rate": 7.578947368421054e-05,
"loss": 0.4214,
"step": 1460
},
{
"epoch": 10.14,
"learning_rate": 7.526315789473685e-05,
"loss": 0.3491,
"step": 1470
},
{
"epoch": 10.21,
"learning_rate": 7.473684210526316e-05,
"loss": 0.3768,
"step": 1480
},
{
"epoch": 10.28,
"learning_rate": 7.421052631578948e-05,
"loss": 0.5278,
"step": 1490
},
{
"epoch": 10.34,
"learning_rate": 7.368421052631579e-05,
"loss": 0.3585,
"step": 1500
},
{
"epoch": 10.41,
"learning_rate": 7.315789473684212e-05,
"loss": 0.3362,
"step": 1510
},
{
"epoch": 10.48,
"learning_rate": 7.263157894736843e-05,
"loss": 0.4173,
"step": 1520
},
{
"epoch": 10.55,
"learning_rate": 7.210526315789474e-05,
"loss": 0.519,
"step": 1530
},
{
"epoch": 10.62,
"learning_rate": 7.157894736842105e-05,
"loss": 0.3318,
"step": 1540
},
{
"epoch": 10.69,
"learning_rate": 7.105263157894737e-05,
"loss": 0.3875,
"step": 1550
},
{
"epoch": 10.76,
"learning_rate": 7.052631578947368e-05,
"loss": 0.5483,
"step": 1560
},
{
"epoch": 10.83,
"learning_rate": 7e-05,
"loss": 0.3939,
"step": 1570
},
{
"epoch": 10.9,
"learning_rate": 6.947368421052632e-05,
"loss": 0.337,
"step": 1580
},
{
"epoch": 10.97,
"learning_rate": 6.894736842105263e-05,
"loss": 0.3964,
"step": 1590
},
{
"epoch": 11.03,
"learning_rate": 6.842105263157895e-05,
"loss": 0.5173,
"step": 1600
},
{
"epoch": 11.1,
"learning_rate": 6.789473684210527e-05,
"loss": 0.2987,
"step": 1610
},
{
"epoch": 11.17,
"learning_rate": 6.736842105263159e-05,
"loss": 0.3273,
"step": 1620
},
{
"epoch": 11.24,
"learning_rate": 6.68421052631579e-05,
"loss": 0.4159,
"step": 1630
},
{
"epoch": 11.31,
"learning_rate": 6.631578947368421e-05,
"loss": 0.3859,
"step": 1640
},
{
"epoch": 11.38,
"learning_rate": 6.578947368421054e-05,
"loss": 0.2825,
"step": 1650
},
{
"epoch": 11.45,
"learning_rate": 6.526315789473685e-05,
"loss": 0.3041,
"step": 1660
},
{
"epoch": 11.52,
"learning_rate": 6.473684210526316e-05,
"loss": 0.4922,
"step": 1670
},
{
"epoch": 11.59,
"learning_rate": 6.421052631578948e-05,
"loss": 0.3212,
"step": 1680
},
{
"epoch": 11.66,
"learning_rate": 6.368421052631579e-05,
"loss": 0.2861,
"step": 1690
},
{
"epoch": 11.72,
"learning_rate": 6.31578947368421e-05,
"loss": 0.3634,
"step": 1700
},
{
"epoch": 11.79,
"learning_rate": 6.263157894736842e-05,
"loss": 0.458,
"step": 1710
},
{
"epoch": 11.86,
"learning_rate": 6.210526315789474e-05,
"loss": 0.2961,
"step": 1720
},
{
"epoch": 11.93,
"learning_rate": 6.157894736842106e-05,
"loss": 0.3275,
"step": 1730
},
{
"epoch": 12.0,
"learning_rate": 6.105263157894737e-05,
"loss": 0.4403,
"step": 1740
},
{
"epoch": 12.07,
"learning_rate": 6.052631578947369e-05,
"loss": 0.3202,
"step": 1750
},
{
"epoch": 12.14,
"learning_rate": 6e-05,
"loss": 0.2352,
"step": 1760
},
{
"epoch": 12.21,
"learning_rate": 5.9473684210526315e-05,
"loss": 0.2907,
"step": 1770
},
{
"epoch": 12.28,
"learning_rate": 5.894736842105263e-05,
"loss": 0.417,
"step": 1780
},
{
"epoch": 12.34,
"learning_rate": 5.8421052631578954e-05,
"loss": 0.2678,
"step": 1790
},
{
"epoch": 12.41,
"learning_rate": 5.789473684210527e-05,
"loss": 0.2741,
"step": 1800
},
{
"epoch": 12.48,
"learning_rate": 5.736842105263158e-05,
"loss": 0.3506,
"step": 1810
},
{
"epoch": 12.55,
"learning_rate": 5.68421052631579e-05,
"loss": 0.385,
"step": 1820
},
{
"epoch": 12.62,
"learning_rate": 5.631578947368421e-05,
"loss": 0.25,
"step": 1830
},
{
"epoch": 12.69,
"learning_rate": 5.5789473684210526e-05,
"loss": 0.285,
"step": 1840
},
{
"epoch": 12.76,
"learning_rate": 5.526315789473685e-05,
"loss": 0.4443,
"step": 1850
},
{
"epoch": 12.83,
"learning_rate": 5.4736842105263165e-05,
"loss": 0.2927,
"step": 1860
},
{
"epoch": 12.9,
"learning_rate": 5.421052631578948e-05,
"loss": 0.2531,
"step": 1870
},
{
"epoch": 12.97,
"learning_rate": 5.368421052631579e-05,
"loss": 0.3416,
"step": 1880
},
{
"epoch": 13.03,
"learning_rate": 5.3157894736842104e-05,
"loss": 0.3826,
"step": 1890
},
{
"epoch": 13.1,
"learning_rate": 5.2631578947368424e-05,
"loss": 0.231,
"step": 1900
},
{
"epoch": 13.17,
"learning_rate": 5.210526315789474e-05,
"loss": 0.254,
"step": 1910
},
{
"epoch": 13.24,
"learning_rate": 5.157894736842106e-05,
"loss": 0.3595,
"step": 1920
},
{
"epoch": 13.31,
"learning_rate": 5.1052631578947376e-05,
"loss": 0.312,
"step": 1930
},
{
"epoch": 13.38,
"learning_rate": 5.052631578947369e-05,
"loss": 0.2345,
"step": 1940
},
{
"epoch": 13.45,
"learning_rate": 5e-05,
"loss": 0.2783,
"step": 1950
},
{
"epoch": 13.52,
"learning_rate": 4.9473684210526315e-05,
"loss": 0.4077,
"step": 1960
},
{
"epoch": 13.59,
"learning_rate": 4.8947368421052635e-05,
"loss": 0.2558,
"step": 1970
},
{
"epoch": 13.66,
"learning_rate": 4.842105263157895e-05,
"loss": 0.227,
"step": 1980
},
{
"epoch": 13.72,
"learning_rate": 4.789473684210526e-05,
"loss": 0.3002,
"step": 1990
},
{
"epoch": 13.79,
"learning_rate": 4.736842105263158e-05,
"loss": 0.3746,
"step": 2000
},
{
"epoch": 13.86,
"learning_rate": 4.68421052631579e-05,
"loss": 0.2459,
"step": 2010
},
{
"epoch": 13.93,
"learning_rate": 4.6315789473684214e-05,
"loss": 0.2841,
"step": 2020
},
{
"epoch": 14.0,
"learning_rate": 4.5789473684210527e-05,
"loss": 0.3758,
"step": 2030
},
{
"epoch": 14.07,
"learning_rate": 4.5263157894736846e-05,
"loss": 0.2574,
"step": 2040
},
{
"epoch": 14.14,
"learning_rate": 4.473684210526316e-05,
"loss": 0.1991,
"step": 2050
},
{
"epoch": 14.21,
"learning_rate": 4.421052631578947e-05,
"loss": 0.2724,
"step": 2060
},
{
"epoch": 14.28,
"learning_rate": 4.368421052631579e-05,
"loss": 0.3524,
"step": 2070
},
{
"epoch": 14.34,
"learning_rate": 4.3157894736842105e-05,
"loss": 0.2233,
"step": 2080
},
{
"epoch": 14.41,
"learning_rate": 4.2631578947368425e-05,
"loss": 0.2486,
"step": 2090
},
{
"epoch": 14.48,
"learning_rate": 4.210526315789474e-05,
"loss": 0.3034,
"step": 2100
},
{
"epoch": 14.55,
"learning_rate": 4.157894736842106e-05,
"loss": 0.3108,
"step": 2110
},
{
"epoch": 14.62,
"learning_rate": 4.105263157894737e-05,
"loss": 0.2047,
"step": 2120
},
{
"epoch": 14.69,
"learning_rate": 4.0526315789473684e-05,
"loss": 0.2412,
"step": 2130
},
{
"epoch": 14.76,
"learning_rate": 4e-05,
"loss": 0.3715,
"step": 2140
},
{
"epoch": 14.83,
"learning_rate": 3.9473684210526316e-05,
"loss": 0.2471,
"step": 2150
},
{
"epoch": 14.9,
"learning_rate": 3.894736842105263e-05,
"loss": 0.2142,
"step": 2160
},
{
"epoch": 14.97,
"learning_rate": 3.842105263157895e-05,
"loss": 0.2709,
"step": 2170
},
{
"epoch": 15.03,
"learning_rate": 3.789473684210527e-05,
"loss": 0.3467,
"step": 2180
},
{
"epoch": 15.1,
"learning_rate": 3.736842105263158e-05,
"loss": 0.2154,
"step": 2190
},
{
"epoch": 15.17,
"learning_rate": 3.6842105263157895e-05,
"loss": 0.214,
"step": 2200
},
{
"epoch": 15.24,
"learning_rate": 3.6315789473684214e-05,
"loss": 0.3125,
"step": 2210
},
{
"epoch": 15.31,
"learning_rate": 3.578947368421053e-05,
"loss": 0.2642,
"step": 2220
},
{
"epoch": 15.38,
"learning_rate": 3.526315789473684e-05,
"loss": 0.1977,
"step": 2230
},
{
"epoch": 15.45,
"learning_rate": 3.473684210526316e-05,
"loss": 0.231,
"step": 2240
},
{
"epoch": 15.52,
"learning_rate": 3.421052631578947e-05,
"loss": 0.3571,
"step": 2250
},
{
"epoch": 15.59,
"learning_rate": 3.368421052631579e-05,
"loss": 0.2162,
"step": 2260
},
{
"epoch": 15.66,
"learning_rate": 3.3157894736842106e-05,
"loss": 0.2054,
"step": 2270
},
{
"epoch": 15.72,
"learning_rate": 3.2631578947368426e-05,
"loss": 0.2829,
"step": 2280
},
{
"epoch": 15.79,
"learning_rate": 3.210526315789474e-05,
"loss": 0.321,
"step": 2290
},
{
"epoch": 15.86,
"learning_rate": 3.157894736842105e-05,
"loss": 0.1882,
"step": 2300
},
{
"epoch": 15.93,
"learning_rate": 3.105263157894737e-05,
"loss": 0.2393,
"step": 2310
},
{
"epoch": 16.0,
"learning_rate": 3.0526315789473684e-05,
"loss": 0.3597,
"step": 2320
},
{
"epoch": 16.07,
"learning_rate": 3e-05,
"loss": 0.2243,
"step": 2330
},
{
"epoch": 16.14,
"learning_rate": 2.9473684210526314e-05,
"loss": 0.1971,
"step": 2340
},
{
"epoch": 16.21,
"learning_rate": 2.8947368421052634e-05,
"loss": 0.2363,
"step": 2350
},
{
"epoch": 16.28,
"learning_rate": 2.842105263157895e-05,
"loss": 0.3159,
"step": 2360
},
{
"epoch": 16.34,
"learning_rate": 2.7894736842105263e-05,
"loss": 0.1789,
"step": 2370
},
{
"epoch": 16.41,
"learning_rate": 2.7368421052631583e-05,
"loss": 0.2081,
"step": 2380
},
{
"epoch": 16.48,
"learning_rate": 2.6842105263157896e-05,
"loss": 0.2722,
"step": 2390
},
{
"epoch": 16.55,
"learning_rate": 2.6315789473684212e-05,
"loss": 0.2745,
"step": 2400
},
{
"epoch": 16.62,
"learning_rate": 2.578947368421053e-05,
"loss": 0.1838,
"step": 2410
},
{
"epoch": 16.69,
"learning_rate": 2.5263157894736845e-05,
"loss": 0.2324,
"step": 2420
},
{
"epoch": 16.76,
"learning_rate": 2.4736842105263158e-05,
"loss": 0.3483,
"step": 2430
},
{
"epoch": 16.83,
"learning_rate": 2.4210526315789474e-05,
"loss": 0.2155,
"step": 2440
},
{
"epoch": 16.9,
"learning_rate": 2.368421052631579e-05,
"loss": 0.2028,
"step": 2450
},
{
"epoch": 16.97,
"learning_rate": 2.3157894736842107e-05,
"loss": 0.2449,
"step": 2460
},
{
"epoch": 17.03,
"learning_rate": 2.2631578947368423e-05,
"loss": 0.3167,
"step": 2470
},
{
"epoch": 17.1,
"learning_rate": 2.2105263157894736e-05,
"loss": 0.1844,
"step": 2480
},
{
"epoch": 17.17,
"learning_rate": 2.1578947368421053e-05,
"loss": 0.1915,
"step": 2490
},
{
"epoch": 17.24,
"learning_rate": 2.105263157894737e-05,
"loss": 0.2815,
"step": 2500
},
{
"epoch": 17.31,
"learning_rate": 2.0526315789473685e-05,
"loss": 0.2585,
"step": 2510
},
{
"epoch": 17.38,
"learning_rate": 2e-05,
"loss": 0.1617,
"step": 2520
},
{
"epoch": 17.45,
"learning_rate": 1.9473684210526315e-05,
"loss": 0.2171,
"step": 2530
},
{
"epoch": 17.52,
"learning_rate": 1.8947368421052634e-05,
"loss": 0.3201,
"step": 2540
},
{
"epoch": 17.59,
"learning_rate": 1.8421052631578947e-05,
"loss": 0.1838,
"step": 2550
},
{
"epoch": 17.66,
"learning_rate": 1.7894736842105264e-05,
"loss": 0.1925,
"step": 2560
},
{
"epoch": 17.72,
"learning_rate": 1.736842105263158e-05,
"loss": 0.2416,
"step": 2570
},
{
"epoch": 17.79,
"learning_rate": 1.6842105263157896e-05,
"loss": 0.2668,
"step": 2580
},
{
"epoch": 17.86,
"learning_rate": 1.6315789473684213e-05,
"loss": 0.1689,
"step": 2590
},
{
"epoch": 17.93,
"learning_rate": 1.5789473684210526e-05,
"loss": 0.2112,
"step": 2600
},
{
"epoch": 18.0,
"learning_rate": 1.5263157894736842e-05,
"loss": 0.3257,
"step": 2610
},
{
"epoch": 18.07,
"learning_rate": 1.4736842105263157e-05,
"loss": 0.2023,
"step": 2620
},
{
"epoch": 18.14,
"learning_rate": 1.4210526315789475e-05,
"loss": 0.1681,
"step": 2630
},
{
"epoch": 18.21,
"learning_rate": 1.3684210526315791e-05,
"loss": 0.2162,
"step": 2640
},
{
"epoch": 18.28,
"learning_rate": 1.3157894736842106e-05,
"loss": 0.3089,
"step": 2650
},
{
"epoch": 18.34,
"learning_rate": 1.2631578947368422e-05,
"loss": 0.1866,
"step": 2660
},
{
"epoch": 18.41,
"learning_rate": 1.2105263157894737e-05,
"loss": 0.1948,
"step": 2670
},
{
"epoch": 18.48,
"learning_rate": 1.1578947368421053e-05,
"loss": 0.2388,
"step": 2680
},
{
"epoch": 18.55,
"learning_rate": 1.1052631578947368e-05,
"loss": 0.2446,
"step": 2690
},
{
"epoch": 18.62,
"learning_rate": 1.0526315789473684e-05,
"loss": 0.1834,
"step": 2700
},
{
"epoch": 18.69,
"learning_rate": 1e-05,
"loss": 0.2103,
"step": 2710
},
{
"epoch": 18.76,
"learning_rate": 9.473684210526317e-06,
"loss": 0.3084,
"step": 2720
},
{
"epoch": 18.83,
"learning_rate": 8.947368421052632e-06,
"loss": 0.1919,
"step": 2730
},
{
"epoch": 18.9,
"learning_rate": 8.421052631578948e-06,
"loss": 0.1744,
"step": 2740
},
{
"epoch": 18.97,
"learning_rate": 7.894736842105263e-06,
"loss": 0.2449,
"step": 2750
},
{
"epoch": 19.03,
"learning_rate": 7.3684210526315784e-06,
"loss": 0.3009,
"step": 2760
},
{
"epoch": 19.1,
"learning_rate": 6.842105263157896e-06,
"loss": 0.1799,
"step": 2770
},
{
"epoch": 19.17,
"learning_rate": 6.315789473684211e-06,
"loss": 0.2052,
"step": 2780
},
{
"epoch": 19.24,
"learning_rate": 5.789473684210527e-06,
"loss": 0.2713,
"step": 2790
},
{
"epoch": 19.31,
"learning_rate": 5.263157894736842e-06,
"loss": 0.2286,
"step": 2800
},
{
"epoch": 19.38,
"learning_rate": 4.736842105263159e-06,
"loss": 0.1707,
"step": 2810
},
{
"epoch": 19.45,
"learning_rate": 4.210526315789474e-06,
"loss": 0.1961,
"step": 2820
},
{
"epoch": 19.52,
"learning_rate": 3.6842105263157892e-06,
"loss": 0.2916,
"step": 2830
},
{
"epoch": 19.59,
"learning_rate": 3.1578947368421056e-06,
"loss": 0.1624,
"step": 2840
},
{
"epoch": 19.66,
"learning_rate": 2.631578947368421e-06,
"loss": 0.1789,
"step": 2850
},
{
"epoch": 19.72,
"learning_rate": 2.105263157894737e-06,
"loss": 0.2253,
"step": 2860
},
{
"epoch": 19.79,
"learning_rate": 1.5789473684210528e-06,
"loss": 0.2809,
"step": 2870
},
{
"epoch": 19.86,
"learning_rate": 1.0526315789473685e-06,
"loss": 0.1673,
"step": 2880
},
{
"epoch": 19.93,
"learning_rate": 5.263157894736843e-07,
"loss": 0.1979,
"step": 2890
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 0.327,
"step": 2900
},
{
"epoch": 20.0,
"eval_loss": 0.6916695237159729,
"eval_runtime": 8.4317,
"eval_samples_per_second": 199.249,
"eval_steps_per_second": 6.286,
"eval_wer": 0.5904486251808972,
"step": 2900
},
{
"epoch": 20.0,
"step": 2900,
"total_flos": 2.6569362344615726e+18,
"train_loss": 1.2737908951167403,
"train_runtime": 394.8383,
"train_samples_per_second": 234.02,
"train_steps_per_second": 7.345
}
],
"logging_steps": 10,
"max_steps": 2900,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 1450,
"total_flos": 2.6569362344615726e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}