{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 1450,
"global_step": 2900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 1.0000000000000002e-06,
"loss": 8.7405,
"step": 10
},
{
"epoch": 0.14,
"learning_rate": 2.0000000000000003e-06,
"loss": 8.3725,
"step": 20
},
{
"epoch": 0.21,
"learning_rate": 3e-06,
"loss": 7.7607,
"step": 30
},
{
"epoch": 0.28,
"learning_rate": 4.000000000000001e-06,
"loss": 8.0251,
"step": 40
},
{
"epoch": 0.34,
"learning_rate": 5e-06,
"loss": 8.0286,
"step": 50
},
{
"epoch": 0.41,
"learning_rate": 6e-06,
"loss": 7.4312,
"step": 60
},
{
"epoch": 0.48,
"learning_rate": 7.000000000000001e-06,
"loss": 6.7337,
"step": 70
},
{
"epoch": 0.55,
"learning_rate": 8.000000000000001e-06,
"loss": 6.8916,
"step": 80
},
{
"epoch": 0.62,
"learning_rate": 9e-06,
"loss": 5.9048,
"step": 90
},
{
"epoch": 0.69,
"learning_rate": 1e-05,
"loss": 4.789,
"step": 100
},
{
"epoch": 0.76,
"learning_rate": 1.1000000000000001e-05,
"loss": 3.7895,
"step": 110
},
{
"epoch": 0.83,
"learning_rate": 1.2e-05,
"loss": 3.3101,
"step": 120
},
{
"epoch": 0.9,
"learning_rate": 1.3000000000000001e-05,
"loss": 3.1335,
"step": 130
},
{
"epoch": 0.97,
"learning_rate": 1.4000000000000001e-05,
"loss": 3.0395,
"step": 140
},
{
"epoch": 1.03,
"learning_rate": 1.5e-05,
"loss": 3.0154,
"step": 150
},
{
"epoch": 1.1,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.9716,
"step": 160
},
{
"epoch": 1.17,
"learning_rate": 1.7000000000000003e-05,
"loss": 2.9457,
"step": 170
},
{
"epoch": 1.24,
"learning_rate": 1.8e-05,
"loss": 2.95,
"step": 180
},
{
"epoch": 1.31,
"learning_rate": 1.9e-05,
"loss": 2.9412,
"step": 190
},
{
"epoch": 1.38,
"learning_rate": 2e-05,
"loss": 2.9126,
"step": 200
},
{
"epoch": 1.45,
"learning_rate": 2.1e-05,
"loss": 2.9103,
"step": 210
},
{
"epoch": 1.52,
"learning_rate": 2.2000000000000003e-05,
"loss": 2.931,
"step": 220
},
{
"epoch": 1.59,
"learning_rate": 2.3000000000000003e-05,
"loss": 2.9066,
"step": 230
},
{
"epoch": 1.66,
"learning_rate": 2.4e-05,
"loss": 2.8818,
"step": 240
},
{
"epoch": 1.72,
"learning_rate": 2.5e-05,
"loss": 2.8773,
"step": 250
},
{
"epoch": 1.79,
"learning_rate": 2.6000000000000002e-05,
"loss": 2.8816,
"step": 260
},
{
"epoch": 1.86,
"learning_rate": 2.7000000000000002e-05,
"loss": 2.8123,
"step": 270
},
{
"epoch": 1.93,
"learning_rate": 2.8000000000000003e-05,
"loss": 2.7808,
"step": 280
},
{
"epoch": 2.0,
"learning_rate": 2.9e-05,
"loss": 2.7578,
"step": 290
},
{
"epoch": 2.07,
"learning_rate": 3e-05,
"loss": 2.6916,
"step": 300
},
{
"epoch": 2.14,
"learning_rate": 3.1e-05,
"loss": 2.5814,
"step": 310
},
{
"epoch": 2.21,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.518,
"step": 320
},
{
"epoch": 2.28,
"learning_rate": 3.3e-05,
"loss": 2.5069,
"step": 330
},
{
"epoch": 2.34,
"learning_rate": 3.4000000000000007e-05,
"loss": 2.3335,
"step": 340
},
{
"epoch": 2.41,
"learning_rate": 3.5e-05,
"loss": 2.211,
"step": 350
},
{
"epoch": 2.48,
"learning_rate": 3.6e-05,
"loss": 2.1754,
"step": 360
},
{
"epoch": 2.55,
"learning_rate": 3.7e-05,
"loss": 2.081,
"step": 370
},
{
"epoch": 2.62,
"learning_rate": 3.8e-05,
"loss": 1.8885,
"step": 380
},
{
"epoch": 2.69,
"learning_rate": 3.9000000000000006e-05,
"loss": 1.812,
"step": 390
},
{
"epoch": 2.76,
"learning_rate": 4e-05,
"loss": 1.8453,
"step": 400
},
{
"epoch": 2.83,
"learning_rate": 4.1e-05,
"loss": 1.6226,
"step": 410
},
{
"epoch": 2.9,
"learning_rate": 4.2e-05,
"loss": 1.5139,
"step": 420
},
{
"epoch": 2.97,
"learning_rate": 4.3e-05,
"loss": 1.5378,
"step": 430
},
{
"epoch": 3.03,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.5982,
"step": 440
},
{
"epoch": 3.1,
"learning_rate": 4.5e-05,
"loss": 1.3221,
"step": 450
},
{
"epoch": 3.17,
"learning_rate": 4.600000000000001e-05,
"loss": 1.2879,
"step": 460
},
{
"epoch": 3.24,
"learning_rate": 4.7e-05,
"loss": 1.4471,
"step": 470
},
{
"epoch": 3.31,
"learning_rate": 4.8e-05,
"loss": 1.3435,
"step": 480
},
{
"epoch": 3.38,
"learning_rate": 4.9e-05,
"loss": 1.1629,
"step": 490
},
{
"epoch": 3.45,
"learning_rate": 5e-05,
"loss": 1.1958,
"step": 500
},
{
"epoch": 3.52,
"learning_rate": 5.1000000000000006e-05,
"loss": 1.3826,
"step": 510
},
{
"epoch": 3.59,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.1417,
"step": 520
},
{
"epoch": 3.66,
"learning_rate": 5.300000000000001e-05,
"loss": 1.0833,
"step": 530
},
{
"epoch": 3.72,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.1789,
"step": 540
},
{
"epoch": 3.79,
"learning_rate": 5.500000000000001e-05,
"loss": 1.2242,
"step": 550
},
{
"epoch": 3.86,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.9675,
"step": 560
},
{
"epoch": 3.93,
"learning_rate": 5.6999999999999996e-05,
"loss": 1.0413,
"step": 570
},
{
"epoch": 4.0,
"learning_rate": 5.8e-05,
"loss": 1.1943,
"step": 580
},
{
"epoch": 4.07,
"learning_rate": 5.9e-05,
"loss": 1.0426,
"step": 590
},
{
"epoch": 4.14,
"learning_rate": 6e-05,
"loss": 0.8603,
"step": 600
},
{
"epoch": 4.21,
"learning_rate": 6.1e-05,
"loss": 0.9454,
"step": 610
},
{
"epoch": 4.28,
"learning_rate": 6.2e-05,
"loss": 1.1162,
"step": 620
},
{
"epoch": 4.34,
"learning_rate": 6.3e-05,
"loss": 0.8964,
"step": 630
},
{
"epoch": 4.41,
"learning_rate": 6.400000000000001e-05,
"loss": 0.903,
"step": 640
},
{
"epoch": 4.48,
"learning_rate": 6.500000000000001e-05,
"loss": 0.9923,
"step": 650
},
{
"epoch": 4.55,
"learning_rate": 6.6e-05,
"loss": 0.99,
"step": 660
},
{
"epoch": 4.62,
"learning_rate": 6.7e-05,
"loss": 0.7718,
"step": 670
},
{
"epoch": 4.69,
"learning_rate": 6.800000000000001e-05,
"loss": 0.8345,
"step": 680
},
{
"epoch": 4.76,
"learning_rate": 6.9e-05,
"loss": 1.0266,
"step": 690
},
{
"epoch": 4.83,
"learning_rate": 7e-05,
"loss": 0.8174,
"step": 700
},
{
"epoch": 4.9,
"learning_rate": 7.1e-05,
"loss": 0.756,
"step": 710
},
{
"epoch": 4.97,
"learning_rate": 7.2e-05,
"loss": 0.8567,
"step": 720
},
{
"epoch": 5.03,
"learning_rate": 7.3e-05,
"loss": 1.0015,
"step": 730
},
{
"epoch": 5.1,
"learning_rate": 7.4e-05,
"loss": 0.7094,
"step": 740
},
{
"epoch": 5.17,
"learning_rate": 7.500000000000001e-05,
"loss": 0.745,
"step": 750
},
{
"epoch": 5.24,
"learning_rate": 7.6e-05,
"loss": 0.8566,
"step": 760
},
{
"epoch": 5.31,
"learning_rate": 7.7e-05,
"loss": 0.8234,
"step": 770
},
{
"epoch": 5.38,
"learning_rate": 7.800000000000001e-05,
"loss": 0.6645,
"step": 780
},
{
"epoch": 5.45,
"learning_rate": 7.900000000000001e-05,
"loss": 0.7258,
"step": 790
},
{
"epoch": 5.52,
"learning_rate": 8e-05,
"loss": 0.8984,
"step": 800
},
{
"epoch": 5.59,
"learning_rate": 8.1e-05,
"loss": 0.7077,
"step": 810
},
{
"epoch": 5.66,
"learning_rate": 8.2e-05,
"loss": 0.6287,
"step": 820
},
{
"epoch": 5.72,
"learning_rate": 8.3e-05,
"loss": 0.789,
"step": 830
},
{
"epoch": 5.79,
"learning_rate": 8.4e-05,
"loss": 0.8406,
"step": 840
},
{
"epoch": 5.86,
"learning_rate": 8.5e-05,
"loss": 0.5996,
"step": 850
},
{
"epoch": 5.93,
"learning_rate": 8.6e-05,
"loss": 0.6525,
"step": 860
},
{
"epoch": 6.0,
"learning_rate": 8.7e-05,
"loss": 0.825,
"step": 870
},
{
"epoch": 6.07,
"learning_rate": 8.800000000000001e-05,
"loss": 0.6722,
"step": 880
},
{
"epoch": 6.14,
"learning_rate": 8.900000000000001e-05,
"loss": 0.5402,
"step": 890
},
{
"epoch": 6.21,
"learning_rate": 9e-05,
"loss": 0.6394,
"step": 900
},
{
"epoch": 6.28,
"learning_rate": 9.1e-05,
"loss": 0.7814,
"step": 910
},
{
"epoch": 6.34,
"learning_rate": 9.200000000000001e-05,
"loss": 0.5593,
"step": 920
},
{
"epoch": 6.41,
"learning_rate": 9.300000000000001e-05,
"loss": 0.5488,
"step": 930
},
{
"epoch": 6.48,
"learning_rate": 9.4e-05,
"loss": 0.6778,
"step": 940
},
{
"epoch": 6.55,
"learning_rate": 9.5e-05,
"loss": 0.7197,
"step": 950
},
{
"epoch": 6.62,
"learning_rate": 9.6e-05,
"loss": 0.5515,
"step": 960
},
{
"epoch": 6.69,
"learning_rate": 9.7e-05,
"loss": 0.547,
"step": 970
},
{
"epoch": 6.76,
"learning_rate": 9.8e-05,
"loss": 0.7743,
"step": 980
},
{
"epoch": 6.83,
"learning_rate": 9.900000000000001e-05,
"loss": 0.5979,
"step": 990
},
{
"epoch": 6.9,
"learning_rate": 0.0001,
"loss": 0.5156,
"step": 1000
},
{
"epoch": 6.97,
"learning_rate": 9.947368421052632e-05,
"loss": 0.5823,
"step": 1010
},
{
"epoch": 7.03,
"learning_rate": 9.894736842105263e-05,
"loss": 0.687,
"step": 1020
},
{
"epoch": 7.1,
"learning_rate": 9.842105263157894e-05,
"loss": 0.4806,
"step": 1030
},
{
"epoch": 7.17,
"learning_rate": 9.789473684210527e-05,
"loss": 0.4601,
"step": 1040
},
{
"epoch": 7.24,
"learning_rate": 9.736842105263158e-05,
"loss": 0.6117,
"step": 1050
},
{
"epoch": 7.31,
"learning_rate": 9.68421052631579e-05,
"loss": 0.6,
"step": 1060
},
{
"epoch": 7.38,
"learning_rate": 9.631578947368421e-05,
"loss": 0.4322,
"step": 1070
},
{
"epoch": 7.45,
"learning_rate": 9.578947368421052e-05,
"loss": 0.4855,
"step": 1080
},
{
"epoch": 7.52,
"learning_rate": 9.526315789473685e-05,
"loss": 0.6307,
"step": 1090
},
{
"epoch": 7.59,
"learning_rate": 9.473684210526316e-05,
"loss": 0.4686,
"step": 1100
},
{
"epoch": 7.66,
"learning_rate": 9.421052631578949e-05,
"loss": 0.4421,
"step": 1110
},
{
"epoch": 7.72,
"learning_rate": 9.36842105263158e-05,
"loss": 0.5434,
"step": 1120
},
{
"epoch": 7.79,
"learning_rate": 9.315789473684211e-05,
"loss": 0.6483,
"step": 1130
},
{
"epoch": 7.86,
"learning_rate": 9.263157894736843e-05,
"loss": 0.4373,
"step": 1140
},
{
"epoch": 7.93,
"learning_rate": 9.210526315789474e-05,
"loss": 0.464,
"step": 1150
},
{
"epoch": 8.0,
"learning_rate": 9.157894736842105e-05,
"loss": 0.6059,
"step": 1160
},
{
"epoch": 8.07,
"learning_rate": 9.105263157894738e-05,
"loss": 0.4463,
"step": 1170
},
{
"epoch": 8.14,
"learning_rate": 9.052631578947369e-05,
"loss": 0.3746,
"step": 1180
},
{
"epoch": 8.21,
"learning_rate": 9e-05,
"loss": 0.4007,
"step": 1190
},
{
"epoch": 8.28,
"learning_rate": 8.947368421052632e-05,
"loss": 0.5896,
"step": 1200
},
{
"epoch": 8.34,
"learning_rate": 8.894736842105263e-05,
"loss": 0.4089,
"step": 1210
},
{
"epoch": 8.41,
"learning_rate": 8.842105263157894e-05,
"loss": 0.3942,
"step": 1220
},
{
"epoch": 8.48,
"learning_rate": 8.789473684210526e-05,
"loss": 0.4547,
"step": 1230
},
{
"epoch": 8.55,
"learning_rate": 8.736842105263158e-05,
"loss": 0.497,
"step": 1240
},
{
"epoch": 8.62,
"learning_rate": 8.68421052631579e-05,
"loss": 0.3453,
"step": 1250
},
{
"epoch": 8.69,
"learning_rate": 8.631578947368421e-05,
"loss": 0.3837,
"step": 1260
},
{
"epoch": 8.76,
"learning_rate": 8.578947368421054e-05,
"loss": 0.5705,
"step": 1270
},
{
"epoch": 8.83,
"learning_rate": 8.526315789473685e-05,
"loss": 0.4159,
"step": 1280
},
{
"epoch": 8.9,
"learning_rate": 8.473684210526316e-05,
"loss": 0.3548,
"step": 1290
},
{
"epoch": 8.97,
"learning_rate": 8.421052631578948e-05,
"loss": 0.4421,
"step": 1300
},
{
"epoch": 9.03,
"learning_rate": 8.36842105263158e-05,
"loss": 0.5231,
"step": 1310
},
{
"epoch": 9.1,
"learning_rate": 8.315789473684212e-05,
"loss": 0.3367,
"step": 1320
},
{
"epoch": 9.17,
"learning_rate": 8.263157894736843e-05,
"loss": 0.3161,
"step": 1330
},
{
"epoch": 9.24,
"learning_rate": 8.210526315789474e-05,
"loss": 0.4323,
"step": 1340
},
{
"epoch": 9.31,
"learning_rate": 8.157894736842105e-05,
"loss": 0.4278,
"step": 1350
},
{
"epoch": 9.38,
"learning_rate": 8.105263157894737e-05,
"loss": 0.2999,
"step": 1360
},
{
"epoch": 9.45,
"learning_rate": 8.052631578947368e-05,
"loss": 0.3515,
"step": 1370
},
{
"epoch": 9.52,
"learning_rate": 8e-05,
"loss": 0.5025,
"step": 1380
},
{
"epoch": 9.59,
"learning_rate": 7.947368421052632e-05,
"loss": 0.331,
"step": 1390
},
{
"epoch": 9.66,
"learning_rate": 7.894736842105263e-05,
"loss": 0.3059,
"step": 1400
},
{
"epoch": 9.72,
"learning_rate": 7.842105263157895e-05,
"loss": 0.3913,
"step": 1410
},
{
"epoch": 9.79,
"learning_rate": 7.789473684210526e-05,
"loss": 0.4537,
"step": 1420
},
{
"epoch": 9.86,
"learning_rate": 7.736842105263159e-05,
"loss": 0.2862,
"step": 1430
},
{
"epoch": 9.93,
"learning_rate": 7.68421052631579e-05,
"loss": 0.3201,
"step": 1440
},
{
"epoch": 10.0,
"learning_rate": 7.631578947368422e-05,
"loss": 0.4628,
"step": 1450
},
{
"epoch": 10.0,
"eval_loss": 0.6778876781463623,
"eval_runtime": 34.9456,
"eval_samples_per_second": 48.075,
"eval_steps_per_second": 48.075,
"eval_wer": 0.5171249397009166,
"step": 1450
},
{
"epoch": 10.07,
"learning_rate": 7.578947368421054e-05,
"loss": 0.3185,
"step": 1460
},
{
"epoch": 10.14,
"learning_rate": 7.526315789473685e-05,
"loss": 0.2507,
"step": 1470
},
{
"epoch": 10.21,
"learning_rate": 7.473684210526316e-05,
"loss": 0.3112,
"step": 1480
},
{
"epoch": 10.28,
"learning_rate": 7.421052631578948e-05,
"loss": 0.4179,
"step": 1490
},
{
"epoch": 10.34,
"learning_rate": 7.368421052631579e-05,
"loss": 0.283,
"step": 1500
},
{
"epoch": 10.41,
"learning_rate": 7.315789473684212e-05,
"loss": 0.2931,
"step": 1510
},
{
"epoch": 10.48,
"learning_rate": 7.263157894736843e-05,
"loss": 0.3917,
"step": 1520
},
{
"epoch": 10.55,
"learning_rate": 7.210526315789474e-05,
"loss": 0.3625,
"step": 1530
},
{
"epoch": 10.62,
"learning_rate": 7.157894736842105e-05,
"loss": 0.258,
"step": 1540
},
{
"epoch": 10.69,
"learning_rate": 7.105263157894737e-05,
"loss": 0.3266,
"step": 1550
},
{
"epoch": 10.76,
"learning_rate": 7.052631578947368e-05,
"loss": 0.4379,
"step": 1560
},
{
"epoch": 10.83,
"learning_rate": 7e-05,
"loss": 0.3105,
"step": 1570
},
{
"epoch": 10.9,
"learning_rate": 6.947368421052632e-05,
"loss": 0.2873,
"step": 1580
},
{
"epoch": 10.97,
"learning_rate": 6.894736842105263e-05,
"loss": 0.3476,
"step": 1590
},
{
"epoch": 11.03,
"learning_rate": 6.842105263157895e-05,
"loss": 0.3837,
"step": 1600
},
{
"epoch": 11.1,
"learning_rate": 6.789473684210527e-05,
"loss": 0.2481,
"step": 1610
},
{
"epoch": 11.17,
"learning_rate": 6.736842105263159e-05,
"loss": 0.2574,
"step": 1620
},
{
"epoch": 11.24,
"learning_rate": 6.68421052631579e-05,
"loss": 0.3801,
"step": 1630
},
{
"epoch": 11.31,
"learning_rate": 6.631578947368421e-05,
"loss": 0.3083,
"step": 1640
},
{
"epoch": 11.38,
"learning_rate": 6.578947368421054e-05,
"loss": 0.2301,
"step": 1650
},
{
"epoch": 11.45,
"learning_rate": 6.526315789473685e-05,
"loss": 0.2517,
"step": 1660
},
{
"epoch": 11.52,
"learning_rate": 6.473684210526316e-05,
"loss": 0.4046,
"step": 1670
},
{
"epoch": 11.59,
"learning_rate": 6.421052631578948e-05,
"loss": 0.2412,
"step": 1680
},
{
"epoch": 11.66,
"learning_rate": 6.368421052631579e-05,
"loss": 0.2427,
"step": 1690
},
{
"epoch": 11.72,
"learning_rate": 6.31578947368421e-05,
"loss": 0.3368,
"step": 1700
},
{
"epoch": 11.79,
"learning_rate": 6.263157894736842e-05,
"loss": 0.3602,
"step": 1710
},
{
"epoch": 11.86,
"learning_rate": 6.210526315789474e-05,
"loss": 0.2291,
"step": 1720
},
{
"epoch": 11.93,
"learning_rate": 6.157894736842106e-05,
"loss": 0.268,
"step": 1730
},
{
"epoch": 12.0,
"learning_rate": 6.105263157894737e-05,
"loss": 0.3812,
"step": 1740
},
{
"epoch": 12.07,
"learning_rate": 6.052631578947369e-05,
"loss": 0.2434,
"step": 1750
},
{
"epoch": 12.14,
"learning_rate": 6e-05,
"loss": 0.2464,
"step": 1760
},
{
"epoch": 12.21,
"learning_rate": 5.9473684210526315e-05,
"loss": 0.2766,
"step": 1770
},
{
"epoch": 12.28,
"learning_rate": 5.894736842105263e-05,
"loss": 0.3767,
"step": 1780
},
{
"epoch": 12.34,
"learning_rate": 5.8421052631578954e-05,
"loss": 0.2147,
"step": 1790
},
{
"epoch": 12.41,
"learning_rate": 5.789473684210527e-05,
"loss": 0.2303,
"step": 1800
},
{
"epoch": 12.48,
"learning_rate": 5.736842105263158e-05,
"loss": 0.287,
"step": 1810
},
{
"epoch": 12.55,
"learning_rate": 5.68421052631579e-05,
"loss": 0.307,
"step": 1820
},
{
"epoch": 12.62,
"learning_rate": 5.631578947368421e-05,
"loss": 0.2175,
"step": 1830
},
{
"epoch": 12.69,
"learning_rate": 5.5789473684210526e-05,
"loss": 0.2523,
"step": 1840
},
{
"epoch": 12.76,
"learning_rate": 5.526315789473685e-05,
"loss": 0.3714,
"step": 1850
},
{
"epoch": 12.83,
"learning_rate": 5.4736842105263165e-05,
"loss": 0.2392,
"step": 1860
},
{
"epoch": 12.9,
"learning_rate": 5.421052631578948e-05,
"loss": 0.207,
"step": 1870
},
{
"epoch": 12.97,
"learning_rate": 5.368421052631579e-05,
"loss": 0.2852,
"step": 1880
},
{
"epoch": 13.03,
"learning_rate": 5.3157894736842104e-05,
"loss": 0.3358,
"step": 1890
},
{
"epoch": 13.1,
"learning_rate": 5.2631578947368424e-05,
"loss": 0.2168,
"step": 1900
},
{
"epoch": 13.17,
"learning_rate": 5.210526315789474e-05,
"loss": 0.2274,
"step": 1910
},
{
"epoch": 13.24,
"learning_rate": 5.157894736842106e-05,
"loss": 0.3218,
"step": 1920
},
{
"epoch": 13.31,
"learning_rate": 5.1052631578947376e-05,
"loss": 0.2585,
"step": 1930
},
{
"epoch": 13.38,
"learning_rate": 5.052631578947369e-05,
"loss": 0.1977,
"step": 1940
},
{
"epoch": 13.45,
"learning_rate": 5e-05,
"loss": 0.24,
"step": 1950
},
{
"epoch": 13.52,
"learning_rate": 4.9473684210526315e-05,
"loss": 0.3329,
"step": 1960
},
{
"epoch": 13.59,
"learning_rate": 4.8947368421052635e-05,
"loss": 0.1981,
"step": 1970
},
{
"epoch": 13.66,
"learning_rate": 4.842105263157895e-05,
"loss": 0.2081,
"step": 1980
},
{
"epoch": 13.72,
"learning_rate": 4.789473684210526e-05,
"loss": 0.2666,
"step": 1990
},
{
"epoch": 13.79,
"learning_rate": 4.736842105263158e-05,
"loss": 0.3079,
"step": 2000
},
{
"epoch": 13.86,
"learning_rate": 4.68421052631579e-05,
"loss": 0.1899,
"step": 2010
},
{
"epoch": 13.93,
"learning_rate": 4.6315789473684214e-05,
"loss": 0.2373,
"step": 2020
},
{
"epoch": 14.0,
"learning_rate": 4.5789473684210527e-05,
"loss": 0.3616,
"step": 2030
},
{
"epoch": 14.07,
"learning_rate": 4.5263157894736846e-05,
"loss": 0.2077,
"step": 2040
},
{
"epoch": 14.14,
"learning_rate": 4.473684210526316e-05,
"loss": 0.1876,
"step": 2050
},
{
"epoch": 14.21,
"learning_rate": 4.421052631578947e-05,
"loss": 0.2584,
"step": 2060
},
{
"epoch": 14.28,
"learning_rate": 4.368421052631579e-05,
"loss": 0.3133,
"step": 2070
},
{
"epoch": 14.34,
"learning_rate": 4.3157894736842105e-05,
"loss": 0.1894,
"step": 2080
},
{
"epoch": 14.41,
"learning_rate": 4.2631578947368425e-05,
"loss": 0.1958,
"step": 2090
},
{
"epoch": 14.48,
"learning_rate": 4.210526315789474e-05,
"loss": 0.2899,
"step": 2100
},
{
"epoch": 14.55,
"learning_rate": 4.157894736842106e-05,
"loss": 0.2644,
"step": 2110
},
{
"epoch": 14.62,
"learning_rate": 4.105263157894737e-05,
"loss": 0.1853,
"step": 2120
},
{
"epoch": 14.69,
"learning_rate": 4.0526315789473684e-05,
"loss": 0.2336,
"step": 2130
},
{
"epoch": 14.76,
"learning_rate": 4e-05,
"loss": 0.322,
"step": 2140
},
{
"epoch": 14.83,
"learning_rate": 3.9473684210526316e-05,
"loss": 0.2084,
"step": 2150
},
{
"epoch": 14.9,
"learning_rate": 3.894736842105263e-05,
"loss": 0.2028,
"step": 2160
},
{
"epoch": 14.97,
"learning_rate": 3.842105263157895e-05,
"loss": 0.2511,
"step": 2170
},
{
"epoch": 15.03,
"learning_rate": 3.789473684210527e-05,
"loss": 0.2843,
"step": 2180
},
{
"epoch": 15.1,
"learning_rate": 3.736842105263158e-05,
"loss": 0.174,
"step": 2190
},
{
"epoch": 15.17,
"learning_rate": 3.6842105263157895e-05,
"loss": 0.1943,
"step": 2200
},
{
"epoch": 15.24,
"learning_rate": 3.6315789473684214e-05,
"loss": 0.3042,
"step": 2210
},
{
"epoch": 15.31,
"learning_rate": 3.578947368421053e-05,
"loss": 0.2302,
"step": 2220
},
{
"epoch": 15.38,
"learning_rate": 3.526315789473684e-05,
"loss": 0.1731,
"step": 2230
},
{
"epoch": 15.45,
"learning_rate": 3.473684210526316e-05,
"loss": 0.2261,
"step": 2240
},
{
"epoch": 15.52,
"learning_rate": 3.421052631578947e-05,
"loss": 0.3035,
"step": 2250
},
{
"epoch": 15.59,
"learning_rate": 3.368421052631579e-05,
"loss": 0.2029,
"step": 2260
},
{
"epoch": 15.66,
"learning_rate": 3.3157894736842106e-05,
"loss": 0.1987,
"step": 2270
},
{
"epoch": 15.72,
"learning_rate": 3.2631578947368426e-05,
"loss": 0.262,
"step": 2280
},
{
"epoch": 15.79,
"learning_rate": 3.210526315789474e-05,
"loss": 0.2698,
"step": 2290
},
{
"epoch": 15.86,
"learning_rate": 3.157894736842105e-05,
"loss": 0.1755,
"step": 2300
},
{
"epoch": 15.93,
"learning_rate": 3.105263157894737e-05,
"loss": 0.1991,
"step": 2310
},
{
"epoch": 16.0,
"learning_rate": 3.0526315789473684e-05,
"loss": 0.3251,
"step": 2320
},
{
"epoch": 16.07,
"learning_rate": 3e-05,
"loss": 0.19,
"step": 2330
},
{
"epoch": 16.14,
"learning_rate": 2.9473684210526314e-05,
"loss": 0.1784,
"step": 2340
},
{
"epoch": 16.21,
"learning_rate": 2.8947368421052634e-05,
"loss": 0.2436,
"step": 2350
},
{
"epoch": 16.28,
"learning_rate": 2.842105263157895e-05,
"loss": 0.2933,
"step": 2360
},
{
"epoch": 16.34,
"learning_rate": 2.7894736842105263e-05,
"loss": 0.1761,
"step": 2370
},
{
"epoch": 16.41,
"learning_rate": 2.7368421052631583e-05,
"loss": 0.1848,
"step": 2380
},
{
"epoch": 16.48,
"learning_rate": 2.6842105263157896e-05,
"loss": 0.2543,
"step": 2390
},
{
"epoch": 16.55,
"learning_rate": 2.6315789473684212e-05,
"loss": 0.2544,
"step": 2400
},
{
"epoch": 16.62,
"learning_rate": 2.578947368421053e-05,
"loss": 0.1573,
"step": 2410
},
{
"epoch": 16.69,
"learning_rate": 2.5263157894736845e-05,
"loss": 0.217,
"step": 2420
},
{
"epoch": 16.76,
"learning_rate": 2.4736842105263158e-05,
"loss": 0.3055,
"step": 2430
},
{
"epoch": 16.83,
"learning_rate": 2.4210526315789474e-05,
"loss": 0.1782,
"step": 2440
},
{
"epoch": 16.9,
"learning_rate": 2.368421052631579e-05,
"loss": 0.1739,
"step": 2450
},
{
"epoch": 16.97,
"learning_rate": 2.3157894736842107e-05,
"loss": 0.2208,
"step": 2460
},
{
"epoch": 17.03,
"learning_rate": 2.2631578947368423e-05,
"loss": 0.2789,
"step": 2470
},
{
"epoch": 17.1,
"learning_rate": 2.2105263157894736e-05,
"loss": 0.1674,
"step": 2480
},
{
"epoch": 17.17,
"learning_rate": 2.1578947368421053e-05,
"loss": 0.1829,
"step": 2490
},
{
"epoch": 17.24,
"learning_rate": 2.105263157894737e-05,
"loss": 0.2931,
"step": 2500
},
{
"epoch": 17.31,
"learning_rate": 2.0526315789473685e-05,
"loss": 0.2071,
"step": 2510
},
{
"epoch": 17.38,
"learning_rate": 2e-05,
"loss": 0.1626,
"step": 2520
},
{
"epoch": 17.45,
"learning_rate": 1.9473684210526315e-05,
"loss": 0.1888,
"step": 2530
},
{
"epoch": 17.52,
"learning_rate": 1.8947368421052634e-05,
"loss": 0.2998,
"step": 2540
},
{
"epoch": 17.59,
"learning_rate": 1.8421052631578947e-05,
"loss": 0.1682,
"step": 2550
},
{
"epoch": 17.66,
"learning_rate": 1.7894736842105264e-05,
"loss": 0.176,
"step": 2560
},
{
"epoch": 17.72,
"learning_rate": 1.736842105263158e-05,
"loss": 0.2486,
"step": 2570
},
{
"epoch": 17.79,
"learning_rate": 1.6842105263157896e-05,
"loss": 0.2343,
"step": 2580
},
{
"epoch": 17.86,
"learning_rate": 1.6315789473684213e-05,
"loss": 0.1735,
"step": 2590
},
{
"epoch": 17.93,
"learning_rate": 1.5789473684210526e-05,
"loss": 0.1903,
"step": 2600
},
{
"epoch": 18.0,
"learning_rate": 1.5263157894736842e-05,
"loss": 0.3146,
"step": 2610
},
{
"epoch": 18.07,
"learning_rate": 1.4736842105263157e-05,
"loss": 0.1868,
"step": 2620
},
{
"epoch": 18.14,
"learning_rate": 1.4210526315789475e-05,
"loss": 0.1762,
"step": 2630
},
{
"epoch": 18.21,
"learning_rate": 1.3684210526315791e-05,
"loss": 0.2106,
"step": 2640
},
{
"epoch": 18.28,
"learning_rate": 1.3157894736842106e-05,
"loss": 0.2651,
"step": 2650
},
{
"epoch": 18.34,
"learning_rate": 1.2631578947368422e-05,
"loss": 0.1763,
"step": 2660
},
{
"epoch": 18.41,
"learning_rate": 1.2105263157894737e-05,
"loss": 0.1735,
"step": 2670
},
{
"epoch": 18.48,
"learning_rate": 1.1578947368421053e-05,
"loss": 0.2462,
"step": 2680
},
{
"epoch": 18.55,
"learning_rate": 1.1052631578947368e-05,
"loss": 0.2385,
"step": 2690
},
{
"epoch": 18.62,
"learning_rate": 1.0526315789473684e-05,
"loss": 0.1635,
"step": 2700
},
{
"epoch": 18.69,
"learning_rate": 1e-05,
"loss": 0.2076,
"step": 2710
},
{
"epoch": 18.76,
"learning_rate": 9.473684210526317e-06,
"loss": 0.2846,
"step": 2720
},
{
"epoch": 18.83,
"learning_rate": 8.947368421052632e-06,
"loss": 0.1578,
"step": 2730
},
{
"epoch": 18.9,
"learning_rate": 8.421052631578948e-06,
"loss": 0.1799,
"step": 2740
},
{
"epoch": 18.97,
"learning_rate": 7.894736842105263e-06,
"loss": 0.2316,
"step": 2750
},
{
"epoch": 19.03,
"learning_rate": 7.3684210526315784e-06,
"loss": 0.2804,
"step": 2760
},
{
"epoch": 19.1,
"learning_rate": 6.842105263157896e-06,
"loss": 0.1665,
"step": 2770
},
{
"epoch": 19.17,
"learning_rate": 6.315789473684211e-06,
"loss": 0.179,
"step": 2780
},
{
"epoch": 19.24,
"learning_rate": 5.789473684210527e-06,
"loss": 0.2641,
"step": 2790
},
{
"epoch": 19.31,
"learning_rate": 5.263157894736842e-06,
"loss": 0.1893,
"step": 2800
},
{
"epoch": 19.38,
"learning_rate": 4.736842105263159e-06,
"loss": 0.1525,
"step": 2810
},
{
"epoch": 19.45,
"learning_rate": 4.210526315789474e-06,
"loss": 0.2093,
"step": 2820
},
{
"epoch": 19.52,
"learning_rate": 3.6842105263157892e-06,
"loss": 0.2953,
"step": 2830
},
{
"epoch": 19.59,
"learning_rate": 3.1578947368421056e-06,
"loss": 0.1601,
"step": 2840
},
{
"epoch": 19.66,
"learning_rate": 2.631578947368421e-06,
"loss": 0.1738,
"step": 2850
},
{
"epoch": 19.72,
"learning_rate": 2.105263157894737e-06,
"loss": 0.2373,
"step": 2860
},
{
"epoch": 19.79,
"learning_rate": 1.5789473684210528e-06,
"loss": 0.2352,
"step": 2870
},
{
"epoch": 19.86,
"learning_rate": 1.0526315789473685e-06,
"loss": 0.1506,
"step": 2880
},
{
"epoch": 19.93,
"learning_rate": 5.263157894736843e-07,
"loss": 0.1906,
"step": 2890
},
{
"epoch": 20.0,
"learning_rate": 0.0,
"loss": 0.3036,
"step": 2900
},
{
"epoch": 20.0,
"eval_loss": 0.7549105882644653,
"eval_runtime": 29.6257,
"eval_samples_per_second": 56.708,
"eval_steps_per_second": 56.708,
"eval_wer": 0.4901798635517883,
"step": 2900
},
{
"epoch": 20.0,
"step": 2900,
"total_flos": 2.657192658080392e+18,
"train_loss": 0.9176692845903593,
"train_runtime": 459.1587,
"train_samples_per_second": 201.238,
"train_steps_per_second": 6.316
}
],
"logging_steps": 10,
"max_steps": 2900,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 1450,
"total_flos": 2.657192658080392e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}