wav2vec2-100m-mls-german-ft-2 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"global_step": 3500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.29,
"learning_rate": 4e-06,
"loss": 12.4261,
"step": 10
},
{
"epoch": 0.57,
"learning_rate": 8.500000000000002e-06,
"loss": 14.8659,
"step": 20
},
{
"epoch": 0.86,
"learning_rate": 1.35e-05,
"loss": 12.4176,
"step": 30
},
{
"epoch": 1.14,
"learning_rate": 1.8e-05,
"loss": 13.4075,
"step": 40
},
{
"epoch": 1.43,
"learning_rate": 2.3e-05,
"loss": 13.113,
"step": 50
},
{
"epoch": 1.71,
"learning_rate": 2.8e-05,
"loss": 12.0587,
"step": 60
},
{
"epoch": 2.0,
"learning_rate": 3.3e-05,
"loss": 11.7634,
"step": 70
},
{
"epoch": 2.29,
"learning_rate": 3.8e-05,
"loss": 10.2845,
"step": 80
},
{
"epoch": 2.57,
"learning_rate": 4.2999999999999995e-05,
"loss": 11.5741,
"step": 90
},
{
"epoch": 2.86,
"learning_rate": 4.8e-05,
"loss": 9.9064,
"step": 100
},
{
"epoch": 3.14,
"learning_rate": 5.3e-05,
"loss": 10.4647,
"step": 110
},
{
"epoch": 3.43,
"learning_rate": 5.800000000000001e-05,
"loss": 9.851,
"step": 120
},
{
"epoch": 3.71,
"learning_rate": 6.3e-05,
"loss": 10.0611,
"step": 130
},
{
"epoch": 4.0,
"learning_rate": 6.800000000000001e-05,
"loss": 9.3195,
"step": 140
},
{
"epoch": 4.29,
"learning_rate": 7.3e-05,
"loss": 8.3377,
"step": 150
},
{
"epoch": 4.57,
"learning_rate": 7.8e-05,
"loss": 8.9758,
"step": 160
},
{
"epoch": 4.86,
"learning_rate": 8.300000000000001e-05,
"loss": 7.912,
"step": 170
},
{
"epoch": 5.14,
"learning_rate": 8.8e-05,
"loss": 7.9751,
"step": 180
},
{
"epoch": 5.43,
"learning_rate": 9.3e-05,
"loss": 7.2092,
"step": 190
},
{
"epoch": 5.71,
"learning_rate": 9.800000000000001e-05,
"loss": 7.0987,
"step": 200
},
{
"epoch": 6.0,
"learning_rate": 0.000103,
"loss": 6.2911,
"step": 210
},
{
"epoch": 6.29,
"learning_rate": 0.000108,
"loss": 5.555,
"step": 220
},
{
"epoch": 6.57,
"learning_rate": 0.00011300000000000001,
"loss": 5.5446,
"step": 230
},
{
"epoch": 6.86,
"learning_rate": 0.000118,
"loss": 4.5362,
"step": 240
},
{
"epoch": 7.14,
"learning_rate": 0.000123,
"loss": 4.2736,
"step": 250
},
{
"epoch": 7.43,
"learning_rate": 0.000128,
"loss": 3.8207,
"step": 260
},
{
"epoch": 7.71,
"learning_rate": 0.000133,
"loss": 3.7274,
"step": 270
},
{
"epoch": 8.0,
"learning_rate": 0.00013800000000000002,
"loss": 3.3005,
"step": 280
},
{
"epoch": 8.29,
"learning_rate": 0.00014299999999999998,
"loss": 3.1548,
"step": 290
},
{
"epoch": 8.57,
"learning_rate": 0.000148,
"loss": 3.0679,
"step": 300
},
{
"epoch": 8.86,
"learning_rate": 0.000153,
"loss": 3.0221,
"step": 310
},
{
"epoch": 9.14,
"learning_rate": 0.000158,
"loss": 3.0757,
"step": 320
},
{
"epoch": 9.43,
"learning_rate": 0.000163,
"loss": 2.9748,
"step": 330
},
{
"epoch": 9.71,
"learning_rate": 0.00016800000000000002,
"loss": 2.9839,
"step": 340
},
{
"epoch": 10.0,
"learning_rate": 0.000173,
"loss": 2.9899,
"step": 350
},
{
"epoch": 10.29,
"learning_rate": 0.000178,
"loss": 2.9415,
"step": 360
},
{
"epoch": 10.57,
"learning_rate": 0.000183,
"loss": 3.0035,
"step": 370
},
{
"epoch": 10.86,
"learning_rate": 0.00018800000000000002,
"loss": 2.9654,
"step": 380
},
{
"epoch": 11.14,
"learning_rate": 0.000193,
"loss": 2.9752,
"step": 390
},
{
"epoch": 11.43,
"learning_rate": 0.00019800000000000002,
"loss": 2.945,
"step": 400
},
{
"epoch": 11.71,
"learning_rate": 0.00020300000000000003,
"loss": 3.0024,
"step": 410
},
{
"epoch": 12.0,
"learning_rate": 0.000208,
"loss": 2.9515,
"step": 420
},
{
"epoch": 12.29,
"learning_rate": 0.000213,
"loss": 2.9324,
"step": 430
},
{
"epoch": 12.57,
"learning_rate": 0.000218,
"loss": 2.9882,
"step": 440
},
{
"epoch": 12.86,
"learning_rate": 0.000223,
"loss": 2.9414,
"step": 450
},
{
"epoch": 13.14,
"learning_rate": 0.000228,
"loss": 2.9913,
"step": 460
},
{
"epoch": 13.43,
"learning_rate": 0.00023300000000000003,
"loss": 2.9644,
"step": 470
},
{
"epoch": 13.71,
"learning_rate": 0.00023799999999999998,
"loss": 2.9708,
"step": 480
},
{
"epoch": 14.0,
"learning_rate": 0.000243,
"loss": 2.9498,
"step": 490
},
{
"epoch": 14.29,
"learning_rate": 0.000248,
"loss": 2.9545,
"step": 500
},
{
"epoch": 14.29,
"eval_loss": 2.9353771209716797,
"eval_runtime": 66.3897,
"eval_samples_per_second": 51.122,
"eval_steps_per_second": 0.813,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 14.57,
"learning_rate": 0.000253,
"loss": 2.9787,
"step": 510
},
{
"epoch": 14.86,
"learning_rate": 0.00025800000000000004,
"loss": 2.9257,
"step": 520
},
{
"epoch": 15.14,
"learning_rate": 0.000263,
"loss": 2.9613,
"step": 530
},
{
"epoch": 15.43,
"learning_rate": 0.000268,
"loss": 2.9755,
"step": 540
},
{
"epoch": 15.71,
"learning_rate": 0.000273,
"loss": 2.958,
"step": 550
},
{
"epoch": 16.0,
"learning_rate": 0.00027800000000000004,
"loss": 2.9364,
"step": 560
},
{
"epoch": 16.29,
"learning_rate": 0.000283,
"loss": 2.9692,
"step": 570
},
{
"epoch": 16.57,
"learning_rate": 0.000288,
"loss": 2.9823,
"step": 580
},
{
"epoch": 16.86,
"learning_rate": 0.00029299999999999997,
"loss": 2.9235,
"step": 590
},
{
"epoch": 17.14,
"learning_rate": 0.000298,
"loss": 2.9359,
"step": 600
},
{
"epoch": 17.43,
"learning_rate": 0.000303,
"loss": 2.9438,
"step": 610
},
{
"epoch": 17.71,
"learning_rate": 0.000308,
"loss": 2.966,
"step": 620
},
{
"epoch": 18.0,
"learning_rate": 0.000313,
"loss": 2.9659,
"step": 630
},
{
"epoch": 18.29,
"learning_rate": 0.00031800000000000003,
"loss": 2.9329,
"step": 640
},
{
"epoch": 18.57,
"learning_rate": 0.000323,
"loss": 2.9605,
"step": 650
},
{
"epoch": 18.86,
"learning_rate": 0.000328,
"loss": 2.9415,
"step": 660
},
{
"epoch": 19.14,
"learning_rate": 0.000333,
"loss": 2.9615,
"step": 670
},
{
"epoch": 19.43,
"learning_rate": 0.00033800000000000003,
"loss": 2.9254,
"step": 680
},
{
"epoch": 19.71,
"learning_rate": 0.00034300000000000004,
"loss": 2.9979,
"step": 690
},
{
"epoch": 20.0,
"learning_rate": 0.000348,
"loss": 2.9817,
"step": 700
},
{
"epoch": 20.29,
"learning_rate": 0.00035299999999999996,
"loss": 2.9375,
"step": 710
},
{
"epoch": 20.57,
"learning_rate": 0.000358,
"loss": 2.9355,
"step": 720
},
{
"epoch": 20.86,
"learning_rate": 0.000363,
"loss": 2.9937,
"step": 730
},
{
"epoch": 21.14,
"learning_rate": 0.000368,
"loss": 2.943,
"step": 740
},
{
"epoch": 21.43,
"learning_rate": 0.000373,
"loss": 2.9456,
"step": 750
},
{
"epoch": 21.71,
"learning_rate": 0.000378,
"loss": 2.9607,
"step": 760
},
{
"epoch": 22.0,
"learning_rate": 0.00038300000000000004,
"loss": 2.9646,
"step": 770
},
{
"epoch": 22.29,
"learning_rate": 0.000388,
"loss": 2.9157,
"step": 780
},
{
"epoch": 22.57,
"learning_rate": 0.000393,
"loss": 2.9803,
"step": 790
},
{
"epoch": 22.86,
"learning_rate": 0.000398,
"loss": 2.9481,
"step": 800
},
{
"epoch": 23.14,
"learning_rate": 0.00040300000000000004,
"loss": 3.007,
"step": 810
},
{
"epoch": 23.43,
"learning_rate": 0.000408,
"loss": 2.9522,
"step": 820
},
{
"epoch": 23.71,
"learning_rate": 0.000413,
"loss": 2.9632,
"step": 830
},
{
"epoch": 24.0,
"learning_rate": 0.00041799999999999997,
"loss": 2.947,
"step": 840
},
{
"epoch": 24.29,
"learning_rate": 0.000423,
"loss": 2.929,
"step": 850
},
{
"epoch": 24.57,
"learning_rate": 0.000428,
"loss": 2.9513,
"step": 860
},
{
"epoch": 24.86,
"learning_rate": 0.000433,
"loss": 2.9646,
"step": 870
},
{
"epoch": 25.14,
"learning_rate": 0.000438,
"loss": 2.9381,
"step": 880
},
{
"epoch": 25.43,
"learning_rate": 0.00044300000000000003,
"loss": 2.931,
"step": 890
},
{
"epoch": 25.71,
"learning_rate": 0.000448,
"loss": 2.9765,
"step": 900
},
{
"epoch": 26.0,
"learning_rate": 0.000453,
"loss": 2.9666,
"step": 910
},
{
"epoch": 26.29,
"learning_rate": 0.000458,
"loss": 2.9378,
"step": 920
},
{
"epoch": 26.57,
"learning_rate": 0.00046300000000000003,
"loss": 2.9663,
"step": 930
},
{
"epoch": 26.86,
"learning_rate": 0.00046800000000000005,
"loss": 2.9334,
"step": 940
},
{
"epoch": 27.14,
"learning_rate": 0.000473,
"loss": 2.9717,
"step": 950
},
{
"epoch": 27.43,
"learning_rate": 0.00047799999999999996,
"loss": 2.9325,
"step": 960
},
{
"epoch": 27.71,
"learning_rate": 0.000483,
"loss": 2.9502,
"step": 970
},
{
"epoch": 28.0,
"learning_rate": 0.000488,
"loss": 2.9664,
"step": 980
},
{
"epoch": 28.29,
"learning_rate": 0.0004930000000000001,
"loss": 2.9713,
"step": 990
},
{
"epoch": 28.57,
"learning_rate": 0.000498,
"loss": 2.9537,
"step": 1000
},
{
"epoch": 28.57,
"eval_loss": 2.9358763694763184,
"eval_runtime": 65.8509,
"eval_samples_per_second": 51.541,
"eval_steps_per_second": 0.82,
"eval_wer": 1.0,
"step": 1000
},
{
"epoch": 28.86,
"learning_rate": 0.0004988,
"loss": 2.9211,
"step": 1010
},
{
"epoch": 29.14,
"learning_rate": 0.0004968,
"loss": 2.9524,
"step": 1020
},
{
"epoch": 29.43,
"learning_rate": 0.0004948,
"loss": 2.9605,
"step": 1030
},
{
"epoch": 29.71,
"learning_rate": 0.0004928,
"loss": 2.9451,
"step": 1040
},
{
"epoch": 30.0,
"learning_rate": 0.0004908,
"loss": 2.9759,
"step": 1050
},
{
"epoch": 30.29,
"learning_rate": 0.0004888000000000001,
"loss": 2.9576,
"step": 1060
},
{
"epoch": 30.57,
"learning_rate": 0.0004868,
"loss": 2.9476,
"step": 1070
},
{
"epoch": 30.86,
"learning_rate": 0.0004848,
"loss": 2.9421,
"step": 1080
},
{
"epoch": 31.14,
"learning_rate": 0.0004828,
"loss": 2.9508,
"step": 1090
},
{
"epoch": 31.43,
"learning_rate": 0.00048080000000000003,
"loss": 2.9596,
"step": 1100
},
{
"epoch": 31.71,
"learning_rate": 0.00047880000000000004,
"loss": 2.9904,
"step": 1110
},
{
"epoch": 32.0,
"learning_rate": 0.0004768,
"loss": 2.9336,
"step": 1120
},
{
"epoch": 32.29,
"learning_rate": 0.0004748,
"loss": 2.9416,
"step": 1130
},
{
"epoch": 32.57,
"learning_rate": 0.0004728,
"loss": 2.9831,
"step": 1140
},
{
"epoch": 32.86,
"learning_rate": 0.0004708,
"loss": 2.927,
"step": 1150
},
{
"epoch": 33.14,
"learning_rate": 0.0004688,
"loss": 2.9632,
"step": 1160
},
{
"epoch": 33.43,
"learning_rate": 0.0004668,
"loss": 2.9518,
"step": 1170
},
{
"epoch": 33.71,
"learning_rate": 0.0004648,
"loss": 2.9453,
"step": 1180
},
{
"epoch": 34.0,
"learning_rate": 0.0004628,
"loss": 2.9684,
"step": 1190
},
{
"epoch": 34.29,
"learning_rate": 0.0004608,
"loss": 2.9251,
"step": 1200
},
{
"epoch": 34.57,
"learning_rate": 0.0004588,
"loss": 2.9602,
"step": 1210
},
{
"epoch": 34.86,
"learning_rate": 0.0004568,
"loss": 2.9548,
"step": 1220
},
{
"epoch": 35.14,
"learning_rate": 0.0004548,
"loss": 2.9687,
"step": 1230
},
{
"epoch": 35.43,
"learning_rate": 0.0004528,
"loss": 2.9727,
"step": 1240
},
{
"epoch": 35.71,
"learning_rate": 0.0004508,
"loss": 2.9759,
"step": 1250
},
{
"epoch": 36.0,
"learning_rate": 0.0004488,
"loss": 2.9311,
"step": 1260
},
{
"epoch": 36.29,
"learning_rate": 0.00044679999999999996,
"loss": 2.9056,
"step": 1270
},
{
"epoch": 36.57,
"learning_rate": 0.00044479999999999997,
"loss": 2.9899,
"step": 1280
},
{
"epoch": 36.86,
"learning_rate": 0.00044280000000000003,
"loss": 2.9501,
"step": 1290
},
{
"epoch": 37.14,
"learning_rate": 0.00044080000000000004,
"loss": 2.9453,
"step": 1300
},
{
"epoch": 37.43,
"learning_rate": 0.00043880000000000004,
"loss": 2.9338,
"step": 1310
},
{
"epoch": 37.71,
"learning_rate": 0.00043680000000000005,
"loss": 2.9629,
"step": 1320
},
{
"epoch": 38.0,
"learning_rate": 0.00043480000000000005,
"loss": 2.9897,
"step": 1330
},
{
"epoch": 38.29,
"learning_rate": 0.0004328,
"loss": 2.9602,
"step": 1340
},
{
"epoch": 38.57,
"learning_rate": 0.0004308,
"loss": 2.9804,
"step": 1350
},
{
"epoch": 38.86,
"learning_rate": 0.0004288,
"loss": 2.9159,
"step": 1360
},
{
"epoch": 39.14,
"learning_rate": 0.0004268,
"loss": 2.9451,
"step": 1370
},
{
"epoch": 39.43,
"learning_rate": 0.0004248,
"loss": 2.9379,
"step": 1380
},
{
"epoch": 39.71,
"learning_rate": 0.00042280000000000003,
"loss": 2.9911,
"step": 1390
},
{
"epoch": 40.0,
"learning_rate": 0.00042080000000000004,
"loss": 2.9358,
"step": 1400
},
{
"epoch": 40.29,
"learning_rate": 0.0004188,
"loss": 2.9187,
"step": 1410
},
{
"epoch": 40.57,
"learning_rate": 0.0004168,
"loss": 3.0139,
"step": 1420
},
{
"epoch": 40.86,
"learning_rate": 0.0004148,
"loss": 2.9355,
"step": 1430
},
{
"epoch": 41.14,
"learning_rate": 0.0004128,
"loss": 2.9418,
"step": 1440
},
{
"epoch": 41.43,
"learning_rate": 0.0004108,
"loss": 2.9447,
"step": 1450
},
{
"epoch": 41.71,
"learning_rate": 0.0004088,
"loss": 2.9398,
"step": 1460
},
{
"epoch": 42.0,
"learning_rate": 0.0004068,
"loss": 2.9802,
"step": 1470
},
{
"epoch": 42.29,
"learning_rate": 0.00040480000000000003,
"loss": 2.9597,
"step": 1480
},
{
"epoch": 42.57,
"learning_rate": 0.0004028,
"loss": 2.9553,
"step": 1490
},
{
"epoch": 42.86,
"learning_rate": 0.0004008,
"loss": 2.9602,
"step": 1500
},
{
"epoch": 42.86,
"eval_loss": 2.9302072525024414,
"eval_runtime": 66.1205,
"eval_samples_per_second": 51.331,
"eval_steps_per_second": 0.817,
"eval_wer": 1.0,
"step": 1500
},
{
"epoch": 43.14,
"learning_rate": 0.0003988,
"loss": 2.9544,
"step": 1510
},
{
"epoch": 43.43,
"learning_rate": 0.0003968,
"loss": 2.9437,
"step": 1520
},
{
"epoch": 43.71,
"learning_rate": 0.0003948,
"loss": 2.942,
"step": 1530
},
{
"epoch": 44.0,
"learning_rate": 0.0003928,
"loss": 2.9629,
"step": 1540
},
{
"epoch": 44.29,
"learning_rate": 0.0003908,
"loss": 2.9859,
"step": 1550
},
{
"epoch": 44.57,
"learning_rate": 0.00038879999999999996,
"loss": 2.9469,
"step": 1560
},
{
"epoch": 44.86,
"learning_rate": 0.00038679999999999997,
"loss": 2.9093,
"step": 1570
},
{
"epoch": 45.14,
"learning_rate": 0.0003848,
"loss": 2.9465,
"step": 1580
},
{
"epoch": 45.43,
"learning_rate": 0.0003828,
"loss": 2.9415,
"step": 1590
},
{
"epoch": 45.71,
"learning_rate": 0.00038080000000000004,
"loss": 2.984,
"step": 1600
},
{
"epoch": 46.0,
"learning_rate": 0.00037880000000000005,
"loss": 2.9673,
"step": 1610
},
{
"epoch": 46.29,
"learning_rate": 0.00037680000000000005,
"loss": 2.9158,
"step": 1620
},
{
"epoch": 46.57,
"learning_rate": 0.0003748,
"loss": 2.9977,
"step": 1630
},
{
"epoch": 46.86,
"learning_rate": 0.0003728,
"loss": 2.9243,
"step": 1640
},
{
"epoch": 47.14,
"learning_rate": 0.0003708,
"loss": 2.9622,
"step": 1650
},
{
"epoch": 47.43,
"learning_rate": 0.0003688,
"loss": 2.9526,
"step": 1660
},
{
"epoch": 47.71,
"learning_rate": 0.0003668,
"loss": 2.97,
"step": 1670
},
{
"epoch": 48.0,
"learning_rate": 0.00036480000000000003,
"loss": 2.9454,
"step": 1680
},
{
"epoch": 48.29,
"learning_rate": 0.00036280000000000004,
"loss": 2.9616,
"step": 1690
},
{
"epoch": 48.57,
"learning_rate": 0.00036080000000000004,
"loss": 2.9774,
"step": 1700
},
{
"epoch": 48.86,
"learning_rate": 0.0003588,
"loss": 2.9154,
"step": 1710
},
{
"epoch": 49.14,
"learning_rate": 0.0003568,
"loss": 2.9377,
"step": 1720
},
{
"epoch": 49.43,
"learning_rate": 0.0003548,
"loss": 2.9443,
"step": 1730
},
{
"epoch": 49.71,
"learning_rate": 0.0003528,
"loss": 2.9345,
"step": 1740
},
{
"epoch": 50.0,
"learning_rate": 0.0003508,
"loss": 2.9977,
"step": 1750
},
{
"epoch": 50.29,
"learning_rate": 0.0003488,
"loss": 2.9369,
"step": 1760
},
{
"epoch": 50.57,
"learning_rate": 0.0003468,
"loss": 2.9816,
"step": 1770
},
{
"epoch": 50.86,
"learning_rate": 0.0003448,
"loss": 2.9333,
"step": 1780
},
{
"epoch": 51.14,
"learning_rate": 0.0003428,
"loss": 2.9837,
"step": 1790
},
{
"epoch": 51.43,
"learning_rate": 0.0003408,
"loss": 2.9518,
"step": 1800
},
{
"epoch": 51.71,
"learning_rate": 0.0003388,
"loss": 2.9514,
"step": 1810
},
{
"epoch": 52.0,
"learning_rate": 0.0003368,
"loss": 2.9507,
"step": 1820
},
{
"epoch": 52.29,
"learning_rate": 0.0003348,
"loss": 2.9371,
"step": 1830
},
{
"epoch": 52.57,
"learning_rate": 0.0003328,
"loss": 2.9755,
"step": 1840
},
{
"epoch": 52.86,
"learning_rate": 0.00033079999999999996,
"loss": 2.9404,
"step": 1850
},
{
"epoch": 53.14,
"learning_rate": 0.00032879999999999997,
"loss": 2.9284,
"step": 1860
},
{
"epoch": 53.43,
"learning_rate": 0.0003268,
"loss": 2.9372,
"step": 1870
},
{
"epoch": 53.71,
"learning_rate": 0.0003248,
"loss": 2.9748,
"step": 1880
},
{
"epoch": 54.0,
"learning_rate": 0.0003228,
"loss": 2.9814,
"step": 1890
},
{
"epoch": 54.29,
"learning_rate": 0.0003208,
"loss": 2.9361,
"step": 1900
},
{
"epoch": 54.57,
"learning_rate": 0.0003188,
"loss": 2.9629,
"step": 1910
},
{
"epoch": 54.86,
"learning_rate": 0.00031680000000000006,
"loss": 2.9547,
"step": 1920
},
{
"epoch": 55.14,
"learning_rate": 0.0003148,
"loss": 2.914,
"step": 1930
},
{
"epoch": 55.43,
"learning_rate": 0.0003128,
"loss": 2.9773,
"step": 1940
},
{
"epoch": 55.71,
"learning_rate": 0.0003108,
"loss": 2.9639,
"step": 1950
},
{
"epoch": 56.0,
"learning_rate": 0.0003088,
"loss": 2.9579,
"step": 1960
},
{
"epoch": 56.29,
"learning_rate": 0.00030680000000000003,
"loss": 2.9494,
"step": 1970
},
{
"epoch": 56.57,
"learning_rate": 0.00030480000000000004,
"loss": 2.9539,
"step": 1980
},
{
"epoch": 56.86,
"learning_rate": 0.00030280000000000004,
"loss": 2.9248,
"step": 1990
},
{
"epoch": 57.14,
"learning_rate": 0.0003008,
"loss": 2.9586,
"step": 2000
},
{
"epoch": 57.14,
"eval_loss": 2.9298441410064697,
"eval_runtime": 65.7809,
"eval_samples_per_second": 51.596,
"eval_steps_per_second": 0.821,
"eval_wer": 1.0,
"step": 2000
},
{
"epoch": 57.43,
"learning_rate": 0.0002988,
"loss": 2.9489,
"step": 2010
},
{
"epoch": 57.71,
"learning_rate": 0.0002968,
"loss": 2.9719,
"step": 2020
},
{
"epoch": 58.0,
"learning_rate": 0.0002948,
"loss": 2.9337,
"step": 2030
},
{
"epoch": 58.29,
"learning_rate": 0.0002928,
"loss": 2.9317,
"step": 2040
},
{
"epoch": 58.57,
"learning_rate": 0.0002908,
"loss": 2.9499,
"step": 2050
},
{
"epoch": 58.86,
"learning_rate": 0.0002888,
"loss": 2.9416,
"step": 2060
},
{
"epoch": 59.14,
"learning_rate": 0.00028680000000000003,
"loss": 2.9647,
"step": 2070
},
{
"epoch": 59.43,
"learning_rate": 0.0002848,
"loss": 2.971,
"step": 2080
},
{
"epoch": 59.71,
"learning_rate": 0.0002828,
"loss": 2.9469,
"step": 2090
},
{
"epoch": 60.0,
"learning_rate": 0.0002808,
"loss": 2.9519,
"step": 2100
},
{
"epoch": 60.29,
"learning_rate": 0.0002788,
"loss": 2.923,
"step": 2110
},
{
"epoch": 60.57,
"learning_rate": 0.0002768,
"loss": 2.9823,
"step": 2120
},
{
"epoch": 60.86,
"learning_rate": 0.0002748,
"loss": 2.9494,
"step": 2130
},
{
"epoch": 61.14,
"learning_rate": 0.0002728,
"loss": 2.9558,
"step": 2140
},
{
"epoch": 61.43,
"learning_rate": 0.00027079999999999997,
"loss": 2.9288,
"step": 2150
},
{
"epoch": 61.71,
"learning_rate": 0.0002688,
"loss": 2.9741,
"step": 2160
},
{
"epoch": 62.0,
"learning_rate": 0.0002668,
"loss": 2.9881,
"step": 2170
},
{
"epoch": 62.29,
"learning_rate": 0.0002648,
"loss": 2.9711,
"step": 2180
},
{
"epoch": 62.57,
"learning_rate": 0.0002628,
"loss": 2.9422,
"step": 2190
},
{
"epoch": 62.86,
"learning_rate": 0.0002608,
"loss": 2.9263,
"step": 2200
},
{
"epoch": 63.14,
"learning_rate": 0.0002588,
"loss": 2.9625,
"step": 2210
},
{
"epoch": 63.43,
"learning_rate": 0.00025679999999999995,
"loss": 2.9551,
"step": 2220
},
{
"epoch": 63.71,
"learning_rate": 0.0002548,
"loss": 2.9793,
"step": 2230
},
{
"epoch": 64.0,
"learning_rate": 0.0002528,
"loss": 2.9382,
"step": 2240
},
{
"epoch": 64.29,
"learning_rate": 0.0002508,
"loss": 2.924,
"step": 2250
},
{
"epoch": 64.57,
"learning_rate": 0.0002488,
"loss": 2.9732,
"step": 2260
},
{
"epoch": 64.86,
"learning_rate": 0.0002468,
"loss": 2.9312,
"step": 2270
},
{
"epoch": 65.14,
"learning_rate": 0.0002448,
"loss": 2.9755,
"step": 2280
},
{
"epoch": 65.43,
"learning_rate": 0.0002428,
"loss": 2.9621,
"step": 2290
},
{
"epoch": 65.71,
"learning_rate": 0.0002408,
"loss": 2.9642,
"step": 2300
},
{
"epoch": 66.0,
"learning_rate": 0.0002388,
"loss": 2.9313,
"step": 2310
},
{
"epoch": 66.29,
"learning_rate": 0.0002368,
"loss": 2.9545,
"step": 2320
},
{
"epoch": 66.57,
"learning_rate": 0.00023480000000000002,
"loss": 2.9405,
"step": 2330
},
{
"epoch": 66.86,
"learning_rate": 0.00023280000000000002,
"loss": 2.944,
"step": 2340
},
{
"epoch": 67.14,
"learning_rate": 0.0002308,
"loss": 2.9489,
"step": 2350
},
{
"epoch": 67.43,
"learning_rate": 0.0002288,
"loss": 2.9503,
"step": 2360
},
{
"epoch": 67.71,
"learning_rate": 0.0002268,
"loss": 2.9919,
"step": 2370
},
{
"epoch": 68.0,
"learning_rate": 0.00022480000000000002,
"loss": 2.9345,
"step": 2380
},
{
"epoch": 68.29,
"learning_rate": 0.0002228,
"loss": 2.9068,
"step": 2390
},
{
"epoch": 68.57,
"learning_rate": 0.0002208,
"loss": 2.98,
"step": 2400
},
{
"epoch": 68.86,
"learning_rate": 0.0002188,
"loss": 2.9278,
"step": 2410
},
{
"epoch": 69.14,
"learning_rate": 0.00021679999999999998,
"loss": 2.992,
"step": 2420
},
{
"epoch": 69.43,
"learning_rate": 0.0002148,
"loss": 2.9384,
"step": 2430
},
{
"epoch": 69.71,
"learning_rate": 0.0002128,
"loss": 2.9699,
"step": 2440
},
{
"epoch": 70.0,
"learning_rate": 0.0002108,
"loss": 2.9611,
"step": 2450
},
{
"epoch": 70.29,
"learning_rate": 0.0002088,
"loss": 2.9521,
"step": 2460
},
{
"epoch": 70.57,
"learning_rate": 0.0002068,
"loss": 2.9557,
"step": 2470
},
{
"epoch": 70.86,
"learning_rate": 0.00020480000000000002,
"loss": 2.9253,
"step": 2480
},
{
"epoch": 71.14,
"learning_rate": 0.00020280000000000002,
"loss": 2.9362,
"step": 2490
},
{
"epoch": 71.43,
"learning_rate": 0.0002008,
"loss": 2.9331,
"step": 2500
},
{
"epoch": 71.43,
"eval_loss": 2.9313948154449463,
"eval_runtime": 65.7925,
"eval_samples_per_second": 51.586,
"eval_steps_per_second": 0.821,
"eval_wer": 1.0,
"step": 2500
},
{
"epoch": 71.71,
"learning_rate": 0.0001988,
"loss": 3.0128,
"step": 2510
},
{
"epoch": 72.0,
"learning_rate": 0.0001968,
"loss": 2.9246,
"step": 2520
},
{
"epoch": 72.29,
"learning_rate": 0.0001948,
"loss": 2.94,
"step": 2530
},
{
"epoch": 72.57,
"learning_rate": 0.0001928,
"loss": 2.9585,
"step": 2540
},
{
"epoch": 72.86,
"learning_rate": 0.0001908,
"loss": 2.9438,
"step": 2550
},
{
"epoch": 73.14,
"learning_rate": 0.0001888,
"loss": 2.9463,
"step": 2560
},
{
"epoch": 73.43,
"learning_rate": 0.0001868,
"loss": 2.9362,
"step": 2570
},
{
"epoch": 73.71,
"learning_rate": 0.0001848,
"loss": 2.9395,
"step": 2580
},
{
"epoch": 74.0,
"learning_rate": 0.0001828,
"loss": 2.9878,
"step": 2590
},
{
"epoch": 74.29,
"learning_rate": 0.0001808,
"loss": 2.9226,
"step": 2600
},
{
"epoch": 74.57,
"learning_rate": 0.00017879999999999998,
"loss": 2.9537,
"step": 2610
},
{
"epoch": 74.86,
"learning_rate": 0.00017680000000000001,
"loss": 2.9453,
"step": 2620
},
{
"epoch": 75.14,
"learning_rate": 0.00017480000000000002,
"loss": 2.9661,
"step": 2630
},
{
"epoch": 75.43,
"learning_rate": 0.00017280000000000003,
"loss": 2.9578,
"step": 2640
},
{
"epoch": 75.71,
"learning_rate": 0.0001708,
"loss": 2.9374,
"step": 2650
},
{
"epoch": 76.0,
"learning_rate": 0.0001688,
"loss": 2.9715,
"step": 2660
},
{
"epoch": 76.29,
"learning_rate": 0.00016680000000000002,
"loss": 2.9757,
"step": 2670
},
{
"epoch": 76.57,
"learning_rate": 0.0001648,
"loss": 2.9317,
"step": 2680
},
{
"epoch": 76.86,
"learning_rate": 0.0001628,
"loss": 2.9299,
"step": 2690
},
{
"epoch": 77.14,
"learning_rate": 0.0001608,
"loss": 2.9397,
"step": 2700
},
{
"epoch": 77.43,
"learning_rate": 0.0001588,
"loss": 2.9614,
"step": 2710
},
{
"epoch": 77.71,
"learning_rate": 0.0001568,
"loss": 2.9265,
"step": 2720
},
{
"epoch": 78.0,
"learning_rate": 0.0001548,
"loss": 2.9905,
"step": 2730
},
{
"epoch": 78.29,
"learning_rate": 0.0001528,
"loss": 2.9141,
"step": 2740
},
{
"epoch": 78.57,
"learning_rate": 0.00015079999999999998,
"loss": 2.9771,
"step": 2750
},
{
"epoch": 78.86,
"learning_rate": 0.00014879999999999998,
"loss": 2.9364,
"step": 2760
},
{
"epoch": 79.14,
"learning_rate": 0.00014680000000000002,
"loss": 2.9487,
"step": 2770
},
{
"epoch": 79.43,
"learning_rate": 0.00014480000000000002,
"loss": 2.9566,
"step": 2780
},
{
"epoch": 79.71,
"learning_rate": 0.0001428,
"loss": 2.9481,
"step": 2790
},
{
"epoch": 80.0,
"learning_rate": 0.0001408,
"loss": 2.9693,
"step": 2800
},
{
"epoch": 80.29,
"learning_rate": 0.0001388,
"loss": 2.9414,
"step": 2810
},
{
"epoch": 80.57,
"learning_rate": 0.00013680000000000002,
"loss": 2.9957,
"step": 2820
},
{
"epoch": 80.86,
"learning_rate": 0.0001348,
"loss": 2.934,
"step": 2830
},
{
"epoch": 81.14,
"learning_rate": 0.0001328,
"loss": 2.9112,
"step": 2840
},
{
"epoch": 81.43,
"learning_rate": 0.0001308,
"loss": 2.9581,
"step": 2850
},
{
"epoch": 81.71,
"learning_rate": 0.00012880000000000001,
"loss": 2.9418,
"step": 2860
},
{
"epoch": 82.0,
"learning_rate": 0.0001268,
"loss": 2.977,
"step": 2870
},
{
"epoch": 82.29,
"learning_rate": 0.0001248,
"loss": 2.9398,
"step": 2880
},
{
"epoch": 82.57,
"learning_rate": 0.0001228,
"loss": 2.9491,
"step": 2890
},
{
"epoch": 82.86,
"learning_rate": 0.00012080000000000001,
"loss": 2.9556,
"step": 2900
},
{
"epoch": 83.14,
"learning_rate": 0.0001188,
"loss": 2.9354,
"step": 2910
},
{
"epoch": 83.43,
"learning_rate": 0.0001168,
"loss": 2.9571,
"step": 2920
},
{
"epoch": 83.71,
"learning_rate": 0.0001148,
"loss": 2.934,
"step": 2930
},
{
"epoch": 84.0,
"learning_rate": 0.0001128,
"loss": 2.9884,
"step": 2940
},
{
"epoch": 84.29,
"learning_rate": 0.0001108,
"loss": 2.9358,
"step": 2950
},
{
"epoch": 84.57,
"learning_rate": 0.0001088,
"loss": 2.9325,
"step": 2960
},
{
"epoch": 84.86,
"learning_rate": 0.00010680000000000001,
"loss": 2.9796,
"step": 2970
},
{
"epoch": 85.14,
"learning_rate": 0.00010480000000000001,
"loss": 2.9281,
"step": 2980
},
{
"epoch": 85.43,
"learning_rate": 0.0001028,
"loss": 2.9646,
"step": 2990
},
{
"epoch": 85.71,
"learning_rate": 0.0001008,
"loss": 2.9321,
"step": 3000
},
{
"epoch": 85.71,
"eval_loss": 2.9303746223449707,
"eval_runtime": 66.1157,
"eval_samples_per_second": 51.334,
"eval_steps_per_second": 0.817,
"eval_wer": 1.0,
"step": 3000
},
{
"epoch": 86.0,
"learning_rate": 9.88e-05,
"loss": 2.9703,
"step": 3010
},
{
"epoch": 86.29,
"learning_rate": 9.68e-05,
"loss": 2.9676,
"step": 3020
},
{
"epoch": 86.57,
"learning_rate": 9.48e-05,
"loss": 2.9489,
"step": 3030
},
{
"epoch": 86.86,
"learning_rate": 9.279999999999999e-05,
"loss": 2.927,
"step": 3040
},
{
"epoch": 87.14,
"learning_rate": 9.080000000000001e-05,
"loss": 2.9381,
"step": 3050
},
{
"epoch": 87.43,
"learning_rate": 8.88e-05,
"loss": 2.9631,
"step": 3060
},
{
"epoch": 87.71,
"learning_rate": 8.680000000000001e-05,
"loss": 2.9556,
"step": 3070
},
{
"epoch": 88.0,
"learning_rate": 8.48e-05,
"loss": 2.9472,
"step": 3080
},
{
"epoch": 88.29,
"learning_rate": 8.280000000000001e-05,
"loss": 2.9214,
"step": 3090
},
{
"epoch": 88.57,
"learning_rate": 8.08e-05,
"loss": 2.9877,
"step": 3100
},
{
"epoch": 88.86,
"learning_rate": 7.879999999999999e-05,
"loss": 2.9664,
"step": 3110
},
{
"epoch": 89.14,
"learning_rate": 7.68e-05,
"loss": 2.9161,
"step": 3120
},
{
"epoch": 89.43,
"learning_rate": 7.48e-05,
"loss": 2.9676,
"step": 3130
},
{
"epoch": 89.71,
"learning_rate": 7.280000000000001e-05,
"loss": 2.9451,
"step": 3140
},
{
"epoch": 90.0,
"learning_rate": 7.08e-05,
"loss": 2.956,
"step": 3150
},
{
"epoch": 90.29,
"learning_rate": 6.88e-05,
"loss": 2.9261,
"step": 3160
},
{
"epoch": 90.57,
"learning_rate": 6.68e-05,
"loss": 2.9622,
"step": 3170
},
{
"epoch": 90.86,
"learning_rate": 6.48e-05,
"loss": 2.9435,
"step": 3180
},
{
"epoch": 91.14,
"learning_rate": 6.28e-05,
"loss": 2.9906,
"step": 3190
},
{
"epoch": 91.43,
"learning_rate": 6.08e-05,
"loss": 2.925,
"step": 3200
},
{
"epoch": 91.71,
"learning_rate": 5.88e-05,
"loss": 2.9326,
"step": 3210
},
{
"epoch": 92.0,
"learning_rate": 5.6800000000000005e-05,
"loss": 2.9782,
"step": 3220
},
{
"epoch": 92.29,
"learning_rate": 5.4800000000000004e-05,
"loss": 2.9054,
"step": 3230
},
{
"epoch": 92.57,
"learning_rate": 5.28e-05,
"loss": 2.9512,
"step": 3240
},
{
"epoch": 92.86,
"learning_rate": 5.08e-05,
"loss": 2.9623,
"step": 3250
},
{
"epoch": 93.14,
"learning_rate": 4.880000000000001e-05,
"loss": 2.9675,
"step": 3260
},
{
"epoch": 93.43,
"learning_rate": 4.68e-05,
"loss": 2.9418,
"step": 3270
},
{
"epoch": 93.71,
"learning_rate": 4.48e-05,
"loss": 2.9781,
"step": 3280
},
{
"epoch": 94.0,
"learning_rate": 4.28e-05,
"loss": 2.929,
"step": 3290
},
{
"epoch": 94.29,
"learning_rate": 4.08e-05,
"loss": 2.9389,
"step": 3300
},
{
"epoch": 94.57,
"learning_rate": 3.88e-05,
"loss": 2.9574,
"step": 3310
},
{
"epoch": 94.86,
"learning_rate": 3.68e-05,
"loss": 2.9591,
"step": 3320
},
{
"epoch": 95.14,
"learning_rate": 3.48e-05,
"loss": 2.9468,
"step": 3330
},
{
"epoch": 95.43,
"learning_rate": 3.2800000000000004e-05,
"loss": 2.9147,
"step": 3340
},
{
"epoch": 95.71,
"learning_rate": 3.08e-05,
"loss": 2.9729,
"step": 3350
},
{
"epoch": 96.0,
"learning_rate": 2.88e-05,
"loss": 2.9574,
"step": 3360
},
{
"epoch": 96.29,
"learning_rate": 2.68e-05,
"loss": 2.9449,
"step": 3370
},
{
"epoch": 96.57,
"learning_rate": 2.48e-05,
"loss": 2.9767,
"step": 3380
},
{
"epoch": 96.86,
"learning_rate": 2.2800000000000002e-05,
"loss": 2.9346,
"step": 3390
},
{
"epoch": 97.14,
"learning_rate": 2.08e-05,
"loss": 2.9423,
"step": 3400
},
{
"epoch": 97.43,
"learning_rate": 1.88e-05,
"loss": 2.9399,
"step": 3410
},
{
"epoch": 97.71,
"learning_rate": 1.68e-05,
"loss": 2.9729,
"step": 3420
},
{
"epoch": 98.0,
"learning_rate": 1.48e-05,
"loss": 2.9432,
"step": 3430
},
{
"epoch": 98.29,
"learning_rate": 1.2800000000000001e-05,
"loss": 2.9322,
"step": 3440
},
{
"epoch": 98.57,
"learning_rate": 1.0800000000000002e-05,
"loss": 2.989,
"step": 3450
},
{
"epoch": 98.86,
"learning_rate": 8.8e-06,
"loss": 2.9351,
"step": 3460
},
{
"epoch": 99.14,
"learning_rate": 6.8e-06,
"loss": 2.9286,
"step": 3470
},
{
"epoch": 99.43,
"learning_rate": 4.8e-06,
"loss": 2.9326,
"step": 3480
},
{
"epoch": 99.71,
"learning_rate": 2.8e-06,
"loss": 2.962,
"step": 3490
},
{
"epoch": 100.0,
"learning_rate": 8.000000000000001e-07,
"loss": 2.9652,
"step": 3500
},
{
"epoch": 100.0,
"eval_loss": 2.9303946495056152,
"eval_runtime": 66.0696,
"eval_samples_per_second": 51.37,
"eval_steps_per_second": 0.817,
"eval_wer": 1.0,
"step": 3500
},
{
"epoch": 100.0,
"step": 3500,
"total_flos": 3.191123523691086e+19,
"train_loss": 3.4216162567138673,
"train_runtime": 5111.6283,
"train_samples_per_second": 42.922,
"train_steps_per_second": 0.685
}
],
"max_steps": 3500,
"num_train_epochs": 100,
"total_flos": 3.191123523691086e+19,
"trial_name": null,
"trial_params": null
}