{
"best_metric": 0.7635990420800547,
"best_model_checkpoint": "./whisper-small-dialect_levantine/checkpoint-4000",
"epoch": 3.5087719298245617,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 105.01622009277344,
"learning_rate": 5.000000000000001e-07,
"loss": 4.4756,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 38.415096282958984,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.8473,
"step": 50
},
{
"epoch": 0.05,
"grad_norm": 32.70545959472656,
"learning_rate": 1.5e-06,
"loss": 3.1106,
"step": 75
},
{
"epoch": 0.07,
"grad_norm": 24.912517547607422,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.3441,
"step": 100
},
{
"epoch": 0.09,
"grad_norm": 21.164583206176758,
"learning_rate": 2.5e-06,
"loss": 2.0976,
"step": 125
},
{
"epoch": 0.11,
"grad_norm": 28.40788459777832,
"learning_rate": 3e-06,
"loss": 1.9012,
"step": 150
},
{
"epoch": 0.12,
"grad_norm": 27.274938583374023,
"learning_rate": 3.5e-06,
"loss": 1.8956,
"step": 175
},
{
"epoch": 0.14,
"grad_norm": 20.819931030273438,
"learning_rate": 4.000000000000001e-06,
"loss": 1.6958,
"step": 200
},
{
"epoch": 0.16,
"grad_norm": 27.277860641479492,
"learning_rate": 4.5e-06,
"loss": 1.7819,
"step": 225
},
{
"epoch": 0.18,
"grad_norm": 22.838720321655273,
"learning_rate": 5e-06,
"loss": 1.6489,
"step": 250
},
{
"epoch": 0.19,
"grad_norm": 23.70868492126465,
"learning_rate": 5.500000000000001e-06,
"loss": 1.5876,
"step": 275
},
{
"epoch": 0.21,
"grad_norm": 25.0347900390625,
"learning_rate": 6e-06,
"loss": 1.6304,
"step": 300
},
{
"epoch": 0.23,
"grad_norm": 23.519012451171875,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.324,
"step": 325
},
{
"epoch": 0.25,
"grad_norm": 21.76858901977539,
"learning_rate": 7e-06,
"loss": 1.3019,
"step": 350
},
{
"epoch": 0.26,
"grad_norm": 27.329273223876953,
"learning_rate": 7.500000000000001e-06,
"loss": 1.1865,
"step": 375
},
{
"epoch": 0.28,
"grad_norm": 23.374053955078125,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2699,
"step": 400
},
{
"epoch": 0.3,
"grad_norm": 23.69354820251465,
"learning_rate": 8.5e-06,
"loss": 1.2327,
"step": 425
},
{
"epoch": 0.32,
"grad_norm": 22.70684814453125,
"learning_rate": 9e-06,
"loss": 1.2815,
"step": 450
},
{
"epoch": 0.33,
"grad_norm": 20.093311309814453,
"learning_rate": 9.5e-06,
"loss": 1.2482,
"step": 475
},
{
"epoch": 0.35,
"grad_norm": 21.54157066345215,
"learning_rate": 1e-05,
"loss": 1.2527,
"step": 500
},
{
"epoch": 0.37,
"grad_norm": 20.645353317260742,
"learning_rate": 9.944444444444445e-06,
"loss": 1.2954,
"step": 525
},
{
"epoch": 0.39,
"grad_norm": 17.942337036132812,
"learning_rate": 9.88888888888889e-06,
"loss": 1.2958,
"step": 550
},
{
"epoch": 0.4,
"grad_norm": 30.65532112121582,
"learning_rate": 9.833333333333333e-06,
"loss": 1.2974,
"step": 575
},
{
"epoch": 0.42,
"grad_norm": 25.07595443725586,
"learning_rate": 9.777777777777779e-06,
"loss": 1.289,
"step": 600
},
{
"epoch": 0.44,
"grad_norm": 20.494924545288086,
"learning_rate": 9.722222222222223e-06,
"loss": 1.1602,
"step": 625
},
{
"epoch": 0.46,
"grad_norm": 15.94078540802002,
"learning_rate": 9.666666666666667e-06,
"loss": 1.2113,
"step": 650
},
{
"epoch": 0.47,
"grad_norm": 21.14436149597168,
"learning_rate": 9.611111111111112e-06,
"loss": 1.2642,
"step": 675
},
{
"epoch": 0.49,
"grad_norm": 23.884052276611328,
"learning_rate": 9.555555555555556e-06,
"loss": 1.1667,
"step": 700
},
{
"epoch": 0.51,
"grad_norm": 23.286977767944336,
"learning_rate": 9.5e-06,
"loss": 1.1886,
"step": 725
},
{
"epoch": 0.53,
"grad_norm": 25.439273834228516,
"learning_rate": 9.444444444444445e-06,
"loss": 1.1659,
"step": 750
},
{
"epoch": 0.54,
"grad_norm": 21.63466453552246,
"learning_rate": 9.38888888888889e-06,
"loss": 1.3864,
"step": 775
},
{
"epoch": 0.56,
"grad_norm": 19.906169891357422,
"learning_rate": 9.333333333333334e-06,
"loss": 1.1403,
"step": 800
},
{
"epoch": 0.58,
"grad_norm": 24.829113006591797,
"learning_rate": 9.277777777777778e-06,
"loss": 1.1671,
"step": 825
},
{
"epoch": 0.6,
"grad_norm": 17.034282684326172,
"learning_rate": 9.222222222222224e-06,
"loss": 1.1379,
"step": 850
},
{
"epoch": 0.61,
"grad_norm": 18.528812408447266,
"learning_rate": 9.166666666666666e-06,
"loss": 1.1063,
"step": 875
},
{
"epoch": 0.63,
"grad_norm": 23.627670288085938,
"learning_rate": 9.111111111111112e-06,
"loss": 1.2375,
"step": 900
},
{
"epoch": 0.65,
"grad_norm": 17.76607894897461,
"learning_rate": 9.055555555555556e-06,
"loss": 1.1779,
"step": 925
},
{
"epoch": 0.67,
"grad_norm": 21.990543365478516,
"learning_rate": 9e-06,
"loss": 1.1007,
"step": 950
},
{
"epoch": 0.68,
"grad_norm": 21.411767959594727,
"learning_rate": 8.944444444444446e-06,
"loss": 1.1753,
"step": 975
},
{
"epoch": 0.7,
"grad_norm": 22.59610939025879,
"learning_rate": 8.888888888888888e-06,
"loss": 1.1945,
"step": 1000
},
{
"epoch": 0.7,
"eval_loss": 1.1679444313049316,
"eval_runtime": 866.814,
"eval_samples_per_second": 3.288,
"eval_steps_per_second": 0.412,
"eval_wer": 0.8150872391378721,
"step": 1000
},
{
"epoch": 0.72,
"grad_norm": 17.922718048095703,
"learning_rate": 8.833333333333334e-06,
"loss": 1.1264,
"step": 1025
},
{
"epoch": 0.74,
"grad_norm": 22.84149742126465,
"learning_rate": 8.777777777777778e-06,
"loss": 1.1382,
"step": 1050
},
{
"epoch": 0.75,
"grad_norm": 21.807554244995117,
"learning_rate": 8.722222222222224e-06,
"loss": 1.1199,
"step": 1075
},
{
"epoch": 0.77,
"grad_norm": 15.542016983032227,
"learning_rate": 8.666666666666668e-06,
"loss": 1.1283,
"step": 1100
},
{
"epoch": 0.79,
"grad_norm": 19.53961181640625,
"learning_rate": 8.611111111111112e-06,
"loss": 1.1479,
"step": 1125
},
{
"epoch": 0.81,
"grad_norm": 14.435591697692871,
"learning_rate": 8.555555555555556e-06,
"loss": 1.2046,
"step": 1150
},
{
"epoch": 0.82,
"grad_norm": 15.91525650024414,
"learning_rate": 8.5e-06,
"loss": 1.2012,
"step": 1175
},
{
"epoch": 0.84,
"grad_norm": 25.986412048339844,
"learning_rate": 8.444444444444446e-06,
"loss": 1.1793,
"step": 1200
},
{
"epoch": 0.86,
"grad_norm": 19.79583740234375,
"learning_rate": 8.38888888888889e-06,
"loss": 1.1427,
"step": 1225
},
{
"epoch": 0.88,
"grad_norm": 23.57172966003418,
"learning_rate": 8.333333333333334e-06,
"loss": 1.1072,
"step": 1250
},
{
"epoch": 0.89,
"grad_norm": 22.90821647644043,
"learning_rate": 8.277777777777778e-06,
"loss": 1.1761,
"step": 1275
},
{
"epoch": 0.91,
"grad_norm": 27.41729736328125,
"learning_rate": 8.222222222222222e-06,
"loss": 1.1739,
"step": 1300
},
{
"epoch": 0.93,
"grad_norm": 30.00156593322754,
"learning_rate": 8.166666666666668e-06,
"loss": 1.2369,
"step": 1325
},
{
"epoch": 0.95,
"grad_norm": 22.23008155822754,
"learning_rate": 8.111111111111112e-06,
"loss": 1.1685,
"step": 1350
},
{
"epoch": 0.96,
"grad_norm": 18.34453582763672,
"learning_rate": 8.055555555555557e-06,
"loss": 1.1862,
"step": 1375
},
{
"epoch": 0.98,
"grad_norm": 21.947595596313477,
"learning_rate": 8.000000000000001e-06,
"loss": 1.238,
"step": 1400
},
{
"epoch": 1.0,
"grad_norm": 28.159698486328125,
"learning_rate": 7.944444444444445e-06,
"loss": 1.0259,
"step": 1425
},
{
"epoch": 1.02,
"grad_norm": 13.005045890808105,
"learning_rate": 7.88888888888889e-06,
"loss": 0.843,
"step": 1450
},
{
"epoch": 1.04,
"grad_norm": 14.627166748046875,
"learning_rate": 7.833333333333333e-06,
"loss": 0.8975,
"step": 1475
},
{
"epoch": 1.05,
"grad_norm": 12.167830467224121,
"learning_rate": 7.77777777777778e-06,
"loss": 0.857,
"step": 1500
},
{
"epoch": 1.07,
"grad_norm": 20.749853134155273,
"learning_rate": 7.722222222222223e-06,
"loss": 0.8968,
"step": 1525
},
{
"epoch": 1.09,
"grad_norm": 16.837162017822266,
"learning_rate": 7.666666666666667e-06,
"loss": 0.8388,
"step": 1550
},
{
"epoch": 1.11,
"grad_norm": 17.441505432128906,
"learning_rate": 7.611111111111111e-06,
"loss": 0.8734,
"step": 1575
},
{
"epoch": 1.12,
"grad_norm": 18.170475006103516,
"learning_rate": 7.555555555555556e-06,
"loss": 0.8283,
"step": 1600
},
{
"epoch": 1.14,
"grad_norm": 19.032150268554688,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7661,
"step": 1625
},
{
"epoch": 1.16,
"grad_norm": 15.91515827178955,
"learning_rate": 7.444444444444445e-06,
"loss": 0.8611,
"step": 1650
},
{
"epoch": 1.18,
"grad_norm": 18.5009765625,
"learning_rate": 7.38888888888889e-06,
"loss": 0.8536,
"step": 1675
},
{
"epoch": 1.19,
"grad_norm": 22.158689498901367,
"learning_rate": 7.333333333333333e-06,
"loss": 0.9115,
"step": 1700
},
{
"epoch": 1.21,
"grad_norm": 13.969917297363281,
"learning_rate": 7.277777777777778e-06,
"loss": 0.9389,
"step": 1725
},
{
"epoch": 1.23,
"grad_norm": 14.708273887634277,
"learning_rate": 7.222222222222223e-06,
"loss": 0.8358,
"step": 1750
},
{
"epoch": 1.25,
"grad_norm": 18.655982971191406,
"learning_rate": 7.166666666666667e-06,
"loss": 0.9696,
"step": 1775
},
{
"epoch": 1.26,
"grad_norm": 24.355396270751953,
"learning_rate": 7.111111111111112e-06,
"loss": 0.8242,
"step": 1800
},
{
"epoch": 1.28,
"grad_norm": 15.554370880126953,
"learning_rate": 7.055555555555557e-06,
"loss": 0.9143,
"step": 1825
},
{
"epoch": 1.3,
"grad_norm": 20.58561897277832,
"learning_rate": 7e-06,
"loss": 0.8439,
"step": 1850
},
{
"epoch": 1.32,
"grad_norm": 20.269287109375,
"learning_rate": 6.944444444444445e-06,
"loss": 0.8884,
"step": 1875
},
{
"epoch": 1.33,
"grad_norm": 11.557083129882812,
"learning_rate": 6.88888888888889e-06,
"loss": 0.9044,
"step": 1900
},
{
"epoch": 1.35,
"grad_norm": 23.037826538085938,
"learning_rate": 6.833333333333334e-06,
"loss": 0.8734,
"step": 1925
},
{
"epoch": 1.37,
"grad_norm": 20.72858428955078,
"learning_rate": 6.777777777777779e-06,
"loss": 0.8953,
"step": 1950
},
{
"epoch": 1.39,
"grad_norm": 18.743228912353516,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.8319,
"step": 1975
},
{
"epoch": 1.4,
"grad_norm": 17.591102600097656,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8778,
"step": 2000
},
{
"epoch": 1.4,
"eval_loss": 1.1207834482192993,
"eval_runtime": 750.3921,
"eval_samples_per_second": 3.798,
"eval_steps_per_second": 0.476,
"eval_wer": 0.7828144600296499,
"step": 2000
},
{
"epoch": 1.42,
"grad_norm": 18.505678176879883,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.8456,
"step": 2025
},
{
"epoch": 1.44,
"grad_norm": 19.876806259155273,
"learning_rate": 6.555555555555556e-06,
"loss": 0.9283,
"step": 2050
},
{
"epoch": 1.46,
"grad_norm": 23.498214721679688,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.9199,
"step": 2075
},
{
"epoch": 1.47,
"grad_norm": 19.818599700927734,
"learning_rate": 6.444444444444445e-06,
"loss": 0.8638,
"step": 2100
},
{
"epoch": 1.49,
"grad_norm": 17.756738662719727,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.9432,
"step": 2125
},
{
"epoch": 1.51,
"grad_norm": 13.811593055725098,
"learning_rate": 6.333333333333333e-06,
"loss": 0.7516,
"step": 2150
},
{
"epoch": 1.53,
"grad_norm": 17.024320602416992,
"learning_rate": 6.277777777777778e-06,
"loss": 0.9079,
"step": 2175
},
{
"epoch": 1.54,
"grad_norm": 22.73233413696289,
"learning_rate": 6.222222222222223e-06,
"loss": 0.8704,
"step": 2200
},
{
"epoch": 1.56,
"grad_norm": 18.211219787597656,
"learning_rate": 6.166666666666667e-06,
"loss": 0.8635,
"step": 2225
},
{
"epoch": 1.58,
"grad_norm": 14.778497695922852,
"learning_rate": 6.111111111111112e-06,
"loss": 0.8439,
"step": 2250
},
{
"epoch": 1.6,
"grad_norm": 22.68735694885254,
"learning_rate": 6.055555555555555e-06,
"loss": 0.8572,
"step": 2275
},
{
"epoch": 1.61,
"grad_norm": 19.818214416503906,
"learning_rate": 6e-06,
"loss": 0.8196,
"step": 2300
},
{
"epoch": 1.63,
"grad_norm": 22.226512908935547,
"learning_rate": 5.944444444444445e-06,
"loss": 0.8498,
"step": 2325
},
{
"epoch": 1.65,
"grad_norm": 15.627732276916504,
"learning_rate": 5.88888888888889e-06,
"loss": 0.905,
"step": 2350
},
{
"epoch": 1.67,
"grad_norm": 12.331079483032227,
"learning_rate": 5.833333333333334e-06,
"loss": 0.8842,
"step": 2375
},
{
"epoch": 1.68,
"grad_norm": 17.217872619628906,
"learning_rate": 5.777777777777778e-06,
"loss": 0.7816,
"step": 2400
},
{
"epoch": 1.7,
"grad_norm": 19.87596893310547,
"learning_rate": 5.722222222222222e-06,
"loss": 0.884,
"step": 2425
},
{
"epoch": 1.72,
"grad_norm": 15.646110534667969,
"learning_rate": 5.666666666666667e-06,
"loss": 0.8369,
"step": 2450
},
{
"epoch": 1.74,
"grad_norm": 15.3438138961792,
"learning_rate": 5.611111111111112e-06,
"loss": 0.8858,
"step": 2475
},
{
"epoch": 1.75,
"grad_norm": 26.44184112548828,
"learning_rate": 5.555555555555557e-06,
"loss": 0.9208,
"step": 2500
},
{
"epoch": 1.77,
"grad_norm": 16.2983455657959,
"learning_rate": 5.500000000000001e-06,
"loss": 0.8959,
"step": 2525
},
{
"epoch": 1.79,
"grad_norm": 17.188587188720703,
"learning_rate": 5.444444444444445e-06,
"loss": 0.8785,
"step": 2550
},
{
"epoch": 1.81,
"grad_norm": 18.34478759765625,
"learning_rate": 5.388888888888889e-06,
"loss": 0.8116,
"step": 2575
},
{
"epoch": 1.82,
"grad_norm": 14.330605506896973,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8592,
"step": 2600
},
{
"epoch": 1.84,
"grad_norm": 17.461301803588867,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.8729,
"step": 2625
},
{
"epoch": 1.86,
"grad_norm": 17.23073387145996,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.8403,
"step": 2650
},
{
"epoch": 1.88,
"grad_norm": 16.816059112548828,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.8266,
"step": 2675
},
{
"epoch": 1.89,
"grad_norm": 25.448545455932617,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.8047,
"step": 2700
},
{
"epoch": 1.91,
"grad_norm": 16.244035720825195,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.8034,
"step": 2725
},
{
"epoch": 1.93,
"grad_norm": 19.600101470947266,
"learning_rate": 5e-06,
"loss": 0.8126,
"step": 2750
},
{
"epoch": 1.95,
"grad_norm": 19.725372314453125,
"learning_rate": 4.944444444444445e-06,
"loss": 0.7724,
"step": 2775
},
{
"epoch": 1.96,
"grad_norm": 13.881664276123047,
"learning_rate": 4.888888888888889e-06,
"loss": 0.8932,
"step": 2800
},
{
"epoch": 1.98,
"grad_norm": 14.711101531982422,
"learning_rate": 4.833333333333333e-06,
"loss": 0.8111,
"step": 2825
},
{
"epoch": 2.0,
"grad_norm": 22.943696975708008,
"learning_rate": 4.777777777777778e-06,
"loss": 0.7908,
"step": 2850
},
{
"epoch": 2.02,
"grad_norm": 17.254667282104492,
"learning_rate": 4.722222222222222e-06,
"loss": 0.7,
"step": 2875
},
{
"epoch": 2.04,
"grad_norm": 14.545769691467285,
"learning_rate": 4.666666666666667e-06,
"loss": 0.645,
"step": 2900
},
{
"epoch": 2.05,
"grad_norm": 11.072650909423828,
"learning_rate": 4.611111111111112e-06,
"loss": 0.5782,
"step": 2925
},
{
"epoch": 2.07,
"grad_norm": 14.558616638183594,
"learning_rate": 4.555555555555556e-06,
"loss": 0.5763,
"step": 2950
},
{
"epoch": 2.09,
"grad_norm": 16.14755630493164,
"learning_rate": 4.5e-06,
"loss": 0.5983,
"step": 2975
},
{
"epoch": 2.11,
"grad_norm": 18.195594787597656,
"learning_rate": 4.444444444444444e-06,
"loss": 0.6046,
"step": 3000
},
{
"epoch": 2.11,
"eval_loss": 1.1167398691177368,
"eval_runtime": 778.5794,
"eval_samples_per_second": 3.661,
"eval_steps_per_second": 0.459,
"eval_wer": 0.9134450906602806,
"step": 3000
},
{
"epoch": 2.12,
"grad_norm": 18.549034118652344,
"learning_rate": 4.388888888888889e-06,
"loss": 0.5426,
"step": 3025
},
{
"epoch": 2.14,
"grad_norm": 11.774942398071289,
"learning_rate": 4.333333333333334e-06,
"loss": 0.6074,
"step": 3050
},
{
"epoch": 2.16,
"grad_norm": 21.260597229003906,
"learning_rate": 4.277777777777778e-06,
"loss": 0.5798,
"step": 3075
},
{
"epoch": 2.18,
"grad_norm": 18.69565773010254,
"learning_rate": 4.222222222222223e-06,
"loss": 0.5168,
"step": 3100
},
{
"epoch": 2.19,
"grad_norm": 19.02937889099121,
"learning_rate": 4.166666666666667e-06,
"loss": 0.6275,
"step": 3125
},
{
"epoch": 2.21,
"grad_norm": 12.80723762512207,
"learning_rate": 4.111111111111111e-06,
"loss": 0.6773,
"step": 3150
},
{
"epoch": 2.23,
"grad_norm": 14.226038932800293,
"learning_rate": 4.055555555555556e-06,
"loss": 0.6184,
"step": 3175
},
{
"epoch": 2.25,
"grad_norm": 15.16125774383545,
"learning_rate": 4.000000000000001e-06,
"loss": 0.5668,
"step": 3200
},
{
"epoch": 2.26,
"grad_norm": 9.370119094848633,
"learning_rate": 3.944444444444445e-06,
"loss": 0.5718,
"step": 3225
},
{
"epoch": 2.28,
"grad_norm": 15.369794845581055,
"learning_rate": 3.88888888888889e-06,
"loss": 0.6725,
"step": 3250
},
{
"epoch": 2.3,
"grad_norm": 13.635235786437988,
"learning_rate": 3.833333333333334e-06,
"loss": 0.5705,
"step": 3275
},
{
"epoch": 2.32,
"grad_norm": 12.524065971374512,
"learning_rate": 3.777777777777778e-06,
"loss": 0.6573,
"step": 3300
},
{
"epoch": 2.33,
"grad_norm": 14.678699493408203,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.6684,
"step": 3325
},
{
"epoch": 2.35,
"grad_norm": 18.842226028442383,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.5352,
"step": 3350
},
{
"epoch": 2.37,
"grad_norm": 16.38367462158203,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.6098,
"step": 3375
},
{
"epoch": 2.39,
"grad_norm": 15.753807067871094,
"learning_rate": 3.555555555555556e-06,
"loss": 0.6081,
"step": 3400
},
{
"epoch": 2.4,
"grad_norm": 12.830479621887207,
"learning_rate": 3.5e-06,
"loss": 0.5804,
"step": 3425
},
{
"epoch": 2.42,
"grad_norm": 13.615287780761719,
"learning_rate": 3.444444444444445e-06,
"loss": 0.6099,
"step": 3450
},
{
"epoch": 2.44,
"grad_norm": 13.487171173095703,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.6227,
"step": 3475
},
{
"epoch": 2.46,
"grad_norm": 14.19842529296875,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.6085,
"step": 3500
},
{
"epoch": 2.47,
"grad_norm": 19.63158416748047,
"learning_rate": 3.277777777777778e-06,
"loss": 0.6429,
"step": 3525
},
{
"epoch": 2.49,
"grad_norm": 15.446227073669434,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.5227,
"step": 3550
},
{
"epoch": 2.51,
"grad_norm": 14.606199264526367,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.5511,
"step": 3575
},
{
"epoch": 2.53,
"grad_norm": 14.071135520935059,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.5825,
"step": 3600
},
{
"epoch": 2.54,
"grad_norm": 10.74336051940918,
"learning_rate": 3.055555555555556e-06,
"loss": 0.6515,
"step": 3625
},
{
"epoch": 2.56,
"grad_norm": 13.432340621948242,
"learning_rate": 3e-06,
"loss": 0.5975,
"step": 3650
},
{
"epoch": 2.58,
"grad_norm": 15.705540657043457,
"learning_rate": 2.944444444444445e-06,
"loss": 0.5845,
"step": 3675
},
{
"epoch": 2.6,
"grad_norm": 15.272418022155762,
"learning_rate": 2.888888888888889e-06,
"loss": 0.5825,
"step": 3700
},
{
"epoch": 2.61,
"grad_norm": 23.63802719116211,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.5883,
"step": 3725
},
{
"epoch": 2.63,
"grad_norm": 15.042671203613281,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.5646,
"step": 3750
},
{
"epoch": 2.65,
"grad_norm": 15.876602172851562,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.5859,
"step": 3775
},
{
"epoch": 2.67,
"grad_norm": 15.712118148803711,
"learning_rate": 2.666666666666667e-06,
"loss": 0.6506,
"step": 3800
},
{
"epoch": 2.68,
"grad_norm": 12.527118682861328,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.6143,
"step": 3825
},
{
"epoch": 2.7,
"grad_norm": 15.08745002746582,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.6117,
"step": 3850
},
{
"epoch": 2.72,
"grad_norm": 10.62087345123291,
"learning_rate": 2.5e-06,
"loss": 0.573,
"step": 3875
},
{
"epoch": 2.74,
"grad_norm": 15.184548377990723,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.6388,
"step": 3900
},
{
"epoch": 2.75,
"grad_norm": 11.05632209777832,
"learning_rate": 2.388888888888889e-06,
"loss": 0.6476,
"step": 3925
},
{
"epoch": 2.77,
"grad_norm": 21.41426658630371,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.601,
"step": 3950
},
{
"epoch": 2.79,
"grad_norm": 14.866922378540039,
"learning_rate": 2.277777777777778e-06,
"loss": 0.5879,
"step": 3975
},
{
"epoch": 2.81,
"grad_norm": 13.731112480163574,
"learning_rate": 2.222222222222222e-06,
"loss": 0.5933,
"step": 4000
},
{
"epoch": 2.81,
"eval_loss": 1.1129027605056763,
"eval_runtime": 830.7696,
"eval_samples_per_second": 3.431,
"eval_steps_per_second": 0.43,
"eval_wer": 0.7635990420800547,
"step": 4000
},
{
"epoch": 2.82,
"grad_norm": 16.265804290771484,
"learning_rate": 2.166666666666667e-06,
"loss": 0.6687,
"step": 4025
},
{
"epoch": 2.84,
"grad_norm": 16.1932315826416,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.5827,
"step": 4050
},
{
"epoch": 2.86,
"grad_norm": 14.049725532531738,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.5921,
"step": 4075
},
{
"epoch": 2.88,
"grad_norm": 16.718233108520508,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.5516,
"step": 4100
},
{
"epoch": 2.89,
"grad_norm": 16.140684127807617,
"learning_rate": 1.944444444444445e-06,
"loss": 0.5442,
"step": 4125
},
{
"epoch": 2.91,
"grad_norm": 15.428065299987793,
"learning_rate": 1.888888888888889e-06,
"loss": 0.6088,
"step": 4150
},
{
"epoch": 2.93,
"grad_norm": 16.217172622680664,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.6119,
"step": 4175
},
{
"epoch": 2.95,
"grad_norm": 23.09720230102539,
"learning_rate": 1.777777777777778e-06,
"loss": 0.5694,
"step": 4200
},
{
"epoch": 2.96,
"grad_norm": 18.135848999023438,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.6148,
"step": 4225
},
{
"epoch": 2.98,
"grad_norm": 19.2158260345459,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.6248,
"step": 4250
},
{
"epoch": 3.0,
"grad_norm": 25.484533309936523,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.5624,
"step": 4275
},
{
"epoch": 3.02,
"grad_norm": 11.158307075500488,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.475,
"step": 4300
},
{
"epoch": 3.04,
"grad_norm": 14.582618713378906,
"learning_rate": 1.5e-06,
"loss": 0.4581,
"step": 4325
},
{
"epoch": 3.05,
"grad_norm": 11.259454727172852,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.4561,
"step": 4350
},
{
"epoch": 3.07,
"grad_norm": 12.487770080566406,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.4852,
"step": 4375
},
{
"epoch": 3.09,
"grad_norm": 10.27232837677002,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.4099,
"step": 4400
},
{
"epoch": 3.11,
"grad_norm": 10.88982105255127,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.4292,
"step": 4425
},
{
"epoch": 3.12,
"grad_norm": 12.418704986572266,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.4237,
"step": 4450
},
{
"epoch": 3.14,
"grad_norm": 9.705329895019531,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.4299,
"step": 4475
},
{
"epoch": 3.16,
"grad_norm": 11.134821891784668,
"learning_rate": 1.111111111111111e-06,
"loss": 0.4061,
"step": 4500
},
{
"epoch": 3.18,
"grad_norm": 13.798453330993652,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.4039,
"step": 4525
},
{
"epoch": 3.19,
"grad_norm": 13.317481994628906,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.4498,
"step": 4550
},
{
"epoch": 3.21,
"grad_norm": 13.943197250366211,
"learning_rate": 9.444444444444445e-07,
"loss": 0.4514,
"step": 4575
},
{
"epoch": 3.23,
"grad_norm": 9.061113357543945,
"learning_rate": 8.88888888888889e-07,
"loss": 0.436,
"step": 4600
},
{
"epoch": 3.25,
"grad_norm": 13.598713874816895,
"learning_rate": 8.333333333333333e-07,
"loss": 0.4427,
"step": 4625
},
{
"epoch": 3.26,
"grad_norm": 18.128005981445312,
"learning_rate": 7.777777777777779e-07,
"loss": 0.4219,
"step": 4650
},
{
"epoch": 3.28,
"grad_norm": 10.521560668945312,
"learning_rate": 7.222222222222222e-07,
"loss": 0.4455,
"step": 4675
},
{
"epoch": 3.3,
"grad_norm": 13.369185447692871,
"learning_rate": 6.666666666666667e-07,
"loss": 0.4168,
"step": 4700
},
{
"epoch": 3.32,
"grad_norm": 8.334280014038086,
"learning_rate": 6.111111111111112e-07,
"loss": 0.4713,
"step": 4725
},
{
"epoch": 3.33,
"grad_norm": 9.726198196411133,
"learning_rate": 5.555555555555555e-07,
"loss": 0.4642,
"step": 4750
},
{
"epoch": 3.35,
"grad_norm": 16.844810485839844,
"learning_rate": 5.000000000000001e-07,
"loss": 0.5192,
"step": 4775
},
{
"epoch": 3.37,
"grad_norm": 14.9822998046875,
"learning_rate": 4.444444444444445e-07,
"loss": 0.4789,
"step": 4800
},
{
"epoch": 3.39,
"grad_norm": 10.031481742858887,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.4984,
"step": 4825
},
{
"epoch": 3.4,
"grad_norm": 9.072463035583496,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.3774,
"step": 4850
},
{
"epoch": 3.42,
"grad_norm": 13.830499649047852,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.456,
"step": 4875
},
{
"epoch": 3.44,
"grad_norm": 12.41981315612793,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.5071,
"step": 4900
},
{
"epoch": 3.46,
"grad_norm": 14.112458229064941,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.504,
"step": 4925
},
{
"epoch": 3.47,
"grad_norm": 12.638689994812012,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.4269,
"step": 4950
},
{
"epoch": 3.49,
"grad_norm": 11.225275039672852,
"learning_rate": 5.555555555555556e-08,
"loss": 0.4078,
"step": 4975
},
{
"epoch": 3.51,
"grad_norm": 13.034343719482422,
"learning_rate": 0.0,
"loss": 0.3848,
"step": 5000
},
{
"epoch": 3.51,
"eval_loss": 1.1417452096939087,
"eval_runtime": 847.7864,
"eval_samples_per_second": 3.362,
"eval_steps_per_second": 0.421,
"eval_wer": 0.8866461398106967,
"step": 5000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 1000,
"total_flos": 1.154081874419712e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}