{
"best_metric": 0.46134513589253145,
"best_model_checkpoint": "./whisper-small-ar_tsize_1.0/checkpoint-5000",
"epoch": 1.2883277505797475,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 113.85204315185547,
"learning_rate": 5.000000000000001e-07,
"loss": 3.119,
"step": 25
},
{
"epoch": 0.01,
"grad_norm": 21.994482040405273,
"learning_rate": 1.0000000000000002e-06,
"loss": 2.7327,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 17.54548454284668,
"learning_rate": 1.5e-06,
"loss": 1.8873,
"step": 75
},
{
"epoch": 0.03,
"grad_norm": 19.763961791992188,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.2914,
"step": 100
},
{
"epoch": 0.03,
"grad_norm": 22.205575942993164,
"learning_rate": 2.5e-06,
"loss": 1.0909,
"step": 125
},
{
"epoch": 0.04,
"grad_norm": 13.473732948303223,
"learning_rate": 3e-06,
"loss": 1.0554,
"step": 150
},
{
"epoch": 0.05,
"grad_norm": 14.15457820892334,
"learning_rate": 3.5e-06,
"loss": 0.9368,
"step": 175
},
{
"epoch": 0.05,
"grad_norm": 21.088159561157227,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8853,
"step": 200
},
{
"epoch": 0.06,
"grad_norm": 16.64329719543457,
"learning_rate": 4.5e-06,
"loss": 0.8381,
"step": 225
},
{
"epoch": 0.06,
"grad_norm": 17.755544662475586,
"learning_rate": 5e-06,
"loss": 0.8109,
"step": 250
},
{
"epoch": 0.07,
"grad_norm": 15.66264820098877,
"learning_rate": 5.500000000000001e-06,
"loss": 0.7065,
"step": 275
},
{
"epoch": 0.08,
"grad_norm": 16.0575008392334,
"learning_rate": 6e-06,
"loss": 0.5877,
"step": 300
},
{
"epoch": 0.08,
"grad_norm": 9.758112907409668,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.465,
"step": 325
},
{
"epoch": 0.09,
"grad_norm": 11.255630493164062,
"learning_rate": 7e-06,
"loss": 0.4381,
"step": 350
},
{
"epoch": 0.1,
"grad_norm": 7.868831157684326,
"learning_rate": 7.500000000000001e-06,
"loss": 0.4518,
"step": 375
},
{
"epoch": 0.1,
"grad_norm": 14.623653411865234,
"learning_rate": 8.000000000000001e-06,
"loss": 0.4146,
"step": 400
},
{
"epoch": 0.11,
"grad_norm": 12.925063133239746,
"learning_rate": 8.5e-06,
"loss": 0.4376,
"step": 425
},
{
"epoch": 0.12,
"grad_norm": 11.95840835571289,
"learning_rate": 9e-06,
"loss": 0.4082,
"step": 450
},
{
"epoch": 0.12,
"grad_norm": 11.043086051940918,
"learning_rate": 9.5e-06,
"loss": 0.4204,
"step": 475
},
{
"epoch": 0.13,
"grad_norm": 13.569351196289062,
"learning_rate": 1e-05,
"loss": 0.4574,
"step": 500
},
{
"epoch": 0.14,
"grad_norm": 12.956459999084473,
"learning_rate": 9.944444444444445e-06,
"loss": 0.3668,
"step": 525
},
{
"epoch": 0.14,
"grad_norm": 12.411469459533691,
"learning_rate": 9.88888888888889e-06,
"loss": 0.392,
"step": 550
},
{
"epoch": 0.15,
"grad_norm": 11.523972511291504,
"learning_rate": 9.833333333333333e-06,
"loss": 0.3887,
"step": 575
},
{
"epoch": 0.15,
"grad_norm": 9.563281059265137,
"learning_rate": 9.777777777777779e-06,
"loss": 0.4055,
"step": 600
},
{
"epoch": 0.16,
"grad_norm": 13.318591117858887,
"learning_rate": 9.722222222222223e-06,
"loss": 0.3857,
"step": 625
},
{
"epoch": 0.17,
"grad_norm": 7.878371238708496,
"learning_rate": 9.666666666666667e-06,
"loss": 0.3565,
"step": 650
},
{
"epoch": 0.17,
"grad_norm": 10.200860023498535,
"learning_rate": 9.611111111111112e-06,
"loss": 0.3702,
"step": 675
},
{
"epoch": 0.18,
"grad_norm": 10.580467224121094,
"learning_rate": 9.555555555555556e-06,
"loss": 0.3776,
"step": 700
},
{
"epoch": 0.19,
"grad_norm": 12.521218299865723,
"learning_rate": 9.5e-06,
"loss": 0.3601,
"step": 725
},
{
"epoch": 0.19,
"grad_norm": 13.661670684814453,
"learning_rate": 9.444444444444445e-06,
"loss": 0.3829,
"step": 750
},
{
"epoch": 0.2,
"grad_norm": 12.150840759277344,
"learning_rate": 9.38888888888889e-06,
"loss": 0.3913,
"step": 775
},
{
"epoch": 0.21,
"grad_norm": 11.47882080078125,
"learning_rate": 9.333333333333334e-06,
"loss": 0.4022,
"step": 800
},
{
"epoch": 0.21,
"grad_norm": 10.39133071899414,
"learning_rate": 9.277777777777778e-06,
"loss": 0.3931,
"step": 825
},
{
"epoch": 0.22,
"grad_norm": 9.383455276489258,
"learning_rate": 9.222222222222224e-06,
"loss": 0.3703,
"step": 850
},
{
"epoch": 0.23,
"grad_norm": 11.980205535888672,
"learning_rate": 9.166666666666666e-06,
"loss": 0.3724,
"step": 875
},
{
"epoch": 0.23,
"grad_norm": 16.279930114746094,
"learning_rate": 9.111111111111112e-06,
"loss": 0.3365,
"step": 900
},
{
"epoch": 0.24,
"grad_norm": 8.593803405761719,
"learning_rate": 9.055555555555556e-06,
"loss": 0.3397,
"step": 925
},
{
"epoch": 0.24,
"grad_norm": 12.553435325622559,
"learning_rate": 9e-06,
"loss": 0.3542,
"step": 950
},
{
"epoch": 0.25,
"grad_norm": 9.077679634094238,
"learning_rate": 8.944444444444446e-06,
"loss": 0.3606,
"step": 975
},
{
"epoch": 0.26,
"grad_norm": 8.542105674743652,
"learning_rate": 8.888888888888888e-06,
"loss": 0.3577,
"step": 1000
},
{
"epoch": 0.26,
"eval_loss": 0.34235334396362305,
"eval_runtime": 2902.7007,
"eval_samples_per_second": 2.674,
"eval_steps_per_second": 0.335,
"eval_wer": 0.575619411947867,
"step": 1000
},
{
"epoch": 0.26,
"grad_norm": 18.058244705200195,
"learning_rate": 8.833333333333334e-06,
"loss": 0.3599,
"step": 1025
},
{
"epoch": 0.27,
"grad_norm": 11.067248344421387,
"learning_rate": 8.777777777777778e-06,
"loss": 0.3432,
"step": 1050
},
{
"epoch": 0.28,
"grad_norm": 12.799885749816895,
"learning_rate": 8.722222222222224e-06,
"loss": 0.3552,
"step": 1075
},
{
"epoch": 0.28,
"grad_norm": 10.083304405212402,
"learning_rate": 8.666666666666668e-06,
"loss": 0.357,
"step": 1100
},
{
"epoch": 0.29,
"grad_norm": 11.970526695251465,
"learning_rate": 8.611111111111112e-06,
"loss": 0.323,
"step": 1125
},
{
"epoch": 0.3,
"grad_norm": 10.243406295776367,
"learning_rate": 8.555555555555556e-06,
"loss": 0.3695,
"step": 1150
},
{
"epoch": 0.3,
"grad_norm": 7.486917972564697,
"learning_rate": 8.5e-06,
"loss": 0.3229,
"step": 1175
},
{
"epoch": 0.31,
"grad_norm": 11.434945106506348,
"learning_rate": 8.444444444444446e-06,
"loss": 0.3559,
"step": 1200
},
{
"epoch": 0.32,
"grad_norm": 12.598071098327637,
"learning_rate": 8.38888888888889e-06,
"loss": 0.312,
"step": 1225
},
{
"epoch": 0.32,
"grad_norm": 14.671950340270996,
"learning_rate": 8.333333333333334e-06,
"loss": 0.3205,
"step": 1250
},
{
"epoch": 0.33,
"grad_norm": 12.891206741333008,
"learning_rate": 8.277777777777778e-06,
"loss": 0.3112,
"step": 1275
},
{
"epoch": 0.33,
"grad_norm": 7.274331092834473,
"learning_rate": 8.222222222222222e-06,
"loss": 0.3434,
"step": 1300
},
{
"epoch": 0.34,
"grad_norm": 7.594877243041992,
"learning_rate": 8.166666666666668e-06,
"loss": 0.3087,
"step": 1325
},
{
"epoch": 0.35,
"grad_norm": 9.791447639465332,
"learning_rate": 8.111111111111112e-06,
"loss": 0.3337,
"step": 1350
},
{
"epoch": 0.35,
"grad_norm": 11.591399192810059,
"learning_rate": 8.055555555555557e-06,
"loss": 0.3143,
"step": 1375
},
{
"epoch": 0.36,
"grad_norm": 10.390214920043945,
"learning_rate": 8.000000000000001e-06,
"loss": 0.3216,
"step": 1400
},
{
"epoch": 0.37,
"grad_norm": 16.003393173217773,
"learning_rate": 7.944444444444445e-06,
"loss": 0.3405,
"step": 1425
},
{
"epoch": 0.37,
"grad_norm": 6.049975395202637,
"learning_rate": 7.88888888888889e-06,
"loss": 0.3171,
"step": 1450
},
{
"epoch": 0.38,
"grad_norm": 11.825002670288086,
"learning_rate": 7.833333333333333e-06,
"loss": 0.3348,
"step": 1475
},
{
"epoch": 0.39,
"grad_norm": 9.303126335144043,
"learning_rate": 7.77777777777778e-06,
"loss": 0.3446,
"step": 1500
},
{
"epoch": 0.39,
"grad_norm": 11.592827796936035,
"learning_rate": 7.722222222222223e-06,
"loss": 0.3402,
"step": 1525
},
{
"epoch": 0.4,
"grad_norm": 11.242114067077637,
"learning_rate": 7.666666666666667e-06,
"loss": 0.2915,
"step": 1550
},
{
"epoch": 0.41,
"grad_norm": 5.611409664154053,
"learning_rate": 7.611111111111111e-06,
"loss": 0.2787,
"step": 1575
},
{
"epoch": 0.41,
"grad_norm": 11.119479179382324,
"learning_rate": 7.555555555555556e-06,
"loss": 0.32,
"step": 1600
},
{
"epoch": 0.42,
"grad_norm": 8.848244667053223,
"learning_rate": 7.500000000000001e-06,
"loss": 0.307,
"step": 1625
},
{
"epoch": 0.43,
"grad_norm": 15.980573654174805,
"learning_rate": 7.444444444444445e-06,
"loss": 0.3652,
"step": 1650
},
{
"epoch": 0.43,
"grad_norm": 13.006916999816895,
"learning_rate": 7.38888888888889e-06,
"loss": 0.2934,
"step": 1675
},
{
"epoch": 0.44,
"grad_norm": 12.559382438659668,
"learning_rate": 7.333333333333333e-06,
"loss": 0.3096,
"step": 1700
},
{
"epoch": 0.44,
"grad_norm": 10.66994857788086,
"learning_rate": 7.277777777777778e-06,
"loss": 0.3007,
"step": 1725
},
{
"epoch": 0.45,
"grad_norm": 11.067940711975098,
"learning_rate": 7.222222222222223e-06,
"loss": 0.2485,
"step": 1750
},
{
"epoch": 0.46,
"grad_norm": 10.337029457092285,
"learning_rate": 7.166666666666667e-06,
"loss": 0.3069,
"step": 1775
},
{
"epoch": 0.46,
"grad_norm": 10.672614097595215,
"learning_rate": 7.111111111111112e-06,
"loss": 0.3061,
"step": 1800
},
{
"epoch": 0.47,
"grad_norm": 15.886311531066895,
"learning_rate": 7.055555555555557e-06,
"loss": 0.3161,
"step": 1825
},
{
"epoch": 0.48,
"grad_norm": 8.174284934997559,
"learning_rate": 7e-06,
"loss": 0.2671,
"step": 1850
},
{
"epoch": 0.48,
"grad_norm": 10.332029342651367,
"learning_rate": 6.944444444444445e-06,
"loss": 0.2884,
"step": 1875
},
{
"epoch": 0.49,
"grad_norm": 8.286322593688965,
"learning_rate": 6.88888888888889e-06,
"loss": 0.2855,
"step": 1900
},
{
"epoch": 0.5,
"grad_norm": 12.562649726867676,
"learning_rate": 6.833333333333334e-06,
"loss": 0.2988,
"step": 1925
},
{
"epoch": 0.5,
"grad_norm": 11.962130546569824,
"learning_rate": 6.777777777777779e-06,
"loss": 0.3186,
"step": 1950
},
{
"epoch": 0.51,
"grad_norm": 6.070617198944092,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.2963,
"step": 1975
},
{
"epoch": 0.52,
"grad_norm": 9.177543640136719,
"learning_rate": 6.666666666666667e-06,
"loss": 0.2801,
"step": 2000
},
{
"epoch": 0.52,
"eval_loss": 0.28868672251701355,
"eval_runtime": 2551.4189,
"eval_samples_per_second": 3.042,
"eval_steps_per_second": 0.381,
"eval_wer": 0.5197722521240158,
"step": 2000
},
{
"epoch": 0.52,
"grad_norm": 10.780439376831055,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.2512,
"step": 2025
},
{
"epoch": 0.53,
"grad_norm": 11.412617683410645,
"learning_rate": 6.555555555555556e-06,
"loss": 0.2724,
"step": 2050
},
{
"epoch": 0.53,
"grad_norm": 10.96927547454834,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.3125,
"step": 2075
},
{
"epoch": 0.54,
"grad_norm": 14.052020072937012,
"learning_rate": 6.444444444444445e-06,
"loss": 0.2899,
"step": 2100
},
{
"epoch": 0.55,
"grad_norm": 5.423408031463623,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.2857,
"step": 2125
},
{
"epoch": 0.55,
"grad_norm": 10.999767303466797,
"learning_rate": 6.333333333333333e-06,
"loss": 0.2716,
"step": 2150
},
{
"epoch": 0.56,
"grad_norm": 7.399983882904053,
"learning_rate": 6.277777777777778e-06,
"loss": 0.2954,
"step": 2175
},
{
"epoch": 0.57,
"grad_norm": 11.7737455368042,
"learning_rate": 6.222222222222223e-06,
"loss": 0.2872,
"step": 2200
},
{
"epoch": 0.57,
"grad_norm": 10.09986686706543,
"learning_rate": 6.166666666666667e-06,
"loss": 0.2609,
"step": 2225
},
{
"epoch": 0.58,
"grad_norm": 6.141210079193115,
"learning_rate": 6.111111111111112e-06,
"loss": 0.2572,
"step": 2250
},
{
"epoch": 0.59,
"grad_norm": 12.591606140136719,
"learning_rate": 6.055555555555555e-06,
"loss": 0.2913,
"step": 2275
},
{
"epoch": 0.59,
"grad_norm": 9.1317720413208,
"learning_rate": 6e-06,
"loss": 0.2718,
"step": 2300
},
{
"epoch": 0.6,
"grad_norm": 13.439702033996582,
"learning_rate": 5.944444444444445e-06,
"loss": 0.2832,
"step": 2325
},
{
"epoch": 0.61,
"grad_norm": 7.997286796569824,
"learning_rate": 5.88888888888889e-06,
"loss": 0.2515,
"step": 2350
},
{
"epoch": 0.61,
"grad_norm": 14.81768798828125,
"learning_rate": 5.833333333333334e-06,
"loss": 0.2903,
"step": 2375
},
{
"epoch": 0.62,
"grad_norm": 8.115462303161621,
"learning_rate": 5.777777777777778e-06,
"loss": 0.2724,
"step": 2400
},
{
"epoch": 0.62,
"grad_norm": 9.983502388000488,
"learning_rate": 5.722222222222222e-06,
"loss": 0.3023,
"step": 2425
},
{
"epoch": 0.63,
"grad_norm": 5.400686740875244,
"learning_rate": 5.666666666666667e-06,
"loss": 0.2853,
"step": 2450
},
{
"epoch": 0.64,
"grad_norm": 7.283702373504639,
"learning_rate": 5.611111111111112e-06,
"loss": 0.2635,
"step": 2475
},
{
"epoch": 0.64,
"grad_norm": 8.558086395263672,
"learning_rate": 5.555555555555557e-06,
"loss": 0.2282,
"step": 2500
},
{
"epoch": 0.65,
"grad_norm": 9.957533836364746,
"learning_rate": 5.500000000000001e-06,
"loss": 0.2873,
"step": 2525
},
{
"epoch": 0.66,
"grad_norm": 9.129793167114258,
"learning_rate": 5.444444444444445e-06,
"loss": 0.2651,
"step": 2550
},
{
"epoch": 0.66,
"grad_norm": 14.834382057189941,
"learning_rate": 5.388888888888889e-06,
"loss": 0.2867,
"step": 2575
},
{
"epoch": 0.67,
"grad_norm": 7.231227874755859,
"learning_rate": 5.333333333333334e-06,
"loss": 0.2546,
"step": 2600
},
{
"epoch": 0.68,
"grad_norm": 9.39655876159668,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.2773,
"step": 2625
},
{
"epoch": 0.68,
"grad_norm": 13.442676544189453,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.2761,
"step": 2650
},
{
"epoch": 0.69,
"grad_norm": 8.369064331054688,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.2525,
"step": 2675
},
{
"epoch": 0.7,
"grad_norm": 10.61405086517334,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.2906,
"step": 2700
},
{
"epoch": 0.7,
"grad_norm": 11.205185890197754,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.2681,
"step": 2725
},
{
"epoch": 0.71,
"grad_norm": 7.688168048858643,
"learning_rate": 5e-06,
"loss": 0.2567,
"step": 2750
},
{
"epoch": 0.72,
"grad_norm": 9.2816162109375,
"learning_rate": 4.944444444444445e-06,
"loss": 0.2692,
"step": 2775
},
{
"epoch": 0.72,
"grad_norm": 11.392304420471191,
"learning_rate": 4.888888888888889e-06,
"loss": 0.2699,
"step": 2800
},
{
"epoch": 0.73,
"grad_norm": 7.2947211265563965,
"learning_rate": 4.833333333333333e-06,
"loss": 0.2547,
"step": 2825
},
{
"epoch": 0.73,
"grad_norm": 9.321547508239746,
"learning_rate": 4.777777777777778e-06,
"loss": 0.2542,
"step": 2850
},
{
"epoch": 0.74,
"grad_norm": 10.214452743530273,
"learning_rate": 4.722222222222222e-06,
"loss": 0.2629,
"step": 2875
},
{
"epoch": 0.75,
"grad_norm": 12.231490135192871,
"learning_rate": 4.666666666666667e-06,
"loss": 0.2816,
"step": 2900
},
{
"epoch": 0.75,
"grad_norm": 13.022787094116211,
"learning_rate": 4.611111111111112e-06,
"loss": 0.2688,
"step": 2925
},
{
"epoch": 0.76,
"grad_norm": 7.756204128265381,
"learning_rate": 4.555555555555556e-06,
"loss": 0.251,
"step": 2950
},
{
"epoch": 0.77,
"grad_norm": 5.8169145584106445,
"learning_rate": 4.5e-06,
"loss": 0.2485,
"step": 2975
},
{
"epoch": 0.77,
"grad_norm": 9.137019157409668,
"learning_rate": 4.444444444444444e-06,
"loss": 0.2804,
"step": 3000
},
{
"epoch": 0.77,
"eval_loss": 0.2576030194759369,
"eval_runtime": 2577.0627,
"eval_samples_per_second": 3.012,
"eval_steps_per_second": 0.377,
"eval_wer": 0.48732262799697523,
"step": 3000
},
{
"epoch": 0.78,
"grad_norm": 10.181511878967285,
"learning_rate": 4.388888888888889e-06,
"loss": 0.2726,
"step": 3025
},
{
"epoch": 0.79,
"grad_norm": 9.567985534667969,
"learning_rate": 4.333333333333334e-06,
"loss": 0.2648,
"step": 3050
},
{
"epoch": 0.79,
"grad_norm": 9.701679229736328,
"learning_rate": 4.277777777777778e-06,
"loss": 0.2331,
"step": 3075
},
{
"epoch": 0.8,
"grad_norm": 11.3832426071167,
"learning_rate": 4.222222222222223e-06,
"loss": 0.2313,
"step": 3100
},
{
"epoch": 0.81,
"grad_norm": 9.855692863464355,
"learning_rate": 4.166666666666667e-06,
"loss": 0.2713,
"step": 3125
},
{
"epoch": 0.81,
"grad_norm": 15.739023208618164,
"learning_rate": 4.111111111111111e-06,
"loss": 0.2597,
"step": 3150
},
{
"epoch": 0.82,
"grad_norm": 9.282655715942383,
"learning_rate": 4.055555555555556e-06,
"loss": 0.259,
"step": 3175
},
{
"epoch": 0.82,
"grad_norm": 7.786560535430908,
"learning_rate": 4.000000000000001e-06,
"loss": 0.2059,
"step": 3200
},
{
"epoch": 0.83,
"grad_norm": 10.460468292236328,
"learning_rate": 3.944444444444445e-06,
"loss": 0.2479,
"step": 3225
},
{
"epoch": 0.84,
"grad_norm": 6.100764751434326,
"learning_rate": 3.88888888888889e-06,
"loss": 0.2504,
"step": 3250
},
{
"epoch": 0.84,
"grad_norm": 8.303024291992188,
"learning_rate": 3.833333333333334e-06,
"loss": 0.2521,
"step": 3275
},
{
"epoch": 0.85,
"grad_norm": 11.755902290344238,
"learning_rate": 3.777777777777778e-06,
"loss": 0.2559,
"step": 3300
},
{
"epoch": 0.86,
"grad_norm": 6.604350566864014,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.2345,
"step": 3325
},
{
"epoch": 0.86,
"grad_norm": 10.639260292053223,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.2705,
"step": 3350
},
{
"epoch": 0.87,
"grad_norm": 11.366083145141602,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.2138,
"step": 3375
},
{
"epoch": 0.88,
"grad_norm": 8.11689281463623,
"learning_rate": 3.555555555555556e-06,
"loss": 0.2414,
"step": 3400
},
{
"epoch": 0.88,
"grad_norm": 9.813558578491211,
"learning_rate": 3.5e-06,
"loss": 0.2522,
"step": 3425
},
{
"epoch": 0.89,
"grad_norm": 9.525871276855469,
"learning_rate": 3.444444444444445e-06,
"loss": 0.2537,
"step": 3450
},
{
"epoch": 0.9,
"grad_norm": 9.531010627746582,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.2453,
"step": 3475
},
{
"epoch": 0.9,
"grad_norm": 12.289505958557129,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.2243,
"step": 3500
},
{
"epoch": 0.91,
"grad_norm": 7.548771858215332,
"learning_rate": 3.277777777777778e-06,
"loss": 0.2562,
"step": 3525
},
{
"epoch": 0.91,
"grad_norm": 10.303834915161133,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.2427,
"step": 3550
},
{
"epoch": 0.92,
"grad_norm": 6.49224328994751,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.2311,
"step": 3575
},
{
"epoch": 0.93,
"grad_norm": 7.009319305419922,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.2164,
"step": 3600
},
{
"epoch": 0.93,
"grad_norm": 6.967477798461914,
"learning_rate": 3.055555555555556e-06,
"loss": 0.2315,
"step": 3625
},
{
"epoch": 0.94,
"grad_norm": 8.28270149230957,
"learning_rate": 3e-06,
"loss": 0.2758,
"step": 3650
},
{
"epoch": 0.95,
"grad_norm": 8.78381633758545,
"learning_rate": 2.944444444444445e-06,
"loss": 0.2611,
"step": 3675
},
{
"epoch": 0.95,
"grad_norm": 7.542636394500732,
"learning_rate": 2.888888888888889e-06,
"loss": 0.2703,
"step": 3700
},
{
"epoch": 0.96,
"grad_norm": 13.231207847595215,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.2286,
"step": 3725
},
{
"epoch": 0.97,
"grad_norm": 11.00623607635498,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.2745,
"step": 3750
},
{
"epoch": 0.97,
"grad_norm": 16.842548370361328,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.2506,
"step": 3775
},
{
"epoch": 0.98,
"grad_norm": 15.040122032165527,
"learning_rate": 2.666666666666667e-06,
"loss": 0.2306,
"step": 3800
},
{
"epoch": 0.99,
"grad_norm": 4.292383193969727,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.2446,
"step": 3825
},
{
"epoch": 0.99,
"grad_norm": 7.2162065505981445,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.2368,
"step": 3850
},
{
"epoch": 1.0,
"grad_norm": 8.940288543701172,
"learning_rate": 2.5e-06,
"loss": 0.2478,
"step": 3875
},
{
"epoch": 1.0,
"grad_norm": 10.557374000549316,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.1443,
"step": 3900
},
{
"epoch": 1.01,
"grad_norm": 6.950507640838623,
"learning_rate": 2.388888888888889e-06,
"loss": 0.1581,
"step": 3925
},
{
"epoch": 1.02,
"grad_norm": 7.295450687408447,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.1544,
"step": 3950
},
{
"epoch": 1.02,
"grad_norm": 8.835938453674316,
"learning_rate": 2.277777777777778e-06,
"loss": 0.18,
"step": 3975
},
{
"epoch": 1.03,
"grad_norm": 8.587409019470215,
"learning_rate": 2.222222222222222e-06,
"loss": 0.1791,
"step": 4000
},
{
"epoch": 1.03,
"eval_loss": 0.2372235357761383,
"eval_runtime": 2357.7111,
"eval_samples_per_second": 3.292,
"eval_steps_per_second": 0.412,
"eval_wer": 0.47144255148792313,
"step": 4000
},
{
"epoch": 1.04,
"grad_norm": 5.195342540740967,
"learning_rate": 2.166666666666667e-06,
"loss": 0.1568,
"step": 4025
},
{
"epoch": 1.04,
"grad_norm": 5.360496520996094,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.1615,
"step": 4050
},
{
"epoch": 1.05,
"grad_norm": 4.146684646606445,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.1424,
"step": 4075
},
{
"epoch": 1.06,
"grad_norm": 8.721048355102539,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.1152,
"step": 4100
},
{
"epoch": 1.06,
"grad_norm": 6.823887348175049,
"learning_rate": 1.944444444444445e-06,
"loss": 0.1571,
"step": 4125
},
{
"epoch": 1.07,
"grad_norm": 7.744790554046631,
"learning_rate": 1.888888888888889e-06,
"loss": 0.1454,
"step": 4150
},
{
"epoch": 1.08,
"grad_norm": 8.660394668579102,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.1559,
"step": 4175
},
{
"epoch": 1.08,
"grad_norm": 6.369543552398682,
"learning_rate": 1.777777777777778e-06,
"loss": 0.1386,
"step": 4200
},
{
"epoch": 1.09,
"grad_norm": 6.839955806732178,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.149,
"step": 4225
},
{
"epoch": 1.1,
"grad_norm": 5.007684230804443,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.1547,
"step": 4250
},
{
"epoch": 1.1,
"grad_norm": 8.356603622436523,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.1625,
"step": 4275
},
{
"epoch": 1.11,
"grad_norm": 4.997384548187256,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.1568,
"step": 4300
},
{
"epoch": 1.11,
"grad_norm": 5.098149299621582,
"learning_rate": 1.5e-06,
"loss": 0.1621,
"step": 4325
},
{
"epoch": 1.12,
"grad_norm": 7.223897457122803,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.1408,
"step": 4350
},
{
"epoch": 1.13,
"grad_norm": 7.733861446380615,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.145,
"step": 4375
},
{
"epoch": 1.13,
"grad_norm": 10.624513626098633,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.1783,
"step": 4400
},
{
"epoch": 1.14,
"grad_norm": 6.46937370300293,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.1575,
"step": 4425
},
{
"epoch": 1.15,
"grad_norm": 6.646976947784424,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.1416,
"step": 4450
},
{
"epoch": 1.15,
"grad_norm": 9.44258975982666,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.136,
"step": 4475
},
{
"epoch": 1.16,
"grad_norm": 5.2407026290893555,
"learning_rate": 1.111111111111111e-06,
"loss": 0.1617,
"step": 4500
},
{
"epoch": 1.17,
"grad_norm": 6.014922142028809,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.144,
"step": 4525
},
{
"epoch": 1.17,
"grad_norm": 6.770383358001709,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.1655,
"step": 4550
},
{
"epoch": 1.18,
"grad_norm": 6.713871955871582,
"learning_rate": 9.444444444444445e-07,
"loss": 0.1619,
"step": 4575
},
{
"epoch": 1.19,
"grad_norm": 6.117084503173828,
"learning_rate": 8.88888888888889e-07,
"loss": 0.1577,
"step": 4600
},
{
"epoch": 1.19,
"grad_norm": 6.789885520935059,
"learning_rate": 8.333333333333333e-07,
"loss": 0.1503,
"step": 4625
},
{
"epoch": 1.2,
"grad_norm": 6.120697975158691,
"learning_rate": 7.777777777777779e-07,
"loss": 0.1752,
"step": 4650
},
{
"epoch": 1.2,
"grad_norm": 7.277804851531982,
"learning_rate": 7.222222222222222e-07,
"loss": 0.1331,
"step": 4675
},
{
"epoch": 1.21,
"grad_norm": 7.9337158203125,
"learning_rate": 6.666666666666667e-07,
"loss": 0.1495,
"step": 4700
},
{
"epoch": 1.22,
"grad_norm": 8.645359992980957,
"learning_rate": 6.111111111111112e-07,
"loss": 0.1416,
"step": 4725
},
{
"epoch": 1.22,
"grad_norm": 5.204892635345459,
"learning_rate": 5.555555555555555e-07,
"loss": 0.1363,
"step": 4750
},
{
"epoch": 1.23,
"grad_norm": 7.200200080871582,
"learning_rate": 5.000000000000001e-07,
"loss": 0.155,
"step": 4775
},
{
"epoch": 1.24,
"grad_norm": 3.2822766304016113,
"learning_rate": 4.444444444444445e-07,
"loss": 0.1556,
"step": 4800
},
{
"epoch": 1.24,
"grad_norm": 8.234893798828125,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.1577,
"step": 4825
},
{
"epoch": 1.25,
"grad_norm": 6.150988578796387,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.1666,
"step": 4850
},
{
"epoch": 1.26,
"grad_norm": 10.273090362548828,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.157,
"step": 4875
},
{
"epoch": 1.26,
"grad_norm": 6.537949562072754,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.1601,
"step": 4900
},
{
"epoch": 1.27,
"grad_norm": 8.629070281982422,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.1381,
"step": 4925
},
{
"epoch": 1.28,
"grad_norm": 9.110799789428711,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.1487,
"step": 4950
},
{
"epoch": 1.28,
"grad_norm": 3.492276191711426,
"learning_rate": 5.555555555555556e-08,
"loss": 0.1514,
"step": 4975
},
{
"epoch": 1.29,
"grad_norm": 8.911520004272461,
"learning_rate": 0.0,
"loss": 0.1529,
"step": 5000
},
{
"epoch": 1.29,
"eval_loss": 0.23124288022518158,
"eval_runtime": 2607.326,
"eval_samples_per_second": 2.977,
"eval_steps_per_second": 0.372,
"eval_wer": 0.46134513589253145,
"step": 5000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 1.154226167119872e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}