whisper-medium-pt-cv16 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.32156408772268313,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 58.440006256103516,
"learning_rate": 1e-08,
"loss": 1.3813,
"step": 25
},
{
"epoch": 0.0,
"grad_norm": 59.934757232666016,
"learning_rate": 2.2e-08,
"loss": 1.469,
"step": 50
},
{
"epoch": 0.0,
"grad_norm": 31.011018753051758,
"learning_rate": 3.4500000000000005e-08,
"loss": 1.2226,
"step": 75
},
{
"epoch": 0.01,
"grad_norm": 59.818233489990234,
"learning_rate": 4.7e-08,
"loss": 1.2458,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 60.51572036743164,
"learning_rate": 5.95e-08,
"loss": 1.2781,
"step": 125
},
{
"epoch": 0.01,
"grad_norm": 51.360103607177734,
"learning_rate": 7.2e-08,
"loss": 1.4055,
"step": 150
},
{
"epoch": 0.01,
"grad_norm": 73.35002136230469,
"learning_rate": 8.45e-08,
"loss": 1.3354,
"step": 175
},
{
"epoch": 0.01,
"grad_norm": 69.32823944091797,
"learning_rate": 9.7e-08,
"loss": 1.2005,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 51.02174377441406,
"learning_rate": 1.095e-07,
"loss": 1.3853,
"step": 225
},
{
"epoch": 0.02,
"grad_norm": 72.20179748535156,
"learning_rate": 1.2199999999999998e-07,
"loss": 1.4476,
"step": 250
},
{
"epoch": 0.02,
"grad_norm": 108.30382537841797,
"learning_rate": 1.345e-07,
"loss": 1.2339,
"step": 275
},
{
"epoch": 0.02,
"grad_norm": 66.15994262695312,
"learning_rate": 1.4699999999999998e-07,
"loss": 1.379,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 47.82923126220703,
"learning_rate": 1.595e-07,
"loss": 1.1467,
"step": 325
},
{
"epoch": 0.02,
"grad_norm": 85.7218246459961,
"learning_rate": 1.7199999999999998e-07,
"loss": 1.1622,
"step": 350
},
{
"epoch": 0.02,
"grad_norm": 68.25504302978516,
"learning_rate": 1.845e-07,
"loss": 1.1413,
"step": 375
},
{
"epoch": 0.03,
"grad_norm": 106.06077575683594,
"learning_rate": 1.97e-07,
"loss": 1.0855,
"step": 400
},
{
"epoch": 0.03,
"grad_norm": 79.60690307617188,
"learning_rate": 2.095e-07,
"loss": 0.929,
"step": 425
},
{
"epoch": 0.03,
"grad_norm": 42.14814376831055,
"learning_rate": 2.22e-07,
"loss": 0.8728,
"step": 450
},
{
"epoch": 0.03,
"grad_norm": 37.4913444519043,
"learning_rate": 2.3449999999999996e-07,
"loss": 0.6651,
"step": 475
},
{
"epoch": 0.03,
"grad_norm": 41.89991760253906,
"learning_rate": 2.47e-07,
"loss": 0.5875,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 75.21453094482422,
"learning_rate": 2.595e-07,
"loss": 0.6868,
"step": 525
},
{
"epoch": 0.04,
"grad_norm": 21.09180450439453,
"learning_rate": 2.72e-07,
"loss": 0.741,
"step": 550
},
{
"epoch": 0.04,
"grad_norm": 44.54707336425781,
"learning_rate": 2.845e-07,
"loss": 0.3898,
"step": 575
},
{
"epoch": 0.04,
"grad_norm": 31.656843185424805,
"learning_rate": 2.9699999999999997e-07,
"loss": 0.422,
"step": 600
},
{
"epoch": 0.04,
"grad_norm": 56.28642654418945,
"learning_rate": 3.0949999999999996e-07,
"loss": 0.3803,
"step": 625
},
{
"epoch": 0.04,
"grad_norm": 38.66410827636719,
"learning_rate": 3.22e-07,
"loss": 0.5062,
"step": 650
},
{
"epoch": 0.04,
"grad_norm": 31.183727264404297,
"learning_rate": 3.345e-07,
"loss": 0.4075,
"step": 675
},
{
"epoch": 0.05,
"grad_norm": 23.618703842163086,
"learning_rate": 3.4699999999999997e-07,
"loss": 0.3627,
"step": 700
},
{
"epoch": 0.05,
"grad_norm": 70.09487915039062,
"learning_rate": 3.5949999999999996e-07,
"loss": 0.3087,
"step": 725
},
{
"epoch": 0.05,
"grad_norm": 74.42188262939453,
"learning_rate": 3.72e-07,
"loss": 0.4021,
"step": 750
},
{
"epoch": 0.05,
"grad_norm": 44.99939727783203,
"learning_rate": 3.845e-07,
"loss": 0.3203,
"step": 775
},
{
"epoch": 0.05,
"grad_norm": 42.77998352050781,
"learning_rate": 3.97e-07,
"loss": 0.3797,
"step": 800
},
{
"epoch": 0.05,
"grad_norm": 64.61412811279297,
"learning_rate": 4.0949999999999995e-07,
"loss": 0.3403,
"step": 825
},
{
"epoch": 0.05,
"grad_norm": 29.286806106567383,
"learning_rate": 4.2199999999999994e-07,
"loss": 0.2879,
"step": 850
},
{
"epoch": 0.06,
"grad_norm": 58.146263122558594,
"learning_rate": 4.345e-07,
"loss": 0.4017,
"step": 875
},
{
"epoch": 0.06,
"grad_norm": 44.624202728271484,
"learning_rate": 4.4699999999999997e-07,
"loss": 0.3698,
"step": 900
},
{
"epoch": 0.06,
"grad_norm": 47.91656494140625,
"learning_rate": 4.595e-07,
"loss": 0.4008,
"step": 925
},
{
"epoch": 0.06,
"grad_norm": 36.263668060302734,
"learning_rate": 4.7199999999999994e-07,
"loss": 0.2041,
"step": 950
},
{
"epoch": 0.06,
"grad_norm": 12.398943901062012,
"learning_rate": 4.845e-07,
"loss": 0.2978,
"step": 975
},
{
"epoch": 0.06,
"grad_norm": 4.42283821105957,
"learning_rate": 4.97e-07,
"loss": 0.2614,
"step": 1000
},
{
"epoch": 0.06,
"eval_loss": 0.29864633083343506,
"eval_runtime": 7667.7674,
"eval_samples_per_second": 1.228,
"eval_steps_per_second": 0.614,
"eval_wer": 0.14664944291942517,
"step": 1000
},
{
"epoch": 0.07,
"grad_norm": 59.681270599365234,
"learning_rate": 5.095e-07,
"loss": 0.2588,
"step": 1025
},
{
"epoch": 0.07,
"grad_norm": 27.44911766052246,
"learning_rate": 5.22e-07,
"loss": 0.2839,
"step": 1050
},
{
"epoch": 0.07,
"grad_norm": 56.26525115966797,
"learning_rate": 5.344999999999999e-07,
"loss": 0.2329,
"step": 1075
},
{
"epoch": 0.07,
"grad_norm": 112.37168884277344,
"learning_rate": 5.47e-07,
"loss": 0.2746,
"step": 1100
},
{
"epoch": 0.07,
"grad_norm": 92.82706451416016,
"learning_rate": 5.595e-07,
"loss": 0.2621,
"step": 1125
},
{
"epoch": 0.07,
"grad_norm": 17.20562744140625,
"learning_rate": 5.719999999999999e-07,
"loss": 0.1826,
"step": 1150
},
{
"epoch": 0.08,
"grad_norm": 31.119354248046875,
"learning_rate": 5.845e-07,
"loss": 0.2487,
"step": 1175
},
{
"epoch": 0.08,
"grad_norm": 7.4088850021362305,
"learning_rate": 5.97e-07,
"loss": 0.297,
"step": 1200
},
{
"epoch": 0.08,
"grad_norm": 72.84540557861328,
"learning_rate": 6.095e-07,
"loss": 0.3108,
"step": 1225
},
{
"epoch": 0.08,
"grad_norm": 38.68337631225586,
"learning_rate": 6.219999999999999e-07,
"loss": 0.2819,
"step": 1250
},
{
"epoch": 0.08,
"grad_norm": 5.215000152587891,
"learning_rate": 6.344999999999999e-07,
"loss": 0.1953,
"step": 1275
},
{
"epoch": 0.08,
"grad_norm": 41.42685317993164,
"learning_rate": 6.47e-07,
"loss": 0.2333,
"step": 1300
},
{
"epoch": 0.09,
"grad_norm": 6.224233150482178,
"learning_rate": 6.595e-07,
"loss": 0.2213,
"step": 1325
},
{
"epoch": 0.09,
"grad_norm": 48.12126541137695,
"learning_rate": 6.72e-07,
"loss": 0.3121,
"step": 1350
},
{
"epoch": 0.09,
"grad_norm": 23.151887893676758,
"learning_rate": 6.845e-07,
"loss": 0.2076,
"step": 1375
},
{
"epoch": 0.09,
"grad_norm": 53.516395568847656,
"learning_rate": 6.97e-07,
"loss": 0.2835,
"step": 1400
},
{
"epoch": 0.09,
"grad_norm": 41.62558364868164,
"learning_rate": 7.094999999999999e-07,
"loss": 0.2574,
"step": 1425
},
{
"epoch": 0.09,
"grad_norm": 98.05493927001953,
"learning_rate": 7.219999999999999e-07,
"loss": 0.3034,
"step": 1450
},
{
"epoch": 0.09,
"grad_norm": 86.28963470458984,
"learning_rate": 7.345e-07,
"loss": 0.2657,
"step": 1475
},
{
"epoch": 0.1,
"grad_norm": 2.8914854526519775,
"learning_rate": 7.47e-07,
"loss": 0.2636,
"step": 1500
},
{
"epoch": 0.1,
"grad_norm": 56.13273239135742,
"learning_rate": 7.594999999999999e-07,
"loss": 0.1522,
"step": 1525
},
{
"epoch": 0.1,
"grad_norm": 12.941767692565918,
"learning_rate": 7.72e-07,
"loss": 0.2097,
"step": 1550
},
{
"epoch": 0.1,
"grad_norm": 13.613518714904785,
"learning_rate": 7.845e-07,
"loss": 0.2303,
"step": 1575
},
{
"epoch": 0.1,
"grad_norm": 23.761892318725586,
"learning_rate": 7.970000000000001e-07,
"loss": 0.2763,
"step": 1600
},
{
"epoch": 0.1,
"grad_norm": 31.896230697631836,
"learning_rate": 8.094999999999999e-07,
"loss": 0.2722,
"step": 1625
},
{
"epoch": 0.11,
"grad_norm": 51.43158721923828,
"learning_rate": 8.219999999999999e-07,
"loss": 0.2228,
"step": 1650
},
{
"epoch": 0.11,
"grad_norm": 2.879077196121216,
"learning_rate": 8.345e-07,
"loss": 0.2694,
"step": 1675
},
{
"epoch": 0.11,
"grad_norm": 98.36167907714844,
"learning_rate": 8.469999999999999e-07,
"loss": 0.3088,
"step": 1700
},
{
"epoch": 0.11,
"grad_norm": 16.084274291992188,
"learning_rate": 8.595e-07,
"loss": 0.1828,
"step": 1725
},
{
"epoch": 0.11,
"grad_norm": 5.136277675628662,
"learning_rate": 8.72e-07,
"loss": 0.1753,
"step": 1750
},
{
"epoch": 0.11,
"grad_norm": 64.78803253173828,
"learning_rate": 8.845e-07,
"loss": 0.198,
"step": 1775
},
{
"epoch": 0.12,
"grad_norm": 41.84619903564453,
"learning_rate": 8.969999999999999e-07,
"loss": 0.2894,
"step": 1800
},
{
"epoch": 0.12,
"grad_norm": 45.18673324584961,
"learning_rate": 9.094999999999999e-07,
"loss": 0.1844,
"step": 1825
},
{
"epoch": 0.12,
"grad_norm": 20.42123794555664,
"learning_rate": 9.22e-07,
"loss": 0.2576,
"step": 1850
},
{
"epoch": 0.12,
"grad_norm": 8.751657485961914,
"learning_rate": 9.344999999999999e-07,
"loss": 0.2492,
"step": 1875
},
{
"epoch": 0.12,
"grad_norm": 29.69828224182129,
"learning_rate": 9.469999999999999e-07,
"loss": 0.1479,
"step": 1900
},
{
"epoch": 0.12,
"grad_norm": 22.91360855102539,
"learning_rate": 9.594999999999999e-07,
"loss": 0.2164,
"step": 1925
},
{
"epoch": 0.13,
"grad_norm": 17.22205352783203,
"learning_rate": 9.72e-07,
"loss": 0.2276,
"step": 1950
},
{
"epoch": 0.13,
"grad_norm": 83.4366683959961,
"learning_rate": 9.845e-07,
"loss": 0.2717,
"step": 1975
},
{
"epoch": 0.13,
"grad_norm": 11.829337120056152,
"learning_rate": 9.97e-07,
"loss": 0.2632,
"step": 2000
},
{
"epoch": 0.13,
"eval_loss": 0.22439254820346832,
"eval_runtime": 7771.0673,
"eval_samples_per_second": 1.211,
"eval_steps_per_second": 0.606,
"eval_wer": 0.13156789924107865,
"step": 2000
},
{
"epoch": 0.13,
"grad_norm": 42.36271667480469,
"learning_rate": 9.936666666666667e-07,
"loss": 0.21,
"step": 2025
},
{
"epoch": 0.13,
"grad_norm": 57.45354461669922,
"learning_rate": 9.853333333333333e-07,
"loss": 0.2244,
"step": 2050
},
{
"epoch": 0.13,
"grad_norm": 17.302165985107422,
"learning_rate": 9.773333333333333e-07,
"loss": 0.2492,
"step": 2075
},
{
"epoch": 0.14,
"grad_norm": 41.777069091796875,
"learning_rate": 9.69e-07,
"loss": 0.1598,
"step": 2100
},
{
"epoch": 0.14,
"grad_norm": 37.14485549926758,
"learning_rate": 9.606666666666666e-07,
"loss": 0.2483,
"step": 2125
},
{
"epoch": 0.14,
"grad_norm": 53.22433090209961,
"learning_rate": 9.523333333333333e-07,
"loss": 0.1913,
"step": 2150
},
{
"epoch": 0.14,
"grad_norm": 78.79158782958984,
"learning_rate": 9.439999999999999e-07,
"loss": 0.3075,
"step": 2175
},
{
"epoch": 0.14,
"grad_norm": 2.1396484375,
"learning_rate": 9.356666666666666e-07,
"loss": 0.2427,
"step": 2200
},
{
"epoch": 0.14,
"grad_norm": 9.334494590759277,
"learning_rate": 9.273333333333333e-07,
"loss": 0.2201,
"step": 2225
},
{
"epoch": 0.14,
"grad_norm": 8.948480606079102,
"learning_rate": 9.19e-07,
"loss": 0.2047,
"step": 2250
},
{
"epoch": 0.15,
"grad_norm": 3.004768133163452,
"learning_rate": 9.106666666666666e-07,
"loss": 0.1928,
"step": 2275
},
{
"epoch": 0.15,
"grad_norm": 3.458395481109619,
"learning_rate": 9.023333333333333e-07,
"loss": 0.1788,
"step": 2300
},
{
"epoch": 0.15,
"grad_norm": 61.66895294189453,
"learning_rate": 8.939999999999999e-07,
"loss": 0.1959,
"step": 2325
},
{
"epoch": 0.15,
"grad_norm": 45.452789306640625,
"learning_rate": 8.856666666666666e-07,
"loss": 0.211,
"step": 2350
},
{
"epoch": 0.15,
"grad_norm": 18.07378578186035,
"learning_rate": 8.773333333333332e-07,
"loss": 0.2391,
"step": 2375
},
{
"epoch": 0.15,
"grad_norm": 46.68052291870117,
"learning_rate": 8.69e-07,
"loss": 0.1782,
"step": 2400
},
{
"epoch": 0.16,
"grad_norm": 5.451249599456787,
"learning_rate": 8.606666666666667e-07,
"loss": 0.1569,
"step": 2425
},
{
"epoch": 0.16,
"grad_norm": 1.6330296993255615,
"learning_rate": 8.523333333333334e-07,
"loss": 0.1381,
"step": 2450
},
{
"epoch": 0.16,
"grad_norm": 43.628761291503906,
"learning_rate": 8.439999999999999e-07,
"loss": 0.1943,
"step": 2475
},
{
"epoch": 0.16,
"grad_norm": 42.83442687988281,
"learning_rate": 8.356666666666666e-07,
"loss": 0.1937,
"step": 2500
},
{
"epoch": 0.16,
"grad_norm": 41.783485412597656,
"learning_rate": 8.273333333333333e-07,
"loss": 0.1738,
"step": 2525
},
{
"epoch": 0.16,
"grad_norm": 43.905025482177734,
"learning_rate": 8.189999999999999e-07,
"loss": 0.2068,
"step": 2550
},
{
"epoch": 0.17,
"grad_norm": 35.906982421875,
"learning_rate": 8.106666666666666e-07,
"loss": 0.3004,
"step": 2575
},
{
"epoch": 0.17,
"grad_norm": 75.37654113769531,
"learning_rate": 8.023333333333333e-07,
"loss": 0.2073,
"step": 2600
},
{
"epoch": 0.17,
"grad_norm": 0.4306688904762268,
"learning_rate": 7.94e-07,
"loss": 0.1722,
"step": 2625
},
{
"epoch": 0.17,
"grad_norm": 48.76789093017578,
"learning_rate": 7.856666666666665e-07,
"loss": 0.2808,
"step": 2650
},
{
"epoch": 0.17,
"grad_norm": 21.527475357055664,
"learning_rate": 7.773333333333333e-07,
"loss": 0.1416,
"step": 2675
},
{
"epoch": 0.17,
"grad_norm": 6.267962455749512,
"learning_rate": 7.69e-07,
"loss": 0.1522,
"step": 2700
},
{
"epoch": 0.18,
"grad_norm": 38.2205696105957,
"learning_rate": 7.606666666666667e-07,
"loss": 0.1988,
"step": 2725
},
{
"epoch": 0.18,
"grad_norm": 2.1994435787200928,
"learning_rate": 7.523333333333333e-07,
"loss": 0.2384,
"step": 2750
},
{
"epoch": 0.18,
"grad_norm": 21.002376556396484,
"learning_rate": 7.44e-07,
"loss": 0.198,
"step": 2775
},
{
"epoch": 0.18,
"grad_norm": 66.96015167236328,
"learning_rate": 7.356666666666667e-07,
"loss": 0.2185,
"step": 2800
},
{
"epoch": 0.18,
"grad_norm": 16.91470718383789,
"learning_rate": 7.273333333333333e-07,
"loss": 0.2149,
"step": 2825
},
{
"epoch": 0.18,
"grad_norm": 12.189261436462402,
"learning_rate": 7.189999999999999e-07,
"loss": 0.1907,
"step": 2850
},
{
"epoch": 0.18,
"grad_norm": 5.648806095123291,
"learning_rate": 7.106666666666666e-07,
"loss": 0.1634,
"step": 2875
},
{
"epoch": 0.19,
"grad_norm": 6.627074241638184,
"learning_rate": 7.023333333333333e-07,
"loss": 0.2275,
"step": 2900
},
{
"epoch": 0.19,
"grad_norm": 42.446556091308594,
"learning_rate": 6.939999999999999e-07,
"loss": 0.1888,
"step": 2925
},
{
"epoch": 0.19,
"grad_norm": 19.29751968383789,
"learning_rate": 6.856666666666667e-07,
"loss": 0.1603,
"step": 2950
},
{
"epoch": 0.19,
"grad_norm": 88.98928833007812,
"learning_rate": 6.773333333333334e-07,
"loss": 0.2295,
"step": 2975
},
{
"epoch": 0.19,
"grad_norm": 13.686836242675781,
"learning_rate": 6.69e-07,
"loss": 0.1694,
"step": 3000
},
{
"epoch": 0.19,
"eval_loss": 0.20859745144844055,
"eval_runtime": 7797.8116,
"eval_samples_per_second": 1.207,
"eval_steps_per_second": 0.604,
"eval_wer": 0.12344582593250444,
"step": 3000
},
{
"epoch": 0.19,
"grad_norm": 44.50759506225586,
"learning_rate": 6.606666666666666e-07,
"loss": 0.2145,
"step": 3025
},
{
"epoch": 0.2,
"grad_norm": 53.09928512573242,
"learning_rate": 6.523333333333333e-07,
"loss": 0.1721,
"step": 3050
},
{
"epoch": 0.2,
"grad_norm": 53.15538024902344,
"learning_rate": 6.44e-07,
"loss": 0.2002,
"step": 3075
},
{
"epoch": 0.2,
"grad_norm": 28.469669342041016,
"learning_rate": 6.356666666666667e-07,
"loss": 0.184,
"step": 3100
},
{
"epoch": 0.2,
"grad_norm": 63.475502014160156,
"learning_rate": 6.273333333333333e-07,
"loss": 0.1787,
"step": 3125
},
{
"epoch": 0.2,
"grad_norm": 38.70827865600586,
"learning_rate": 6.189999999999999e-07,
"loss": 0.1815,
"step": 3150
},
{
"epoch": 0.2,
"grad_norm": 48.54985809326172,
"learning_rate": 6.106666666666666e-07,
"loss": 0.1589,
"step": 3175
},
{
"epoch": 0.21,
"grad_norm": 25.11480140686035,
"learning_rate": 6.023333333333333e-07,
"loss": 0.1795,
"step": 3200
},
{
"epoch": 0.21,
"grad_norm": 17.55191421508789,
"learning_rate": 5.939999999999999e-07,
"loss": 0.1464,
"step": 3225
},
{
"epoch": 0.21,
"grad_norm": 53.10033416748047,
"learning_rate": 5.856666666666667e-07,
"loss": 0.2208,
"step": 3250
},
{
"epoch": 0.21,
"grad_norm": 1.9264570474624634,
"learning_rate": 5.773333333333334e-07,
"loss": 0.1538,
"step": 3275
},
{
"epoch": 0.21,
"grad_norm": 11.413477897644043,
"learning_rate": 5.69e-07,
"loss": 0.2038,
"step": 3300
},
{
"epoch": 0.21,
"grad_norm": 53.29780578613281,
"learning_rate": 5.606666666666666e-07,
"loss": 0.1312,
"step": 3325
},
{
"epoch": 0.22,
"grad_norm": 3.7282674312591553,
"learning_rate": 5.523333333333333e-07,
"loss": 0.2328,
"step": 3350
},
{
"epoch": 0.22,
"grad_norm": Infinity,
"learning_rate": 5.443333333333333e-07,
"loss": 0.2432,
"step": 3375
},
{
"epoch": 0.22,
"grad_norm": 22.73953628540039,
"learning_rate": 5.36e-07,
"loss": 0.2353,
"step": 3400
},
{
"epoch": 0.22,
"grad_norm": 19.394702911376953,
"learning_rate": 5.276666666666666e-07,
"loss": 0.0976,
"step": 3425
},
{
"epoch": 0.22,
"grad_norm": 1.5477691888809204,
"learning_rate": 5.193333333333332e-07,
"loss": 0.1921,
"step": 3450
},
{
"epoch": 0.22,
"grad_norm": 33.865806579589844,
"learning_rate": 5.11e-07,
"loss": 0.1957,
"step": 3475
},
{
"epoch": 0.23,
"grad_norm": 8.566771507263184,
"learning_rate": 5.026666666666667e-07,
"loss": 0.1601,
"step": 3500
},
{
"epoch": 0.23,
"grad_norm": 22.204965591430664,
"learning_rate": 4.943333333333333e-07,
"loss": 0.1894,
"step": 3525
},
{
"epoch": 0.23,
"grad_norm": 32.81788635253906,
"learning_rate": 4.86e-07,
"loss": 0.2235,
"step": 3550
},
{
"epoch": 0.23,
"grad_norm": 60.057193756103516,
"learning_rate": 4.776666666666667e-07,
"loss": 0.1714,
"step": 3575
},
{
"epoch": 0.23,
"grad_norm": 11.461939811706543,
"learning_rate": 4.693333333333333e-07,
"loss": 0.1757,
"step": 3600
},
{
"epoch": 0.23,
"grad_norm": 29.48383331298828,
"learning_rate": 4.61e-07,
"loss": 0.2581,
"step": 3625
},
{
"epoch": 0.23,
"grad_norm": 5.3872270584106445,
"learning_rate": 4.526666666666666e-07,
"loss": 0.1493,
"step": 3650
},
{
"epoch": 0.24,
"grad_norm": 24.588903427124023,
"learning_rate": 4.4433333333333333e-07,
"loss": 0.2003,
"step": 3675
},
{
"epoch": 0.24,
"grad_norm": 42.52607727050781,
"learning_rate": 4.36e-07,
"loss": 0.2212,
"step": 3700
},
{
"epoch": 0.24,
"grad_norm": 17.575077056884766,
"learning_rate": 4.2766666666666664e-07,
"loss": 0.1639,
"step": 3725
},
{
"epoch": 0.24,
"grad_norm": 77.39998626708984,
"learning_rate": 4.193333333333333e-07,
"loss": 0.2154,
"step": 3750
},
{
"epoch": 0.24,
"grad_norm": 65.65005493164062,
"learning_rate": 4.1099999999999996e-07,
"loss": 0.2396,
"step": 3775
},
{
"epoch": 0.24,
"grad_norm": 45.75455093383789,
"learning_rate": 4.0266666666666667e-07,
"loss": 0.159,
"step": 3800
},
{
"epoch": 0.25,
"grad_norm": 44.56821060180664,
"learning_rate": 3.943333333333333e-07,
"loss": 0.1935,
"step": 3825
},
{
"epoch": 0.25,
"grad_norm": 11.89593505859375,
"learning_rate": 3.86e-07,
"loss": 0.1649,
"step": 3850
},
{
"epoch": 0.25,
"grad_norm": 7.169790267944336,
"learning_rate": 3.7766666666666665e-07,
"loss": 0.1925,
"step": 3875
},
{
"epoch": 0.25,
"grad_norm": 141.48680114746094,
"learning_rate": 3.693333333333333e-07,
"loss": 0.1757,
"step": 3900
},
{
"epoch": 0.25,
"grad_norm": 1.0640227794647217,
"learning_rate": 3.6099999999999996e-07,
"loss": 0.1733,
"step": 3925
},
{
"epoch": 0.25,
"grad_norm": 6.541628360748291,
"learning_rate": 3.526666666666667e-07,
"loss": 0.1857,
"step": 3950
},
{
"epoch": 0.26,
"grad_norm": 62.5667610168457,
"learning_rate": 3.4433333333333333e-07,
"loss": 0.1946,
"step": 3975
},
{
"epoch": 0.26,
"grad_norm": 2.5611398220062256,
"learning_rate": 3.36e-07,
"loss": 0.1658,
"step": 4000
},
{
"epoch": 0.26,
"eval_loss": 0.19874949753284454,
"eval_runtime": 7779.7728,
"eval_samples_per_second": 1.21,
"eval_steps_per_second": 0.605,
"eval_wer": 0.12049087679638301,
"step": 4000
},
{
"epoch": 0.26,
"grad_norm": 17.723182678222656,
"learning_rate": 3.2766666666666665e-07,
"loss": 0.1771,
"step": 4025
},
{
"epoch": 0.26,
"grad_norm": 126.36833953857422,
"learning_rate": 3.1933333333333336e-07,
"loss": 0.1923,
"step": 4050
},
{
"epoch": 0.26,
"grad_norm": 37.344024658203125,
"learning_rate": 3.1099999999999997e-07,
"loss": 0.1657,
"step": 4075
},
{
"epoch": 0.26,
"grad_norm": 44.135009765625,
"learning_rate": 3.026666666666666e-07,
"loss": 0.2138,
"step": 4100
},
{
"epoch": 0.27,
"grad_norm": 35.51801300048828,
"learning_rate": 2.9433333333333334e-07,
"loss": 0.2213,
"step": 4125
},
{
"epoch": 0.27,
"grad_norm": 2.6660304069519043,
"learning_rate": 2.8599999999999994e-07,
"loss": 0.1414,
"step": 4150
},
{
"epoch": 0.27,
"grad_norm": 27.147336959838867,
"learning_rate": 2.7766666666666665e-07,
"loss": 0.2362,
"step": 4175
},
{
"epoch": 0.27,
"grad_norm": 44.95083999633789,
"learning_rate": 2.693333333333333e-07,
"loss": 0.1714,
"step": 4200
},
{
"epoch": 0.27,
"grad_norm": 58.55304718017578,
"learning_rate": 2.61e-07,
"loss": 0.2604,
"step": 4225
},
{
"epoch": 0.27,
"grad_norm": 16.97317123413086,
"learning_rate": 2.526666666666666e-07,
"loss": 0.1999,
"step": 4250
},
{
"epoch": 0.27,
"grad_norm": 53.99211502075195,
"learning_rate": 2.4433333333333334e-07,
"loss": 0.1567,
"step": 4275
},
{
"epoch": 0.28,
"grad_norm": 2.541445016860962,
"learning_rate": 2.3599999999999997e-07,
"loss": 0.1516,
"step": 4300
},
{
"epoch": 0.28,
"grad_norm": 10.404768943786621,
"learning_rate": 2.2766666666666665e-07,
"loss": 0.1302,
"step": 4325
},
{
"epoch": 0.28,
"grad_norm": 49.73779296875,
"learning_rate": 2.193333333333333e-07,
"loss": 0.1937,
"step": 4350
},
{
"epoch": 0.28,
"grad_norm": 28.959285736083984,
"learning_rate": 2.1099999999999997e-07,
"loss": 0.2463,
"step": 4375
},
{
"epoch": 0.28,
"grad_norm": 32.72858428955078,
"learning_rate": 2.0266666666666666e-07,
"loss": 0.1744,
"step": 4400
},
{
"epoch": 0.28,
"grad_norm": 27.81814193725586,
"learning_rate": 1.9433333333333331e-07,
"loss": 0.1937,
"step": 4425
},
{
"epoch": 0.29,
"grad_norm": 6.327540874481201,
"learning_rate": 1.86e-07,
"loss": 0.159,
"step": 4450
},
{
"epoch": 0.29,
"grad_norm": 16.851764678955078,
"learning_rate": 1.7766666666666666e-07,
"loss": 0.1587,
"step": 4475
},
{
"epoch": 0.29,
"grad_norm": 81.7389907836914,
"learning_rate": 1.6933333333333334e-07,
"loss": 0.1913,
"step": 4500
},
{
"epoch": 0.29,
"grad_norm": 114.85218811035156,
"learning_rate": 1.61e-07,
"loss": 0.2065,
"step": 4525
},
{
"epoch": 0.29,
"grad_norm": 41.08122253417969,
"learning_rate": 1.5266666666666666e-07,
"loss": 0.1874,
"step": 4550
},
{
"epoch": 0.29,
"grad_norm": 36.792579650878906,
"learning_rate": 1.4433333333333334e-07,
"loss": 0.2145,
"step": 4575
},
{
"epoch": 0.3,
"grad_norm": 2.39276123046875,
"learning_rate": 1.36e-07,
"loss": 0.1627,
"step": 4600
},
{
"epoch": 0.3,
"grad_norm": 29.183069229125977,
"learning_rate": 1.2766666666666668e-07,
"loss": 0.1903,
"step": 4625
},
{
"epoch": 0.3,
"grad_norm": 58.75885009765625,
"learning_rate": 1.1933333333333332e-07,
"loss": 0.2567,
"step": 4650
},
{
"epoch": 0.3,
"grad_norm": 27.792022705078125,
"learning_rate": 1.11e-07,
"loss": 0.1841,
"step": 4675
},
{
"epoch": 0.3,
"grad_norm": 36.91849899291992,
"learning_rate": 1.0266666666666666e-07,
"loss": 0.1458,
"step": 4700
},
{
"epoch": 0.3,
"grad_norm": 40.75591278076172,
"learning_rate": 9.433333333333333e-08,
"loss": 0.1731,
"step": 4725
},
{
"epoch": 0.31,
"grad_norm": 63.75225830078125,
"learning_rate": 8.599999999999999e-08,
"loss": 0.1597,
"step": 4750
},
{
"epoch": 0.31,
"grad_norm": 39.913543701171875,
"learning_rate": 7.766666666666666e-08,
"loss": 0.2853,
"step": 4775
},
{
"epoch": 0.31,
"grad_norm": 9.823902130126953,
"learning_rate": 6.933333333333333e-08,
"loss": 0.2302,
"step": 4800
},
{
"epoch": 0.31,
"grad_norm": 31.42165184020996,
"learning_rate": 6.099999999999999e-08,
"loss": 0.2028,
"step": 4825
},
{
"epoch": 0.31,
"grad_norm": 3.4078125953674316,
"learning_rate": 5.266666666666666e-08,
"loss": 0.1645,
"step": 4850
},
{
"epoch": 0.31,
"grad_norm": 10.65793514251709,
"learning_rate": 4.433333333333333e-08,
"loss": 0.2271,
"step": 4875
},
{
"epoch": 0.32,
"grad_norm": 16.98055648803711,
"learning_rate": 3.6e-08,
"loss": 0.1374,
"step": 4900
},
{
"epoch": 0.32,
"grad_norm": 5.180314064025879,
"learning_rate": 2.7666666666666663e-08,
"loss": 0.1134,
"step": 4925
},
{
"epoch": 0.32,
"grad_norm": 34.10147476196289,
"learning_rate": 1.9333333333333334e-08,
"loss": 0.1612,
"step": 4950
},
{
"epoch": 0.32,
"grad_norm": 39.326698303222656,
"learning_rate": 1.1e-08,
"loss": 0.1835,
"step": 4975
},
{
"epoch": 0.32,
"grad_norm": 22.33516502380371,
"learning_rate": 2.6666666666666666e-09,
"loss": 0.1391,
"step": 5000
},
{
"epoch": 0.32,
"eval_loss": 0.19753539562225342,
"eval_runtime": 7730.2125,
"eval_samples_per_second": 1.218,
"eval_steps_per_second": 0.609,
"eval_wer": 0.11905377038591959,
"step": 5000
},
{
"epoch": 0.32,
"step": 5000,
"total_flos": 1.0206049181106176e+19,
"train_loss": 0.32176169362068174,
"train_runtime": 49101.8688,
"train_samples_per_second": 0.204,
"train_steps_per_second": 0.102
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"total_flos": 1.0206049181106176e+19,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
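The JSON above is the Hugging Face Trainer state for this whisper-medium-pt-cv16 run: "log_history" interleaves training entries (keyed by "loss", "learning_rate", "grad_norm") with evaluation entries (keyed by "eval_loss", "eval_wer") every 1000 steps, followed by a final summary entry. As a minimal sketch of how the file can be consumed, the Python snippet below loads a local copy named trainer_state.json (an assumed filename matching this file) and extracts the loss and WER curves; it relies only on the keys present above, and on the fact that Python's json module tolerates the non-standard Infinity token logged for one grad_norm value.

import json

# Assumption: the file shown above has been saved locally as trainer_state.json.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)  # json.load accepts the bare Infinity token used for one grad_norm

# Training entries carry "loss"; evaluation entries carry "eval_wer".
# The final summary entry has neither key, so it is skipped by both filters.
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

print(f"{len(train_points)} training points, {len(eval_points)} evaluation points")
for step, wer in eval_points:
    print(f"step {step:>5}: WER = {wer:.4f}")

Running this against the state above would list the four evaluation checkpoints (steps 1000 through 5000) with WER falling from roughly 0.147 to 0.119.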