{
"best_metric": 0.7512307808831327,
"best_model_checkpoint": "./whisper-small-dialect_gulf/checkpoint-5000",
"epoch": 2.311604253351826,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 90.13593292236328,
"learning_rate": 5.000000000000001e-07,
"loss": 4.8868,
"step": 25
},
{
"epoch": 0.02,
"grad_norm": 33.31315231323242,
"learning_rate": 1.0000000000000002e-06,
"loss": 4.1622,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 35.78958511352539,
"learning_rate": 1.5e-06,
"loss": 3.3472,
"step": 75
},
{
"epoch": 0.05,
"grad_norm": 32.040348052978516,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.8814,
"step": 100
},
{
"epoch": 0.06,
"grad_norm": 31.268539428710938,
"learning_rate": 2.5e-06,
"loss": 2.4352,
"step": 125
},
{
"epoch": 0.07,
"grad_norm": 31.179664611816406,
"learning_rate": 3e-06,
"loss": 2.3021,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 23.714216232299805,
"learning_rate": 3.5e-06,
"loss": 2.0636,
"step": 175
},
{
"epoch": 0.09,
"grad_norm": 27.635934829711914,
"learning_rate": 4.000000000000001e-06,
"loss": 1.9784,
"step": 200
},
{
"epoch": 0.1,
"grad_norm": 23.310068130493164,
"learning_rate": 4.5e-06,
"loss": 2.0005,
"step": 225
},
{
"epoch": 0.12,
"grad_norm": 22.89619255065918,
"learning_rate": 5e-06,
"loss": 1.9184,
"step": 250
},
{
"epoch": 0.13,
"grad_norm": 25.67959976196289,
"learning_rate": 5.500000000000001e-06,
"loss": 1.8769,
"step": 275
},
{
"epoch": 0.14,
"grad_norm": 24.483964920043945,
"learning_rate": 6e-06,
"loss": 1.7286,
"step": 300
},
{
"epoch": 0.15,
"grad_norm": 27.483518600463867,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.762,
"step": 325
},
{
"epoch": 0.16,
"grad_norm": 26.084688186645508,
"learning_rate": 7e-06,
"loss": 1.5194,
"step": 350
},
{
"epoch": 0.17,
"grad_norm": 23.23436164855957,
"learning_rate": 7.500000000000001e-06,
"loss": 1.5613,
"step": 375
},
{
"epoch": 0.18,
"grad_norm": 27.090152740478516,
"learning_rate": 8.000000000000001e-06,
"loss": 1.3849,
"step": 400
},
{
"epoch": 0.2,
"grad_norm": 20.59851837158203,
"learning_rate": 8.5e-06,
"loss": 1.3407,
"step": 425
},
{
"epoch": 0.21,
"grad_norm": 21.621706008911133,
"learning_rate": 9e-06,
"loss": 1.4216,
"step": 450
},
{
"epoch": 0.22,
"grad_norm": 20.763568878173828,
"learning_rate": 9.5e-06,
"loss": 1.3252,
"step": 475
},
{
"epoch": 0.23,
"grad_norm": 23.10639762878418,
"learning_rate": 1e-05,
"loss": 1.2477,
"step": 500
},
{
"epoch": 0.24,
"grad_norm": 21.58993911743164,
"learning_rate": 9.944444444444445e-06,
"loss": 1.2779,
"step": 525
},
{
"epoch": 0.25,
"grad_norm": 25.219682693481445,
"learning_rate": 9.88888888888889e-06,
"loss": 1.4722,
"step": 550
},
{
"epoch": 0.27,
"grad_norm": 27.170644760131836,
"learning_rate": 9.833333333333333e-06,
"loss": 1.2436,
"step": 575
},
{
"epoch": 0.28,
"grad_norm": 21.273935317993164,
"learning_rate": 9.777777777777779e-06,
"loss": 1.219,
"step": 600
},
{
"epoch": 0.29,
"grad_norm": 28.816940307617188,
"learning_rate": 9.722222222222223e-06,
"loss": 1.4722,
"step": 625
},
{
"epoch": 0.3,
"grad_norm": 28.660903930664062,
"learning_rate": 9.666666666666667e-06,
"loss": 1.2638,
"step": 650
},
{
"epoch": 0.31,
"grad_norm": 22.813350677490234,
"learning_rate": 9.611111111111112e-06,
"loss": 1.4662,
"step": 675
},
{
"epoch": 0.32,
"grad_norm": 20.098535537719727,
"learning_rate": 9.555555555555556e-06,
"loss": 1.3016,
"step": 700
},
{
"epoch": 0.34,
"grad_norm": 23.036521911621094,
"learning_rate": 9.5e-06,
"loss": 1.3634,
"step": 725
},
{
"epoch": 0.35,
"grad_norm": 27.71617889404297,
"learning_rate": 9.444444444444445e-06,
"loss": 1.3038,
"step": 750
},
{
"epoch": 0.36,
"grad_norm": 22.73453140258789,
"learning_rate": 9.38888888888889e-06,
"loss": 1.2634,
"step": 775
},
{
"epoch": 0.37,
"grad_norm": 26.822202682495117,
"learning_rate": 9.333333333333334e-06,
"loss": 1.28,
"step": 800
},
{
"epoch": 0.38,
"grad_norm": 23.140613555908203,
"learning_rate": 9.277777777777778e-06,
"loss": 1.3511,
"step": 825
},
{
"epoch": 0.39,
"grad_norm": 25.16455078125,
"learning_rate": 9.222222222222224e-06,
"loss": 1.2653,
"step": 850
},
{
"epoch": 0.4,
"grad_norm": 26.37599754333496,
"learning_rate": 9.166666666666666e-06,
"loss": 1.3031,
"step": 875
},
{
"epoch": 0.42,
"grad_norm": 24.899930953979492,
"learning_rate": 9.111111111111112e-06,
"loss": 1.2706,
"step": 900
},
{
"epoch": 0.43,
"grad_norm": 22.268238067626953,
"learning_rate": 9.055555555555556e-06,
"loss": 1.3006,
"step": 925
},
{
"epoch": 0.44,
"grad_norm": 20.132190704345703,
"learning_rate": 9e-06,
"loss": 1.2915,
"step": 950
},
{
"epoch": 0.45,
"grad_norm": 21.949169158935547,
"learning_rate": 8.944444444444446e-06,
"loss": 1.2265,
"step": 975
},
{
"epoch": 0.46,
"grad_norm": 26.4661922454834,
"learning_rate": 8.888888888888888e-06,
"loss": 1.3905,
"step": 1000
},
{
"epoch": 0.46,
"eval_loss": 1.2462494373321533,
"eval_runtime": 1237.1982,
"eval_samples_per_second": 3.497,
"eval_steps_per_second": 0.437,
"eval_wer": 0.903961220934636,
"step": 1000
},
{
"epoch": 0.47,
"grad_norm": 20.229887008666992,
"learning_rate": 8.833333333333334e-06,
"loss": 1.2907,
"step": 1025
},
{
"epoch": 0.49,
"grad_norm": 22.81121253967285,
"learning_rate": 8.777777777777778e-06,
"loss": 1.3117,
"step": 1050
},
{
"epoch": 0.5,
"grad_norm": 23.232364654541016,
"learning_rate": 8.722222222222224e-06,
"loss": 1.2393,
"step": 1075
},
{
"epoch": 0.51,
"grad_norm": 19.813980102539062,
"learning_rate": 8.666666666666668e-06,
"loss": 1.253,
"step": 1100
},
{
"epoch": 0.52,
"grad_norm": 22.089860916137695,
"learning_rate": 8.611111111111112e-06,
"loss": 1.2862,
"step": 1125
},
{
"epoch": 0.53,
"grad_norm": 24.085248947143555,
"learning_rate": 8.555555555555556e-06,
"loss": 1.3403,
"step": 1150
},
{
"epoch": 0.54,
"grad_norm": 21.81845474243164,
"learning_rate": 8.5e-06,
"loss": 1.2885,
"step": 1175
},
{
"epoch": 0.55,
"grad_norm": 23.052608489990234,
"learning_rate": 8.444444444444446e-06,
"loss": 1.3301,
"step": 1200
},
{
"epoch": 0.57,
"grad_norm": 18.781269073486328,
"learning_rate": 8.38888888888889e-06,
"loss": 1.3634,
"step": 1225
},
{
"epoch": 0.58,
"grad_norm": 19.929943084716797,
"learning_rate": 8.333333333333334e-06,
"loss": 1.2226,
"step": 1250
},
{
"epoch": 0.59,
"grad_norm": 30.602449417114258,
"learning_rate": 8.277777777777778e-06,
"loss": 1.3299,
"step": 1275
},
{
"epoch": 0.6,
"grad_norm": 23.87131118774414,
"learning_rate": 8.222222222222222e-06,
"loss": 1.2976,
"step": 1300
},
{
"epoch": 0.61,
"grad_norm": 22.464336395263672,
"learning_rate": 8.166666666666668e-06,
"loss": 1.2414,
"step": 1325
},
{
"epoch": 0.62,
"grad_norm": 17.31785774230957,
"learning_rate": 8.111111111111112e-06,
"loss": 1.221,
"step": 1350
},
{
"epoch": 0.64,
"grad_norm": 26.171546936035156,
"learning_rate": 8.055555555555557e-06,
"loss": 1.2388,
"step": 1375
},
{
"epoch": 0.65,
"grad_norm": 19.773033142089844,
"learning_rate": 8.000000000000001e-06,
"loss": 1.1601,
"step": 1400
},
{
"epoch": 0.66,
"grad_norm": 31.665481567382812,
"learning_rate": 7.944444444444445e-06,
"loss": 1.3741,
"step": 1425
},
{
"epoch": 0.67,
"grad_norm": 24.492671966552734,
"learning_rate": 7.88888888888889e-06,
"loss": 1.2745,
"step": 1450
},
{
"epoch": 0.68,
"grad_norm": 24.3986873626709,
"learning_rate": 7.833333333333333e-06,
"loss": 1.2371,
"step": 1475
},
{
"epoch": 0.69,
"grad_norm": 22.881649017333984,
"learning_rate": 7.77777777777778e-06,
"loss": 1.254,
"step": 1500
},
{
"epoch": 0.71,
"grad_norm": 23.861434936523438,
"learning_rate": 7.722222222222223e-06,
"loss": 1.1979,
"step": 1525
},
{
"epoch": 0.72,
"grad_norm": 20.938926696777344,
"learning_rate": 7.666666666666667e-06,
"loss": 1.1873,
"step": 1550
},
{
"epoch": 0.73,
"grad_norm": 31.408279418945312,
"learning_rate": 7.611111111111111e-06,
"loss": 1.1919,
"step": 1575
},
{
"epoch": 0.74,
"grad_norm": 23.88611602783203,
"learning_rate": 7.555555555555556e-06,
"loss": 1.2749,
"step": 1600
},
{
"epoch": 0.75,
"grad_norm": 20.98951530456543,
"learning_rate": 7.500000000000001e-06,
"loss": 1.0798,
"step": 1625
},
{
"epoch": 0.76,
"grad_norm": 19.691837310791016,
"learning_rate": 7.444444444444445e-06,
"loss": 1.1863,
"step": 1650
},
{
"epoch": 0.77,
"grad_norm": 18.230724334716797,
"learning_rate": 7.38888888888889e-06,
"loss": 1.2122,
"step": 1675
},
{
"epoch": 0.79,
"grad_norm": 18.965030670166016,
"learning_rate": 7.333333333333333e-06,
"loss": 1.147,
"step": 1700
},
{
"epoch": 0.8,
"grad_norm": 26.649892807006836,
"learning_rate": 7.277777777777778e-06,
"loss": 1.3043,
"step": 1725
},
{
"epoch": 0.81,
"grad_norm": 19.97573471069336,
"learning_rate": 7.222222222222223e-06,
"loss": 1.18,
"step": 1750
},
{
"epoch": 0.82,
"grad_norm": 18.884883880615234,
"learning_rate": 7.166666666666667e-06,
"loss": 1.1741,
"step": 1775
},
{
"epoch": 0.83,
"grad_norm": 21.117107391357422,
"learning_rate": 7.111111111111112e-06,
"loss": 1.1664,
"step": 1800
},
{
"epoch": 0.84,
"grad_norm": 28.765390396118164,
"learning_rate": 7.055555555555557e-06,
"loss": 1.3056,
"step": 1825
},
{
"epoch": 0.86,
"grad_norm": 22.94930076599121,
"learning_rate": 7e-06,
"loss": 1.153,
"step": 1850
},
{
"epoch": 0.87,
"grad_norm": 22.228673934936523,
"learning_rate": 6.944444444444445e-06,
"loss": 1.1835,
"step": 1875
},
{
"epoch": 0.88,
"grad_norm": 27.348892211914062,
"learning_rate": 6.88888888888889e-06,
"loss": 1.2132,
"step": 1900
},
{
"epoch": 0.89,
"grad_norm": 19.449748992919922,
"learning_rate": 6.833333333333334e-06,
"loss": 1.2001,
"step": 1925
},
{
"epoch": 0.9,
"grad_norm": 23.61025047302246,
"learning_rate": 6.777777777777779e-06,
"loss": 1.0774,
"step": 1950
},
{
"epoch": 0.91,
"grad_norm": 24.239519119262695,
"learning_rate": 6.7222222222222235e-06,
"loss": 1.1935,
"step": 1975
},
{
"epoch": 0.92,
"grad_norm": 19.700180053710938,
"learning_rate": 6.666666666666667e-06,
"loss": 1.3338,
"step": 2000
},
{
"epoch": 0.92,
"eval_loss": 1.1501092910766602,
"eval_runtime": 1305.57,
"eval_samples_per_second": 3.313,
"eval_steps_per_second": 0.414,
"eval_wer": 0.902900855866091,
"step": 2000
},
{
"epoch": 0.94,
"grad_norm": 24.58094024658203,
"learning_rate": 6.6111111111111115e-06,
"loss": 1.1615,
"step": 2025
},
{
"epoch": 0.95,
"grad_norm": 30.994125366210938,
"learning_rate": 6.555555555555556e-06,
"loss": 1.2499,
"step": 2050
},
{
"epoch": 0.96,
"grad_norm": 22.892292022705078,
"learning_rate": 6.5000000000000004e-06,
"loss": 1.1866,
"step": 2075
},
{
"epoch": 0.97,
"grad_norm": 23.727901458740234,
"learning_rate": 6.444444444444445e-06,
"loss": 1.241,
"step": 2100
},
{
"epoch": 0.98,
"grad_norm": 22.032814025878906,
"learning_rate": 6.3888888888888885e-06,
"loss": 1.2042,
"step": 2125
},
{
"epoch": 0.99,
"grad_norm": 29.404563903808594,
"learning_rate": 6.333333333333333e-06,
"loss": 1.1603,
"step": 2150
},
{
"epoch": 1.01,
"grad_norm": 14.76396656036377,
"learning_rate": 6.277777777777778e-06,
"loss": 1.0143,
"step": 2175
},
{
"epoch": 1.02,
"grad_norm": 13.789732933044434,
"learning_rate": 6.222222222222223e-06,
"loss": 0.8707,
"step": 2200
},
{
"epoch": 1.03,
"grad_norm": 21.95280647277832,
"learning_rate": 6.166666666666667e-06,
"loss": 0.9127,
"step": 2225
},
{
"epoch": 1.04,
"grad_norm": 16.230443954467773,
"learning_rate": 6.111111111111112e-06,
"loss": 0.8598,
"step": 2250
},
{
"epoch": 1.05,
"grad_norm": 19.354671478271484,
"learning_rate": 6.055555555555555e-06,
"loss": 0.8387,
"step": 2275
},
{
"epoch": 1.06,
"grad_norm": 18.098474502563477,
"learning_rate": 6e-06,
"loss": 0.7913,
"step": 2300
},
{
"epoch": 1.07,
"grad_norm": 16.24185562133789,
"learning_rate": 5.944444444444445e-06,
"loss": 0.8831,
"step": 2325
},
{
"epoch": 1.09,
"grad_norm": 15.916351318359375,
"learning_rate": 5.88888888888889e-06,
"loss": 0.9592,
"step": 2350
},
{
"epoch": 1.1,
"grad_norm": 19.874614715576172,
"learning_rate": 5.833333333333334e-06,
"loss": 0.869,
"step": 2375
},
{
"epoch": 1.11,
"grad_norm": 14.805360794067383,
"learning_rate": 5.777777777777778e-06,
"loss": 0.7478,
"step": 2400
},
{
"epoch": 1.12,
"grad_norm": 24.80560874938965,
"learning_rate": 5.722222222222222e-06,
"loss": 0.9983,
"step": 2425
},
{
"epoch": 1.13,
"grad_norm": 14.018525123596191,
"learning_rate": 5.666666666666667e-06,
"loss": 0.9365,
"step": 2450
},
{
"epoch": 1.14,
"grad_norm": 16.925121307373047,
"learning_rate": 5.611111111111112e-06,
"loss": 0.8463,
"step": 2475
},
{
"epoch": 1.16,
"grad_norm": 12.009042739868164,
"learning_rate": 5.555555555555557e-06,
"loss": 0.7888,
"step": 2500
},
{
"epoch": 1.17,
"grad_norm": 17.96883773803711,
"learning_rate": 5.500000000000001e-06,
"loss": 1.0053,
"step": 2525
},
{
"epoch": 1.18,
"grad_norm": 26.6944522857666,
"learning_rate": 5.444444444444445e-06,
"loss": 0.9585,
"step": 2550
},
{
"epoch": 1.19,
"grad_norm": 17.153728485107422,
"learning_rate": 5.388888888888889e-06,
"loss": 0.9195,
"step": 2575
},
{
"epoch": 1.2,
"grad_norm": 18.12151527404785,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8074,
"step": 2600
},
{
"epoch": 1.21,
"grad_norm": 15.750487327575684,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.9239,
"step": 2625
},
{
"epoch": 1.23,
"grad_norm": 17.816286087036133,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.9279,
"step": 2650
},
{
"epoch": 1.24,
"grad_norm": 21.26312255859375,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.8332,
"step": 2675
},
{
"epoch": 1.25,
"grad_norm": 13.23405647277832,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.8514,
"step": 2700
},
{
"epoch": 1.26,
"grad_norm": 14.837157249450684,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.8468,
"step": 2725
},
{
"epoch": 1.27,
"grad_norm": 16.646862030029297,
"learning_rate": 5e-06,
"loss": 0.8662,
"step": 2750
},
{
"epoch": 1.28,
"grad_norm": 18.599933624267578,
"learning_rate": 4.944444444444445e-06,
"loss": 1.0057,
"step": 2775
},
{
"epoch": 1.29,
"grad_norm": 18.52560043334961,
"learning_rate": 4.888888888888889e-06,
"loss": 0.8529,
"step": 2800
},
{
"epoch": 1.31,
"grad_norm": 23.720380783081055,
"learning_rate": 4.833333333333333e-06,
"loss": 0.9172,
"step": 2825
},
{
"epoch": 1.32,
"grad_norm": 16.967018127441406,
"learning_rate": 4.777777777777778e-06,
"loss": 0.8931,
"step": 2850
},
{
"epoch": 1.33,
"grad_norm": 20.544633865356445,
"learning_rate": 4.722222222222222e-06,
"loss": 0.8951,
"step": 2875
},
{
"epoch": 1.34,
"grad_norm": 24.084787368774414,
"learning_rate": 4.666666666666667e-06,
"loss": 0.8265,
"step": 2900
},
{
"epoch": 1.35,
"grad_norm": 23.94223976135254,
"learning_rate": 4.611111111111112e-06,
"loss": 0.9139,
"step": 2925
},
{
"epoch": 1.36,
"grad_norm": 19.657983779907227,
"learning_rate": 4.555555555555556e-06,
"loss": 0.8641,
"step": 2950
},
{
"epoch": 1.38,
"grad_norm": 21.78879737854004,
"learning_rate": 4.5e-06,
"loss": 0.9701,
"step": 2975
},
{
"epoch": 1.39,
"grad_norm": 15.626680374145508,
"learning_rate": 4.444444444444444e-06,
"loss": 0.9277,
"step": 3000
},
{
"epoch": 1.39,
"eval_loss": 1.1189059019088745,
"eval_runtime": 1297.5862,
"eval_samples_per_second": 3.334,
"eval_steps_per_second": 0.417,
"eval_wer": 0.818488222373703,
"step": 3000
},
{
"epoch": 1.4,
"grad_norm": 16.269102096557617,
"learning_rate": 4.388888888888889e-06,
"loss": 0.8269,
"step": 3025
},
{
"epoch": 1.41,
"grad_norm": 16.133930206298828,
"learning_rate": 4.333333333333334e-06,
"loss": 0.8246,
"step": 3050
},
{
"epoch": 1.42,
"grad_norm": 23.846628189086914,
"learning_rate": 4.277777777777778e-06,
"loss": 0.9221,
"step": 3075
},
{
"epoch": 1.43,
"grad_norm": 14.437520027160645,
"learning_rate": 4.222222222222223e-06,
"loss": 0.9663,
"step": 3100
},
{
"epoch": 1.44,
"grad_norm": 15.177382469177246,
"learning_rate": 4.166666666666667e-06,
"loss": 0.8925,
"step": 3125
},
{
"epoch": 1.46,
"grad_norm": 18.592479705810547,
"learning_rate": 4.111111111111111e-06,
"loss": 0.8471,
"step": 3150
},
{
"epoch": 1.47,
"grad_norm": 16.767436981201172,
"learning_rate": 4.055555555555556e-06,
"loss": 0.8792,
"step": 3175
},
{
"epoch": 1.48,
"grad_norm": 25.8040828704834,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9201,
"step": 3200
},
{
"epoch": 1.49,
"grad_norm": 21.563386917114258,
"learning_rate": 3.944444444444445e-06,
"loss": 0.833,
"step": 3225
},
{
"epoch": 1.5,
"grad_norm": 18.131933212280273,
"learning_rate": 3.88888888888889e-06,
"loss": 0.8592,
"step": 3250
},
{
"epoch": 1.51,
"grad_norm": 19.301654815673828,
"learning_rate": 3.833333333333334e-06,
"loss": 0.9349,
"step": 3275
},
{
"epoch": 1.53,
"grad_norm": 21.040206909179688,
"learning_rate": 3.777777777777778e-06,
"loss": 0.8683,
"step": 3300
},
{
"epoch": 1.54,
"grad_norm": 17.75210952758789,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.8643,
"step": 3325
},
{
"epoch": 1.55,
"grad_norm": 15.362386703491211,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.8959,
"step": 3350
},
{
"epoch": 1.56,
"grad_norm": 17.963134765625,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.9468,
"step": 3375
},
{
"epoch": 1.57,
"grad_norm": 22.818912506103516,
"learning_rate": 3.555555555555556e-06,
"loss": 0.8257,
"step": 3400
},
{
"epoch": 1.58,
"grad_norm": 17.68602752685547,
"learning_rate": 3.5e-06,
"loss": 0.8878,
"step": 3425
},
{
"epoch": 1.6,
"grad_norm": 15.19880199432373,
"learning_rate": 3.444444444444445e-06,
"loss": 0.8924,
"step": 3450
},
{
"epoch": 1.61,
"grad_norm": 21.873252868652344,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.8991,
"step": 3475
},
{
"epoch": 1.62,
"grad_norm": 18.36728286743164,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.8458,
"step": 3500
},
{
"epoch": 1.63,
"grad_norm": 18.431533813476562,
"learning_rate": 3.277777777777778e-06,
"loss": 0.7883,
"step": 3525
},
{
"epoch": 1.64,
"grad_norm": 13.323966026306152,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.808,
"step": 3550
},
{
"epoch": 1.65,
"grad_norm": 20.740156173706055,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.8501,
"step": 3575
},
{
"epoch": 1.66,
"grad_norm": 17.27450180053711,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.862,
"step": 3600
},
{
"epoch": 1.68,
"grad_norm": 23.401592254638672,
"learning_rate": 3.055555555555556e-06,
"loss": 0.8988,
"step": 3625
},
{
"epoch": 1.69,
"grad_norm": 18.699295043945312,
"learning_rate": 3e-06,
"loss": 0.8809,
"step": 3650
},
{
"epoch": 1.7,
"grad_norm": 17.551536560058594,
"learning_rate": 2.944444444444445e-06,
"loss": 0.8908,
"step": 3675
},
{
"epoch": 1.71,
"grad_norm": 16.441694259643555,
"learning_rate": 2.888888888888889e-06,
"loss": 0.875,
"step": 3700
},
{
"epoch": 1.72,
"grad_norm": 27.084482192993164,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.9332,
"step": 3725
},
{
"epoch": 1.73,
"grad_norm": 15.829773902893066,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.8631,
"step": 3750
},
{
"epoch": 1.75,
"grad_norm": 17.43021583557129,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.835,
"step": 3775
},
{
"epoch": 1.76,
"grad_norm": 16.65917205810547,
"learning_rate": 2.666666666666667e-06,
"loss": 0.8623,
"step": 3800
},
{
"epoch": 1.77,
"grad_norm": 20.004430770874023,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.89,
"step": 3825
},
{
"epoch": 1.78,
"grad_norm": 19.59697151184082,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.9355,
"step": 3850
},
{
"epoch": 1.79,
"grad_norm": 15.853771209716797,
"learning_rate": 2.5e-06,
"loss": 0.8388,
"step": 3875
},
{
"epoch": 1.8,
"grad_norm": 18.28623390197754,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.866,
"step": 3900
},
{
"epoch": 1.81,
"grad_norm": 12.830389976501465,
"learning_rate": 2.388888888888889e-06,
"loss": 0.7384,
"step": 3925
},
{
"epoch": 1.83,
"grad_norm": 20.37066078186035,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.7555,
"step": 3950
},
{
"epoch": 1.84,
"grad_norm": 15.657346725463867,
"learning_rate": 2.277777777777778e-06,
"loss": 0.8481,
"step": 3975
},
{
"epoch": 1.85,
"grad_norm": 17.99932098388672,
"learning_rate": 2.222222222222222e-06,
"loss": 0.8347,
"step": 4000
},
{
"epoch": 1.85,
"eval_loss": 1.0929533243179321,
"eval_runtime": 1274.136,
"eval_samples_per_second": 3.395,
"eval_steps_per_second": 0.425,
"eval_wer": 0.8011058092857684,
"step": 4000
},
{
"epoch": 1.86,
"grad_norm": 18.715171813964844,
"learning_rate": 2.166666666666667e-06,
"loss": 0.9483,
"step": 4025
},
{
"epoch": 1.87,
"grad_norm": 14.89651870727539,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.829,
"step": 4050
},
{
"epoch": 1.88,
"grad_norm": 13.295916557312012,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.7805,
"step": 4075
},
{
"epoch": 1.9,
"grad_norm": 19.08588218688965,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.7778,
"step": 4100
},
{
"epoch": 1.91,
"grad_norm": 12.506399154663086,
"learning_rate": 1.944444444444445e-06,
"loss": 0.7863,
"step": 4125
},
{
"epoch": 1.92,
"grad_norm": 18.24089241027832,
"learning_rate": 1.888888888888889e-06,
"loss": 0.7969,
"step": 4150
},
{
"epoch": 1.93,
"grad_norm": 20.995338439941406,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.9124,
"step": 4175
},
{
"epoch": 1.94,
"grad_norm": 16.891035079956055,
"learning_rate": 1.777777777777778e-06,
"loss": 0.8769,
"step": 4200
},
{
"epoch": 1.95,
"grad_norm": 18.63488006591797,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.8952,
"step": 4225
},
{
"epoch": 1.96,
"grad_norm": 19.848390579223633,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.8361,
"step": 4250
},
{
"epoch": 1.98,
"grad_norm": 18.764118194580078,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.795,
"step": 4275
},
{
"epoch": 1.99,
"grad_norm": 20.843151092529297,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.9876,
"step": 4300
},
{
"epoch": 2.0,
"grad_norm": 19.12489891052246,
"learning_rate": 1.5e-06,
"loss": 0.8053,
"step": 4325
},
{
"epoch": 2.01,
"grad_norm": 14.411953926086426,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.5857,
"step": 4350
},
{
"epoch": 2.02,
"grad_norm": 21.73296546936035,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.7082,
"step": 4375
},
{
"epoch": 2.03,
"grad_norm": 18.120756149291992,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.6726,
"step": 4400
},
{
"epoch": 2.05,
"grad_norm": 12.758247375488281,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.6329,
"step": 4425
},
{
"epoch": 2.06,
"grad_norm": 10.172853469848633,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.5615,
"step": 4450
},
{
"epoch": 2.07,
"grad_norm": 18.662038803100586,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.5792,
"step": 4475
},
{
"epoch": 2.08,
"grad_norm": 12.439136505126953,
"learning_rate": 1.111111111111111e-06,
"loss": 0.7267,
"step": 4500
},
{
"epoch": 2.09,
"grad_norm": 16.754854202270508,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.6498,
"step": 4525
},
{
"epoch": 2.1,
"grad_norm": 16.47597312927246,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.6845,
"step": 4550
},
{
"epoch": 2.12,
"grad_norm": 16.60858154296875,
"learning_rate": 9.444444444444445e-07,
"loss": 0.5911,
"step": 4575
},
{
"epoch": 2.13,
"grad_norm": 12.026427268981934,
"learning_rate": 8.88888888888889e-07,
"loss": 0.5721,
"step": 4600
},
{
"epoch": 2.14,
"grad_norm": 18.53736686706543,
"learning_rate": 8.333333333333333e-07,
"loss": 0.66,
"step": 4625
},
{
"epoch": 2.15,
"grad_norm": 16.173511505126953,
"learning_rate": 7.777777777777779e-07,
"loss": 0.6813,
"step": 4650
},
{
"epoch": 2.16,
"grad_norm": 14.248098373413086,
"learning_rate": 7.222222222222222e-07,
"loss": 0.6902,
"step": 4675
},
{
"epoch": 2.17,
"grad_norm": 18.32253646850586,
"learning_rate": 6.666666666666667e-07,
"loss": 0.6995,
"step": 4700
},
{
"epoch": 2.18,
"grad_norm": 10.451536178588867,
"learning_rate": 6.111111111111112e-07,
"loss": 0.6376,
"step": 4725
},
{
"epoch": 2.2,
"grad_norm": 8.597134590148926,
"learning_rate": 5.555555555555555e-07,
"loss": 0.6471,
"step": 4750
},
{
"epoch": 2.21,
"grad_norm": 12.938069343566895,
"learning_rate": 5.000000000000001e-07,
"loss": 0.658,
"step": 4775
},
{
"epoch": 2.22,
"grad_norm": 12.209549903869629,
"learning_rate": 4.444444444444445e-07,
"loss": 0.636,
"step": 4800
},
{
"epoch": 2.23,
"grad_norm": 9.981619834899902,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.5447,
"step": 4825
},
{
"epoch": 2.24,
"grad_norm": 17.577558517456055,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.6757,
"step": 4850
},
{
"epoch": 2.25,
"grad_norm": 17.589126586914062,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.6394,
"step": 4875
},
{
"epoch": 2.27,
"grad_norm": 18.11832618713379,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.6869,
"step": 4900
},
{
"epoch": 2.28,
"grad_norm": 14.544137954711914,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.6432,
"step": 4925
},
{
"epoch": 2.29,
"grad_norm": 15.83905029296875,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.609,
"step": 4950
},
{
"epoch": 2.3,
"grad_norm": 19.050613403320312,
"learning_rate": 5.555555555555556e-08,
"loss": 0.6702,
"step": 4975
},
{
"epoch": 2.31,
"grad_norm": 17.937570571899414,
"learning_rate": 0.0,
"loss": 0.6948,
"step": 5000
},
{
"epoch": 2.31,
"eval_loss": 1.1064683198928833,
"eval_runtime": 1237.1908,
"eval_samples_per_second": 3.497,
"eval_steps_per_second": 0.437,
"eval_wer": 0.7512307808831327,
"step": 5000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"total_flos": 1.154283884199936e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}