{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.072135785007072,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03536067892503536,
"grad_norm": 15.248167037963867,
"learning_rate": 4.800000000000001e-07,
"loss": 1.6047,
"step": 25
},
{
"epoch": 0.07072135785007072,
"grad_norm": 11.99557876586914,
"learning_rate": 9.800000000000001e-07,
"loss": 0.7922,
"step": 50
},
{
"epoch": 0.10608203677510608,
"grad_norm": 10.12234878540039,
"learning_rate": 1.48e-06,
"loss": 0.6289,
"step": 75
},
{
"epoch": 0.14144271570014144,
"grad_norm": 10.784132957458496,
"learning_rate": 1.98e-06,
"loss": 0.5627,
"step": 100
},
{
"epoch": 0.1768033946251768,
"grad_norm": 10.604392051696777,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.563,
"step": 125
},
{
"epoch": 0.21216407355021216,
"grad_norm": 9.61458969116211,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.5218,
"step": 150
},
{
"epoch": 0.24752475247524752,
"grad_norm": 10.1797513961792,
"learning_rate": 3.48e-06,
"loss": 0.5316,
"step": 175
},
{
"epoch": 0.2828854314002829,
"grad_norm": 9.834571838378906,
"learning_rate": 3.980000000000001e-06,
"loss": 0.5232,
"step": 200
},
{
"epoch": 0.31824611032531824,
"grad_norm": 9.805900573730469,
"learning_rate": 4.48e-06,
"loss": 0.4902,
"step": 225
},
{
"epoch": 0.3536067892503536,
"grad_norm": 10.328847885131836,
"learning_rate": 4.980000000000001e-06,
"loss": 0.4885,
"step": 250
},
{
"epoch": 0.38896746817538896,
"grad_norm": 9.624419212341309,
"learning_rate": 5.480000000000001e-06,
"loss": 0.4615,
"step": 275
},
{
"epoch": 0.4243281471004243,
"grad_norm": 12.492682456970215,
"learning_rate": 5.98e-06,
"loss": 0.4366,
"step": 300
},
{
"epoch": 0.4596888260254597,
"grad_norm": 9.505845069885254,
"learning_rate": 6.480000000000001e-06,
"loss": 0.4758,
"step": 325
},
{
"epoch": 0.49504950495049505,
"grad_norm": 8.755816459655762,
"learning_rate": 6.98e-06,
"loss": 0.44,
"step": 350
},
{
"epoch": 0.5304101838755304,
"grad_norm": 9.016213417053223,
"learning_rate": 7.48e-06,
"loss": 0.4061,
"step": 375
},
{
"epoch": 0.5657708628005658,
"grad_norm": 9.07496166229248,
"learning_rate": 7.980000000000002e-06,
"loss": 0.4171,
"step": 400
},
{
"epoch": 0.6011315417256011,
"grad_norm": 7.877910137176514,
"learning_rate": 8.48e-06,
"loss": 0.4141,
"step": 425
},
{
"epoch": 0.6364922206506365,
"grad_norm": 8.071303367614746,
"learning_rate": 8.98e-06,
"loss": 0.3969,
"step": 450
},
{
"epoch": 0.6718528995756718,
"grad_norm": 8.29423713684082,
"learning_rate": 9.48e-06,
"loss": 0.3924,
"step": 475
},
{
"epoch": 0.7072135785007072,
"grad_norm": 8.465646743774414,
"learning_rate": 9.980000000000001e-06,
"loss": 0.4029,
"step": 500
},
{
"epoch": 0.7425742574257426,
"grad_norm": 7.924498081207275,
"learning_rate": 9.946666666666667e-06,
"loss": 0.379,
"step": 525
},
{
"epoch": 0.7779349363507779,
"grad_norm": 8.063438415527344,
"learning_rate": 9.891111111111113e-06,
"loss": 0.3814,
"step": 550
},
{
"epoch": 0.8132956152758133,
"grad_norm": 7.638169765472412,
"learning_rate": 9.835555555555556e-06,
"loss": 0.3476,
"step": 575
},
{
"epoch": 0.8486562942008486,
"grad_norm": 8.318132400512695,
"learning_rate": 9.780000000000001e-06,
"loss": 0.3499,
"step": 600
},
{
"epoch": 0.884016973125884,
"grad_norm": 7.153702735900879,
"learning_rate": 9.724444444444445e-06,
"loss": 0.3411,
"step": 625
},
{
"epoch": 0.9193776520509194,
"grad_norm": 5.998073101043701,
"learning_rate": 9.671111111111112e-06,
"loss": 0.3206,
"step": 650
},
{
"epoch": 0.9547383309759547,
"grad_norm": 7.312229156494141,
"learning_rate": 9.615555555555558e-06,
"loss": 0.3463,
"step": 675
},
{
"epoch": 0.9900990099009901,
"grad_norm": 6.737645626068115,
"learning_rate": 9.56e-06,
"loss": 0.3285,
"step": 700
},
{
"epoch": 1.0254596888260255,
"grad_norm": 6.451304912567139,
"learning_rate": 9.504444444444446e-06,
"loss": 0.2746,
"step": 725
},
{
"epoch": 1.0608203677510608,
"grad_norm": 5.666613578796387,
"learning_rate": 9.44888888888889e-06,
"loss": 0.2455,
"step": 750
},
{
"epoch": 1.0961810466760962,
"grad_norm": 6.165246963500977,
"learning_rate": 9.393333333333334e-06,
"loss": 0.2246,
"step": 775
},
{
"epoch": 1.1315417256011315,
"grad_norm": 5.314395904541016,
"learning_rate": 9.33777777777778e-06,
"loss": 0.2313,
"step": 800
},
{
"epoch": 1.166902404526167,
"grad_norm": 6.031562328338623,
"learning_rate": 9.282222222222222e-06,
"loss": 0.2445,
"step": 825
},
{
"epoch": 1.2022630834512023,
"grad_norm": 8.143284797668457,
"learning_rate": 9.226666666666668e-06,
"loss": 0.2248,
"step": 850
},
{
"epoch": 1.2376237623762376,
"grad_norm": 4.90247106552124,
"learning_rate": 9.171111111111112e-06,
"loss": 0.2296,
"step": 875
},
{
"epoch": 1.272984441301273,
"grad_norm": 5.334079265594482,
"learning_rate": 9.115555555555556e-06,
"loss": 0.2252,
"step": 900
},
{
"epoch": 1.3083451202263083,
"grad_norm": 6.497454643249512,
"learning_rate": 9.060000000000001e-06,
"loss": 0.2261,
"step": 925
},
{
"epoch": 1.3437057991513437,
"grad_norm": 7.067910671234131,
"learning_rate": 9.004444444444445e-06,
"loss": 0.2416,
"step": 950
},
{
"epoch": 1.379066478076379,
"grad_norm": 8.470537185668945,
"learning_rate": 8.94888888888889e-06,
"loss": 0.2219,
"step": 975
},
{
"epoch": 1.4144271570014144,
"grad_norm": 4.9936747550964355,
"learning_rate": 8.893333333333333e-06,
"loss": 0.2147,
"step": 1000
},
{
"epoch": 1.4144271570014144,
"eval_loss": 0.3066270351409912,
"eval_runtime": 1765.706,
"eval_samples_per_second": 3.048,
"eval_steps_per_second": 0.191,
"eval_wer": 0.23493975903614459,
"step": 1000
},
{
"epoch": 1.4497878359264498,
"grad_norm": 7.876005172729492,
"learning_rate": 8.83777777777778e-06,
"loss": 0.2032,
"step": 1025
},
{
"epoch": 1.4851485148514851,
"grad_norm": 6.1690826416015625,
"learning_rate": 8.782222222222223e-06,
"loss": 0.216,
"step": 1050
},
{
"epoch": 1.5205091937765205,
"grad_norm": 5.094773769378662,
"learning_rate": 8.726666666666667e-06,
"loss": 0.2189,
"step": 1075
},
{
"epoch": 1.5558698727015559,
"grad_norm": 6.5309834480285645,
"learning_rate": 8.671111111111113e-06,
"loss": 0.2188,
"step": 1100
},
{
"epoch": 1.5912305516265912,
"grad_norm": 7.060642242431641,
"learning_rate": 8.615555555555555e-06,
"loss": 0.2233,
"step": 1125
},
{
"epoch": 1.6265912305516266,
"grad_norm": 5.661276817321777,
"learning_rate": 8.560000000000001e-06,
"loss": 0.2124,
"step": 1150
},
{
"epoch": 1.661951909476662,
"grad_norm": 5.289186477661133,
"learning_rate": 8.504444444444445e-06,
"loss": 0.2022,
"step": 1175
},
{
"epoch": 1.6973125884016973,
"grad_norm": 4.7772064208984375,
"learning_rate": 8.448888888888889e-06,
"loss": 0.2018,
"step": 1200
},
{
"epoch": 1.7326732673267327,
"grad_norm": 5.6891303062438965,
"learning_rate": 8.393333333333335e-06,
"loss": 0.2059,
"step": 1225
},
{
"epoch": 1.768033946251768,
"grad_norm": 5.304312229156494,
"learning_rate": 8.337777777777777e-06,
"loss": 0.1949,
"step": 1250
},
{
"epoch": 1.8033946251768034,
"grad_norm": 5.472725868225098,
"learning_rate": 8.282222222222223e-06,
"loss": 0.21,
"step": 1275
},
{
"epoch": 1.8387553041018387,
"grad_norm": 4.937655448913574,
"learning_rate": 8.226666666666667e-06,
"loss": 0.2137,
"step": 1300
},
{
"epoch": 1.874115983026874,
"grad_norm": 6.363350868225098,
"learning_rate": 8.171111111111113e-06,
"loss": 0.1919,
"step": 1325
},
{
"epoch": 1.9094766619519095,
"grad_norm": 5.087652206420898,
"learning_rate": 8.115555555555557e-06,
"loss": 0.1783,
"step": 1350
},
{
"epoch": 1.9448373408769448,
"grad_norm": 4.039090633392334,
"learning_rate": 8.06e-06,
"loss": 0.1879,
"step": 1375
},
{
"epoch": 1.9801980198019802,
"grad_norm": 5.647103309631348,
"learning_rate": 8.004444444444445e-06,
"loss": 0.1904,
"step": 1400
},
{
"epoch": 2.0155586987270158,
"grad_norm": 3.8179798126220703,
"learning_rate": 7.948888888888889e-06,
"loss": 0.1572,
"step": 1425
},
{
"epoch": 2.050919377652051,
"grad_norm": 5.048496246337891,
"learning_rate": 7.893333333333335e-06,
"loss": 0.1087,
"step": 1450
},
{
"epoch": 2.0862800565770865,
"grad_norm": 3.450915813446045,
"learning_rate": 7.837777777777779e-06,
"loss": 0.1088,
"step": 1475
},
{
"epoch": 2.1216407355021216,
"grad_norm": 4.442863464355469,
"learning_rate": 7.782222222222223e-06,
"loss": 0.1049,
"step": 1500
},
{
"epoch": 2.157001414427157,
"grad_norm": 4.130264759063721,
"learning_rate": 7.726666666666667e-06,
"loss": 0.1118,
"step": 1525
},
{
"epoch": 2.1923620933521923,
"grad_norm": 3.8330652713775635,
"learning_rate": 7.67111111111111e-06,
"loss": 0.1068,
"step": 1550
},
{
"epoch": 2.227722772277228,
"grad_norm": 4.3345489501953125,
"learning_rate": 7.6155555555555564e-06,
"loss": 0.1133,
"step": 1575
},
{
"epoch": 2.263083451202263,
"grad_norm": 4.276752948760986,
"learning_rate": 7.5600000000000005e-06,
"loss": 0.1105,
"step": 1600
},
{
"epoch": 2.298444130127298,
"grad_norm": 3.8332560062408447,
"learning_rate": 7.504444444444445e-06,
"loss": 0.1011,
"step": 1625
},
{
"epoch": 2.333804809052334,
"grad_norm": 4.976287841796875,
"learning_rate": 7.44888888888889e-06,
"loss": 0.1151,
"step": 1650
},
{
"epoch": 2.3691654879773694,
"grad_norm": 3.8312933444976807,
"learning_rate": 7.393333333333333e-06,
"loss": 0.1124,
"step": 1675
},
{
"epoch": 2.4045261669024045,
"grad_norm": 4.4323248863220215,
"learning_rate": 7.337777777777778e-06,
"loss": 0.1148,
"step": 1700
},
{
"epoch": 2.4398868458274396,
"grad_norm": 3.492539882659912,
"learning_rate": 7.282222222222222e-06,
"loss": 0.112,
"step": 1725
},
{
"epoch": 2.4752475247524752,
"grad_norm": 4.846162796020508,
"learning_rate": 7.226666666666667e-06,
"loss": 0.1128,
"step": 1750
},
{
"epoch": 2.510608203677511,
"grad_norm": 5.806790828704834,
"learning_rate": 7.171111111111112e-06,
"loss": 0.1072,
"step": 1775
},
{
"epoch": 2.545968882602546,
"grad_norm": 3.3074755668640137,
"learning_rate": 7.115555555555557e-06,
"loss": 0.1067,
"step": 1800
},
{
"epoch": 2.581329561527581,
"grad_norm": 3.70501446723938,
"learning_rate": 7.06e-06,
"loss": 0.1016,
"step": 1825
},
{
"epoch": 2.6166902404526167,
"grad_norm": 2.6926796436309814,
"learning_rate": 7.004444444444445e-06,
"loss": 0.1018,
"step": 1850
},
{
"epoch": 2.6520509193776522,
"grad_norm": 4.052884578704834,
"learning_rate": 6.948888888888889e-06,
"loss": 0.1106,
"step": 1875
},
{
"epoch": 2.6874115983026874,
"grad_norm": 4.722410678863525,
"learning_rate": 6.893333333333334e-06,
"loss": 0.1133,
"step": 1900
},
{
"epoch": 2.7227722772277225,
"grad_norm": 4.194309711456299,
"learning_rate": 6.837777777777779e-06,
"loss": 0.1012,
"step": 1925
},
{
"epoch": 2.758132956152758,
"grad_norm": 3.4879953861236572,
"learning_rate": 6.782222222222222e-06,
"loss": 0.1091,
"step": 1950
},
{
"epoch": 2.7934936350777937,
"grad_norm": 2.7147629261016846,
"learning_rate": 6.726666666666667e-06,
"loss": 0.0909,
"step": 1975
},
{
"epoch": 2.828854314002829,
"grad_norm": 4.304833889007568,
"learning_rate": 6.671111111111112e-06,
"loss": 0.0989,
"step": 2000
},
{
"epoch": 2.828854314002829,
"eval_loss": 0.2774756848812103,
"eval_runtime": 1765.8044,
"eval_samples_per_second": 3.047,
"eval_steps_per_second": 0.191,
"eval_wer": 0.20720505785518312,
"step": 2000
},
{
"epoch": 2.864214992927864,
"grad_norm": 5.535168170928955,
"learning_rate": 6.615555555555556e-06,
"loss": 0.1188,
"step": 2025
},
{
"epoch": 2.8995756718528995,
"grad_norm": 5.256195068359375,
"learning_rate": 6.560000000000001e-06,
"loss": 0.097,
"step": 2050
},
{
"epoch": 2.934936350777935,
"grad_norm": 4.056241035461426,
"learning_rate": 6.504444444444446e-06,
"loss": 0.096,
"step": 2075
},
{
"epoch": 2.9702970297029703,
"grad_norm": 4.049830913543701,
"learning_rate": 6.448888888888889e-06,
"loss": 0.1031,
"step": 2100
},
{
"epoch": 3.005657708628006,
"grad_norm": 2.4076595306396484,
"learning_rate": 6.393333333333334e-06,
"loss": 0.0931,
"step": 2125
},
{
"epoch": 3.041018387553041,
"grad_norm": 1.8493136167526245,
"learning_rate": 6.3377777777777786e-06,
"loss": 0.0591,
"step": 2150
},
{
"epoch": 3.0763790664780766,
"grad_norm": 2.923140525817871,
"learning_rate": 6.282222222222223e-06,
"loss": 0.0498,
"step": 2175
},
{
"epoch": 3.1117397454031117,
"grad_norm": 3.3305506706237793,
"learning_rate": 6.2266666666666675e-06,
"loss": 0.0538,
"step": 2200
},
{
"epoch": 3.1471004243281473,
"grad_norm": 2.396880626678467,
"learning_rate": 6.171111111111112e-06,
"loss": 0.0496,
"step": 2225
},
{
"epoch": 3.1824611032531824,
"grad_norm": 3.4060046672821045,
"learning_rate": 6.1155555555555555e-06,
"loss": 0.0559,
"step": 2250
},
{
"epoch": 3.217821782178218,
"grad_norm": 4.2796478271484375,
"learning_rate": 6.0600000000000004e-06,
"loss": 0.0479,
"step": 2275
},
{
"epoch": 3.253182461103253,
"grad_norm": 3.096123456954956,
"learning_rate": 6.004444444444445e-06,
"loss": 0.0547,
"step": 2300
},
{
"epoch": 3.2885431400282887,
"grad_norm": 3.5626742839813232,
"learning_rate": 5.948888888888889e-06,
"loss": 0.058,
"step": 2325
},
{
"epoch": 3.323903818953324,
"grad_norm": 3.2983179092407227,
"learning_rate": 5.893333333333334e-06,
"loss": 0.0586,
"step": 2350
},
{
"epoch": 3.3592644978783595,
"grad_norm": 3.978823184967041,
"learning_rate": 5.837777777777777e-06,
"loss": 0.0554,
"step": 2375
},
{
"epoch": 3.3946251768033946,
"grad_norm": 3.5360333919525146,
"learning_rate": 5.782222222222222e-06,
"loss": 0.0549,
"step": 2400
},
{
"epoch": 3.42998585572843,
"grad_norm": 3.2348575592041016,
"learning_rate": 5.726666666666667e-06,
"loss": 0.0609,
"step": 2425
},
{
"epoch": 3.4653465346534653,
"grad_norm": 4.142767906188965,
"learning_rate": 5.671111111111112e-06,
"loss": 0.0615,
"step": 2450
},
{
"epoch": 3.500707213578501,
"grad_norm": 3.2305757999420166,
"learning_rate": 5.615555555555556e-06,
"loss": 0.0485,
"step": 2475
},
{
"epoch": 3.536067892503536,
"grad_norm": 2.9658920764923096,
"learning_rate": 5.560000000000001e-06,
"loss": 0.054,
"step": 2500
},
{
"epoch": 3.571428571428571,
"grad_norm": 3.1340267658233643,
"learning_rate": 5.504444444444444e-06,
"loss": 0.0633,
"step": 2525
},
{
"epoch": 3.6067892503536068,
"grad_norm": 2.692047119140625,
"learning_rate": 5.448888888888889e-06,
"loss": 0.0549,
"step": 2550
},
{
"epoch": 3.6421499292786423,
"grad_norm": 2.9761712551116943,
"learning_rate": 5.393333333333334e-06,
"loss": 0.0537,
"step": 2575
},
{
"epoch": 3.6775106082036775,
"grad_norm": 2.88071346282959,
"learning_rate": 5.337777777777779e-06,
"loss": 0.0497,
"step": 2600
},
{
"epoch": 3.7128712871287126,
"grad_norm": 3.6829402446746826,
"learning_rate": 5.282222222222223e-06,
"loss": 0.0609,
"step": 2625
},
{
"epoch": 3.748231966053748,
"grad_norm": 4.3834614753723145,
"learning_rate": 5.226666666666667e-06,
"loss": 0.0567,
"step": 2650
},
{
"epoch": 3.783592644978784,
"grad_norm": 2.6627509593963623,
"learning_rate": 5.171111111111111e-06,
"loss": 0.052,
"step": 2675
},
{
"epoch": 3.818953323903819,
"grad_norm": 3.6826014518737793,
"learning_rate": 5.115555555555556e-06,
"loss": 0.0529,
"step": 2700
},
{
"epoch": 3.854314002828854,
"grad_norm": 3.048292636871338,
"learning_rate": 5.060000000000001e-06,
"loss": 0.0617,
"step": 2725
},
{
"epoch": 3.8896746817538896,
"grad_norm": 2.5703887939453125,
"learning_rate": 5.004444444444445e-06,
"loss": 0.0507,
"step": 2750
},
{
"epoch": 3.9250353606789252,
"grad_norm": 2.8455772399902344,
"learning_rate": 4.94888888888889e-06,
"loss": 0.0562,
"step": 2775
},
{
"epoch": 3.9603960396039604,
"grad_norm": 2.147115468978882,
"learning_rate": 4.893333333333334e-06,
"loss": 0.0479,
"step": 2800
},
{
"epoch": 3.9957567185289955,
"grad_norm": 4.647951126098633,
"learning_rate": 4.837777777777778e-06,
"loss": 0.0555,
"step": 2825
},
{
"epoch": 4.0311173974540315,
"grad_norm": 1.5333168506622314,
"learning_rate": 4.7822222222222226e-06,
"loss": 0.0241,
"step": 2850
},
{
"epoch": 4.066478076379067,
"grad_norm": 2.611342668533325,
"learning_rate": 4.728888888888889e-06,
"loss": 0.0297,
"step": 2875
},
{
"epoch": 4.101838755304102,
"grad_norm": 1.1682252883911133,
"learning_rate": 4.673333333333333e-06,
"loss": 0.028,
"step": 2900
},
{
"epoch": 4.137199434229137,
"grad_norm": 1.971526861190796,
"learning_rate": 4.617777777777778e-06,
"loss": 0.0256,
"step": 2925
},
{
"epoch": 4.172560113154173,
"grad_norm": 2.9451370239257812,
"learning_rate": 4.562222222222222e-06,
"loss": 0.0248,
"step": 2950
},
{
"epoch": 4.207920792079208,
"grad_norm": 2.203016757965088,
"learning_rate": 4.506666666666667e-06,
"loss": 0.0254,
"step": 2975
},
{
"epoch": 4.243281471004243,
"grad_norm": 3.0037150382995605,
"learning_rate": 4.451111111111112e-06,
"loss": 0.0295,
"step": 3000
},
{
"epoch": 4.243281471004243,
"eval_loss": 0.29346707463264465,
"eval_runtime": 1760.8686,
"eval_samples_per_second": 3.056,
"eval_steps_per_second": 0.191,
"eval_wer": 0.1918962980635413,
"step": 3000
},
{
"epoch": 4.278642149929278,
"grad_norm": 2.936363697052002,
"learning_rate": 4.395555555555556e-06,
"loss": 0.0235,
"step": 3025
},
{
"epoch": 4.314002828854314,
"grad_norm": 1.909324288368225,
"learning_rate": 4.34e-06,
"loss": 0.027,
"step": 3050
},
{
"epoch": 4.3493635077793495,
"grad_norm": 1.8977993726730347,
"learning_rate": 4.284444444444445e-06,
"loss": 0.0267,
"step": 3075
},
{
"epoch": 4.384724186704385,
"grad_norm": 2.092729330062866,
"learning_rate": 4.228888888888889e-06,
"loss": 0.0277,
"step": 3100
},
{
"epoch": 4.42008486562942,
"grad_norm": 2.539109945297241,
"learning_rate": 4.173333333333334e-06,
"loss": 0.0232,
"step": 3125
},
{
"epoch": 4.455445544554456,
"grad_norm": 1.8122098445892334,
"learning_rate": 4.117777777777779e-06,
"loss": 0.0276,
"step": 3150
},
{
"epoch": 4.490806223479491,
"grad_norm": 1.4347445964813232,
"learning_rate": 4.062222222222223e-06,
"loss": 0.021,
"step": 3175
},
{
"epoch": 4.526166902404526,
"grad_norm": 2.03843355178833,
"learning_rate": 4.006666666666667e-06,
"loss": 0.0278,
"step": 3200
},
{
"epoch": 4.561527581329561,
"grad_norm": 2.6143271923065186,
"learning_rate": 3.951111111111112e-06,
"loss": 0.0316,
"step": 3225
},
{
"epoch": 4.596888260254596,
"grad_norm": 2.1741061210632324,
"learning_rate": 3.895555555555556e-06,
"loss": 0.0277,
"step": 3250
},
{
"epoch": 4.632248939179632,
"grad_norm": 3.5832366943359375,
"learning_rate": 3.8400000000000005e-06,
"loss": 0.025,
"step": 3275
},
{
"epoch": 4.667609618104668,
"grad_norm": 2.232801675796509,
"learning_rate": 3.784444444444445e-06,
"loss": 0.0282,
"step": 3300
},
{
"epoch": 4.702970297029703,
"grad_norm": 2.736708641052246,
"learning_rate": 3.728888888888889e-06,
"loss": 0.0308,
"step": 3325
},
{
"epoch": 4.738330975954739,
"grad_norm": 2.0116610527038574,
"learning_rate": 3.673333333333334e-06,
"loss": 0.0305,
"step": 3350
},
{
"epoch": 4.773691654879774,
"grad_norm": 2.6088578701019287,
"learning_rate": 3.617777777777778e-06,
"loss": 0.025,
"step": 3375
},
{
"epoch": 4.809052333804809,
"grad_norm": 1.400682806968689,
"learning_rate": 3.5622222222222224e-06,
"loss": 0.0248,
"step": 3400
},
{
"epoch": 4.844413012729844,
"grad_norm": 1.956292748451233,
"learning_rate": 3.5066666666666673e-06,
"loss": 0.0258,
"step": 3425
},
{
"epoch": 4.879773691654879,
"grad_norm": 2.2373459339141846,
"learning_rate": 3.4511111111111113e-06,
"loss": 0.0229,
"step": 3450
},
{
"epoch": 4.915134370579915,
"grad_norm": 2.9303152561187744,
"learning_rate": 3.3955555555555558e-06,
"loss": 0.0255,
"step": 3475
},
{
"epoch": 4.9504950495049505,
"grad_norm": 2.3725924491882324,
"learning_rate": 3.3400000000000006e-06,
"loss": 0.0289,
"step": 3500
},
{
"epoch": 4.985855728429986,
"grad_norm": 2.325536012649536,
"learning_rate": 3.2844444444444447e-06,
"loss": 0.0277,
"step": 3525
},
{
"epoch": 5.021216407355022,
"grad_norm": 3.026657819747925,
"learning_rate": 3.228888888888889e-06,
"loss": 0.0184,
"step": 3550
},
{
"epoch": 5.056577086280057,
"grad_norm": 1.5165703296661377,
"learning_rate": 3.173333333333334e-06,
"loss": 0.0148,
"step": 3575
},
{
"epoch": 5.091937765205092,
"grad_norm": 1.2629361152648926,
"learning_rate": 3.117777777777778e-06,
"loss": 0.0104,
"step": 3600
},
{
"epoch": 5.127298444130127,
"grad_norm": 1.312657117843628,
"learning_rate": 3.0622222222222225e-06,
"loss": 0.0132,
"step": 3625
},
{
"epoch": 5.162659123055163,
"grad_norm": 1.1759546995162964,
"learning_rate": 3.0066666666666674e-06,
"loss": 0.0096,
"step": 3650
},
{
"epoch": 5.198019801980198,
"grad_norm": 0.8515748381614685,
"learning_rate": 2.9511111111111114e-06,
"loss": 0.0125,
"step": 3675
},
{
"epoch": 5.233380480905233,
"grad_norm": 1.0795273780822754,
"learning_rate": 2.895555555555556e-06,
"loss": 0.0134,
"step": 3700
},
{
"epoch": 5.2687411598302685,
"grad_norm": 1.2719004154205322,
"learning_rate": 2.84e-06,
"loss": 0.0104,
"step": 3725
},
{
"epoch": 5.3041018387553045,
"grad_norm": 0.8354203701019287,
"learning_rate": 2.784444444444445e-06,
"loss": 0.0106,
"step": 3750
},
{
"epoch": 5.33946251768034,
"grad_norm": 2.3398609161376953,
"learning_rate": 2.7288888888888893e-06,
"loss": 0.0126,
"step": 3775
},
{
"epoch": 5.374823196605375,
"grad_norm": 2.567042350769043,
"learning_rate": 2.6733333333333333e-06,
"loss": 0.0109,
"step": 3800
},
{
"epoch": 5.41018387553041,
"grad_norm": 1.6744190454483032,
"learning_rate": 2.617777777777778e-06,
"loss": 0.0117,
"step": 3825
},
{
"epoch": 5.445544554455446,
"grad_norm": 1.0200034379959106,
"learning_rate": 2.5622222222222226e-06,
"loss": 0.0118,
"step": 3850
},
{
"epoch": 5.480905233380481,
"grad_norm": 0.8212645053863525,
"learning_rate": 2.5066666666666667e-06,
"loss": 0.0106,
"step": 3875
},
{
"epoch": 5.516265912305516,
"grad_norm": 0.9394924640655518,
"learning_rate": 2.451111111111111e-06,
"loss": 0.0103,
"step": 3900
},
{
"epoch": 5.551626591230551,
"grad_norm": 2.0436089038848877,
"learning_rate": 2.3955555555555556e-06,
"loss": 0.0177,
"step": 3925
},
{
"epoch": 5.586987270155587,
"grad_norm": 1.194536566734314,
"learning_rate": 2.3400000000000005e-06,
"loss": 0.0105,
"step": 3950
},
{
"epoch": 5.6223479490806225,
"grad_norm": 1.526948094367981,
"learning_rate": 2.2844444444444445e-06,
"loss": 0.0097,
"step": 3975
},
{
"epoch": 5.657708628005658,
"grad_norm": 1.8187007904052734,
"learning_rate": 2.228888888888889e-06,
"loss": 0.0109,
"step": 4000
},
{
"epoch": 5.657708628005658,
"eval_loss": 0.30111825466156006,
"eval_runtime": 1778.4921,
"eval_samples_per_second": 3.026,
"eval_steps_per_second": 0.189,
"eval_wer": 0.18279056821344786,
"step": 4000
},
{
"epoch": 5.693069306930693,
"grad_norm": 2.2238926887512207,
"learning_rate": 2.1733333333333334e-06,
"loss": 0.0096,
"step": 4025
},
{
"epoch": 5.728429985855728,
"grad_norm": 1.989168405532837,
"learning_rate": 2.117777777777778e-06,
"loss": 0.0136,
"step": 4050
},
{
"epoch": 5.763790664780764,
"grad_norm": 1.6722816228866577,
"learning_rate": 2.0622222222222223e-06,
"loss": 0.0107,
"step": 4075
},
{
"epoch": 5.799151343705799,
"grad_norm": 1.0947668552398682,
"learning_rate": 2.006666666666667e-06,
"loss": 0.0083,
"step": 4100
},
{
"epoch": 5.834512022630834,
"grad_norm": 0.5356242656707764,
"learning_rate": 1.9511111111111113e-06,
"loss": 0.0108,
"step": 4125
},
{
"epoch": 5.86987270155587,
"grad_norm": 2.1374714374542236,
"learning_rate": 1.8955555555555557e-06,
"loss": 0.0107,
"step": 4150
},
{
"epoch": 5.905233380480905,
"grad_norm": 0.9695949554443359,
"learning_rate": 1.8400000000000002e-06,
"loss": 0.0103,
"step": 4175
},
{
"epoch": 5.9405940594059405,
"grad_norm": 1.3653473854064941,
"learning_rate": 1.7844444444444444e-06,
"loss": 0.0116,
"step": 4200
},
{
"epoch": 5.975954738330976,
"grad_norm": 1.5049793720245361,
"learning_rate": 1.728888888888889e-06,
"loss": 0.0129,
"step": 4225
},
{
"epoch": 6.011315417256012,
"grad_norm": 2.540705442428589,
"learning_rate": 1.6733333333333335e-06,
"loss": 0.0085,
"step": 4250
},
{
"epoch": 6.046676096181047,
"grad_norm": 0.5889933705329895,
"learning_rate": 1.6177777777777778e-06,
"loss": 0.0043,
"step": 4275
},
{
"epoch": 6.082036775106082,
"grad_norm": 1.6074843406677246,
"learning_rate": 1.5622222222222225e-06,
"loss": 0.006,
"step": 4300
},
{
"epoch": 6.117397454031117,
"grad_norm": 0.8578146696090698,
"learning_rate": 1.506666666666667e-06,
"loss": 0.0054,
"step": 4325
},
{
"epoch": 6.152758132956153,
"grad_norm": 0.6143340468406677,
"learning_rate": 1.4511111111111112e-06,
"loss": 0.0043,
"step": 4350
},
{
"epoch": 6.188118811881188,
"grad_norm": 0.3723517656326294,
"learning_rate": 1.3955555555555556e-06,
"loss": 0.0034,
"step": 4375
},
{
"epoch": 6.223479490806223,
"grad_norm": 1.0041955709457397,
"learning_rate": 1.34e-06,
"loss": 0.0059,
"step": 4400
},
{
"epoch": 6.258840169731259,
"grad_norm": 1.2664568424224854,
"learning_rate": 1.2844444444444445e-06,
"loss": 0.0039,
"step": 4425
},
{
"epoch": 6.294200848656295,
"grad_norm": 0.259546160697937,
"learning_rate": 1.228888888888889e-06,
"loss": 0.0051,
"step": 4450
},
{
"epoch": 6.32956152758133,
"grad_norm": 0.9940450191497803,
"learning_rate": 1.1733333333333335e-06,
"loss": 0.0027,
"step": 4475
},
{
"epoch": 6.364922206506365,
"grad_norm": 0.23329602181911469,
"learning_rate": 1.117777777777778e-06,
"loss": 0.0038,
"step": 4500
},
{
"epoch": 6.4002828854314,
"grad_norm": 0.12971219420433044,
"learning_rate": 1.0622222222222222e-06,
"loss": 0.0035,
"step": 4525
},
{
"epoch": 6.435643564356436,
"grad_norm": 0.4987533688545227,
"learning_rate": 1.0066666666666668e-06,
"loss": 0.0081,
"step": 4550
},
{
"epoch": 6.471004243281471,
"grad_norm": 0.27808260917663574,
"learning_rate": 9.511111111111111e-07,
"loss": 0.0034,
"step": 4575
},
{
"epoch": 6.506364922206506,
"grad_norm": 0.23258398473262787,
"learning_rate": 8.955555555555557e-07,
"loss": 0.0033,
"step": 4600
},
{
"epoch": 6.5417256011315414,
"grad_norm": 0.3250856101512909,
"learning_rate": 8.400000000000001e-07,
"loss": 0.0038,
"step": 4625
},
{
"epoch": 6.5770862800565775,
"grad_norm": 0.5452969074249268,
"learning_rate": 7.844444444444445e-07,
"loss": 0.0033,
"step": 4650
},
{
"epoch": 6.612446958981613,
"grad_norm": 0.22152584791183472,
"learning_rate": 7.28888888888889e-07,
"loss": 0.0067,
"step": 4675
},
{
"epoch": 6.647807637906648,
"grad_norm": 1.5061694383621216,
"learning_rate": 6.733333333333334e-07,
"loss": 0.0081,
"step": 4700
},
{
"epoch": 6.683168316831683,
"grad_norm": 0.2032124400138855,
"learning_rate": 6.177777777777778e-07,
"loss": 0.0042,
"step": 4725
},
{
"epoch": 6.718528995756719,
"grad_norm": 0.25647231936454773,
"learning_rate": 5.622222222222223e-07,
"loss": 0.0061,
"step": 4750
},
{
"epoch": 6.753889674681754,
"grad_norm": 0.27424803376197815,
"learning_rate": 5.066666666666667e-07,
"loss": 0.0034,
"step": 4775
},
{
"epoch": 6.789250353606789,
"grad_norm": 0.4183076322078705,
"learning_rate": 4.511111111111111e-07,
"loss": 0.004,
"step": 4800
},
{
"epoch": 6.824611032531824,
"grad_norm": 0.40847113728523254,
"learning_rate": 3.9555555555555557e-07,
"loss": 0.0029,
"step": 4825
},
{
"epoch": 6.85997171145686,
"grad_norm": 0.2560099959373474,
"learning_rate": 3.4000000000000003e-07,
"loss": 0.0028,
"step": 4850
},
{
"epoch": 6.8953323903818955,
"grad_norm": 0.13006103038787842,
"learning_rate": 2.844444444444445e-07,
"loss": 0.0042,
"step": 4875
},
{
"epoch": 6.930693069306931,
"grad_norm": 0.38391485810279846,
"learning_rate": 2.2888888888888892e-07,
"loss": 0.0029,
"step": 4900
},
{
"epoch": 6.966053748231966,
"grad_norm": 0.4023485481739044,
"learning_rate": 1.7333333333333335e-07,
"loss": 0.0027,
"step": 4925
},
{
"epoch": 7.001414427157002,
"grad_norm": 0.1441173106431961,
"learning_rate": 1.1777777777777778e-07,
"loss": 0.0042,
"step": 4950
},
{
"epoch": 7.036775106082037,
"grad_norm": 0.18802694976329803,
"learning_rate": 6.222222222222223e-08,
"loss": 0.0033,
"step": 4975
},
{
"epoch": 7.072135785007072,
"grad_norm": 0.14834970235824585,
"learning_rate": 6.666666666666667e-09,
"loss": 0.0016,
"step": 5000
},
{
"epoch": 7.072135785007072,
"eval_loss": 0.3134021461009979,
"eval_runtime": 1756.5453,
"eval_samples_per_second": 3.063,
"eval_steps_per_second": 0.192,
"eval_wer": 0.17463915066205415,
"step": 5000
},
{
"epoch": 7.072135785007072,
"step": 5000,
"total_flos": 2.7263703386750976e+20,
"train_loss": 0.12853609090298415,
"train_runtime": 49407.2097,
"train_samples_per_second": 3.238,
"train_steps_per_second": 0.101
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.7263703386750976e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}