{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 12330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08110300081103,
"grad_norm": 28.603330612182617,
"learning_rate": 5.8800000000000005e-06,
"loss": 12.3758,
"step": 100
},
{
"epoch": 0.16220600162206,
"grad_norm": 19.174470901489258,
"learning_rate": 1.1880000000000001e-05,
"loss": 5.5109,
"step": 200
},
{
"epoch": 0.24330900243309003,
"grad_norm": 13.463099479675293,
"learning_rate": 1.7879999999999998e-05,
"loss": 3.8591,
"step": 300
},
{
"epoch": 0.32441200324412,
"grad_norm": 7.847727298736572,
"learning_rate": 2.3880000000000002e-05,
"loss": 3.3292,
"step": 400
},
{
"epoch": 0.40551500405515006,
"grad_norm": 7.480207443237305,
"learning_rate": 2.9880000000000002e-05,
"loss": 3.1444,
"step": 500
},
{
"epoch": 0.40551500405515006,
"eval_cer": 1.0,
"eval_loss": 3.1199822425842285,
"eval_runtime": 244.3979,
"eval_samples_per_second": 22.197,
"eval_steps_per_second": 5.552,
"eval_wer": 1.0,
"step": 500
},
{
"epoch": 0.48661800486618007,
"grad_norm": 5.100943565368652,
"learning_rate": 2.9751479289940832e-05,
"loss": 3.0927,
"step": 600
},
{
"epoch": 0.5677210056772101,
"grad_norm": 4.977197647094727,
"learning_rate": 2.9497886728655962e-05,
"loss": 3.0491,
"step": 700
},
{
"epoch": 0.64882400648824,
"grad_norm": 4.224981307983398,
"learning_rate": 2.9244294167371092e-05,
"loss": 3.0201,
"step": 800
},
{
"epoch": 0.7299270072992701,
"grad_norm": 5.425148010253906,
"learning_rate": 2.8990701606086222e-05,
"loss": 2.9859,
"step": 900
},
{
"epoch": 0.8110300081103001,
"grad_norm": 1.351231336593628,
"learning_rate": 2.8737109044801352e-05,
"loss": 2.9488,
"step": 1000
},
{
"epoch": 0.8110300081103001,
"eval_cer": 0.9806716021231193,
"eval_loss": 2.9561665058135986,
"eval_runtime": 243.1917,
"eval_samples_per_second": 22.308,
"eval_steps_per_second": 5.58,
"eval_wer": 1.0,
"step": 1000
},
{
"epoch": 0.8921330089213301,
"grad_norm": 1.8469974994659424,
"learning_rate": 2.8483516483516482e-05,
"loss": 2.8852,
"step": 1100
},
{
"epoch": 0.9732360097323601,
"grad_norm": 2.408888101577759,
"learning_rate": 2.8229923922231613e-05,
"loss": 2.542,
"step": 1200
},
{
"epoch": 1.05433901054339,
"grad_norm": 1.82163405418396,
"learning_rate": 2.7976331360946746e-05,
"loss": 1.978,
"step": 1300
},
{
"epoch": 1.1354420113544201,
"grad_norm": 1.5221664905548096,
"learning_rate": 2.7722738799661876e-05,
"loss": 1.6172,
"step": 1400
},
{
"epoch": 1.2165450121654502,
"grad_norm": 1.6060534715652466,
"learning_rate": 2.7469146238377006e-05,
"loss": 1.4553,
"step": 1500
},
{
"epoch": 1.2165450121654502,
"eval_cer": 0.16442234359713118,
"eval_loss": 0.7867947816848755,
"eval_runtime": 245.9044,
"eval_samples_per_second": 22.061,
"eval_steps_per_second": 5.518,
"eval_wer": 0.7034394979872128,
"step": 1500
},
{
"epoch": 1.29764801297648,
"grad_norm": 1.677599310874939,
"learning_rate": 2.721555367709214e-05,
"loss": 1.3456,
"step": 1600
},
{
"epoch": 1.37875101378751,
"grad_norm": 1.7622677087783813,
"learning_rate": 2.696196111580727e-05,
"loss": 1.2794,
"step": 1700
},
{
"epoch": 1.4598540145985401,
"grad_norm": 2.301548480987549,
"learning_rate": 2.67083685545224e-05,
"loss": 1.2401,
"step": 1800
},
{
"epoch": 1.5409570154095702,
"grad_norm": 1.7648967504501343,
"learning_rate": 2.6454775993237533e-05,
"loss": 1.2073,
"step": 1900
},
{
"epoch": 1.6220600162206003,
"grad_norm": 1.7196375131607056,
"learning_rate": 2.6201183431952664e-05,
"loss": 1.1495,
"step": 2000
},
{
"epoch": 1.6220600162206003,
"eval_cer": 0.13369643091002936,
"eval_loss": 0.559805154800415,
"eval_runtime": 246.7531,
"eval_samples_per_second": 21.986,
"eval_steps_per_second": 5.499,
"eval_wer": 0.6075953113900071,
"step": 2000
},
{
"epoch": 1.7031630170316303,
"grad_norm": 1.7879103422164917,
"learning_rate": 2.5947590870667794e-05,
"loss": 1.1087,
"step": 2100
},
{
"epoch": 1.7842660178426601,
"grad_norm": 1.6160575151443481,
"learning_rate": 2.5693998309382927e-05,
"loss": 1.0974,
"step": 2200
},
{
"epoch": 1.8653690186536902,
"grad_norm": 2.434413194656372,
"learning_rate": 2.5440405748098057e-05,
"loss": 1.0571,
"step": 2300
},
{
"epoch": 1.94647201946472,
"grad_norm": 1.7195013761520386,
"learning_rate": 2.5186813186813187e-05,
"loss": 1.0384,
"step": 2400
},
{
"epoch": 2.02757502027575,
"grad_norm": 2.289520263671875,
"learning_rate": 2.4933220625528317e-05,
"loss": 1.041,
"step": 2500
},
{
"epoch": 2.02757502027575,
"eval_cer": 0.11740326171528939,
"eval_loss": 0.46495679020881653,
"eval_runtime": 246.6192,
"eval_samples_per_second": 21.997,
"eval_steps_per_second": 5.502,
"eval_wer": 0.5536644565474781,
"step": 2500
},
{
"epoch": 2.10867802108678,
"grad_norm": 2.3239097595214844,
"learning_rate": 2.467962806424345e-05,
"loss": 1.002,
"step": 2600
},
{
"epoch": 2.18978102189781,
"grad_norm": 2.725745677947998,
"learning_rate": 2.442603550295858e-05,
"loss": 0.9909,
"step": 2700
},
{
"epoch": 2.2708840227088403,
"grad_norm": 2.6716995239257812,
"learning_rate": 2.417244294167371e-05,
"loss": 0.9665,
"step": 2800
},
{
"epoch": 2.3519870235198703,
"grad_norm": 2.5717599391937256,
"learning_rate": 2.3918850380388845e-05,
"loss": 0.9682,
"step": 2900
},
{
"epoch": 2.4330900243309004,
"grad_norm": 2.675999164581299,
"learning_rate": 2.3665257819103975e-05,
"loss": 0.9524,
"step": 3000
},
{
"epoch": 2.4330900243309004,
"eval_cer": 0.10609819041822986,
"eval_loss": 0.420441210269928,
"eval_runtime": 247.0264,
"eval_samples_per_second": 21.961,
"eval_steps_per_second": 5.493,
"eval_wer": 0.5097679374852001,
"step": 3000
},
{
"epoch": 2.51419302514193,
"grad_norm": 2.483025550842285,
"learning_rate": 2.3411665257819105e-05,
"loss": 0.9359,
"step": 3100
},
{
"epoch": 2.59529602595296,
"grad_norm": 3.9523444175720215,
"learning_rate": 2.315807269653424e-05,
"loss": 0.9349,
"step": 3200
},
{
"epoch": 2.67639902676399,
"grad_norm": 1.923336148262024,
"learning_rate": 2.290448013524937e-05,
"loss": 0.915,
"step": 3300
},
{
"epoch": 2.75750202757502,
"grad_norm": 2.7855656147003174,
"learning_rate": 2.2650887573964495e-05,
"loss": 0.902,
"step": 3400
},
{
"epoch": 2.83860502838605,
"grad_norm": 2.1685218811035156,
"learning_rate": 2.239729501267963e-05,
"loss": 0.902,
"step": 3500
},
{
"epoch": 2.83860502838605,
"eval_cer": 0.10258533691808734,
"eval_loss": 0.3919403553009033,
"eval_runtime": 247.0433,
"eval_samples_per_second": 21.96,
"eval_steps_per_second": 5.493,
"eval_wer": 0.49840161022969454,
"step": 3500
},
{
"epoch": 2.9197080291970803,
"grad_norm": 2.623002052307129,
"learning_rate": 2.214370245139476e-05,
"loss": 0.886,
"step": 3600
},
{
"epoch": 3.0008110300081103,
"grad_norm": 2.3057801723480225,
"learning_rate": 2.189010989010989e-05,
"loss": 0.8985,
"step": 3700
},
{
"epoch": 3.0819140308191404,
"grad_norm": 1.5028676986694336,
"learning_rate": 2.163651732882502e-05,
"loss": 0.8698,
"step": 3800
},
{
"epoch": 3.1630170316301705,
"grad_norm": 1.727243423461914,
"learning_rate": 2.1382924767540153e-05,
"loss": 0.8694,
"step": 3900
},
{
"epoch": 3.2441200324412005,
"grad_norm": 1.664138913154602,
"learning_rate": 2.1129332206255283e-05,
"loss": 0.8505,
"step": 4000
},
{
"epoch": 3.2441200324412005,
"eval_cer": 0.09652258317990278,
"eval_loss": 0.3688036799430847,
"eval_runtime": 255.621,
"eval_samples_per_second": 21.223,
"eval_steps_per_second": 5.309,
"eval_wer": 0.4677954061094009,
"step": 4000
},
{
"epoch": 3.3252230332522306,
"grad_norm": 1.814882755279541,
"learning_rate": 2.0875739644970413e-05,
"loss": 0.848,
"step": 4100
},
{
"epoch": 3.40632603406326,
"grad_norm": 1.9209064245224,
"learning_rate": 2.0622147083685546e-05,
"loss": 0.8697,
"step": 4200
},
{
"epoch": 3.4874290348742902,
"grad_norm": 2.795137882232666,
"learning_rate": 2.0368554522400676e-05,
"loss": 0.8474,
"step": 4300
},
{
"epoch": 3.5685320356853203,
"grad_norm": 1.8658416271209717,
"learning_rate": 2.0114961961115806e-05,
"loss": 0.8287,
"step": 4400
},
{
"epoch": 3.6496350364963503,
"grad_norm": 2.143341541290283,
"learning_rate": 1.986136939983094e-05,
"loss": 0.8353,
"step": 4500
},
{
"epoch": 3.6496350364963503,
"eval_cer": 0.09146900446039952,
"eval_loss": 0.34911593794822693,
"eval_runtime": 247.7614,
"eval_samples_per_second": 21.896,
"eval_steps_per_second": 5.477,
"eval_wer": 0.44882192753966377,
"step": 4500
},
{
"epoch": 3.7307380373073804,
"grad_norm": 1.6942418813705444,
"learning_rate": 1.960777683854607e-05,
"loss": 0.8344,
"step": 4600
},
{
"epoch": 3.8118410381184105,
"grad_norm": 2.338500499725342,
"learning_rate": 1.93541842772612e-05,
"loss": 0.8232,
"step": 4700
},
{
"epoch": 3.8929440389294405,
"grad_norm": 2.209710121154785,
"learning_rate": 1.9100591715976334e-05,
"loss": 0.8208,
"step": 4800
},
{
"epoch": 3.97404703974047,
"grad_norm": 2.0351734161376953,
"learning_rate": 1.8846999154691464e-05,
"loss": 0.8382,
"step": 4900
},
{
"epoch": 4.0551500405515,
"grad_norm": 2.32840633392334,
"learning_rate": 1.8593406593406594e-05,
"loss": 0.8015,
"step": 5000
},
{
"epoch": 4.0551500405515,
"eval_cer": 0.0895970233188762,
"eval_loss": 0.3409828543663025,
"eval_runtime": 247.0784,
"eval_samples_per_second": 21.957,
"eval_steps_per_second": 5.492,
"eval_wer": 0.435620412029363,
"step": 5000
},
{
"epoch": 4.13625304136253,
"grad_norm": 3.6171586513519287,
"learning_rate": 1.8339814032121724e-05,
"loss": 0.8046,
"step": 5100
},
{
"epoch": 4.21735604217356,
"grad_norm": 2.958981513977051,
"learning_rate": 1.8086221470836857e-05,
"loss": 0.7952,
"step": 5200
},
{
"epoch": 4.29845904298459,
"grad_norm": 2.3776564598083496,
"learning_rate": 1.7832628909551988e-05,
"loss": 0.8001,
"step": 5300
},
{
"epoch": 4.37956204379562,
"grad_norm": 1.985517978668213,
"learning_rate": 1.7579036348267118e-05,
"loss": 0.8129,
"step": 5400
},
{
"epoch": 4.4606650446066505,
"grad_norm": 2.3717586994171143,
"learning_rate": 1.732544378698225e-05,
"loss": 0.7771,
"step": 5500
},
{
"epoch": 4.4606650446066505,
"eval_cer": 0.08826429600412915,
"eval_loss": 0.3367164433002472,
"eval_runtime": 248.3247,
"eval_samples_per_second": 21.846,
"eval_steps_per_second": 5.465,
"eval_wer": 0.4330452285105375,
"step": 5500
},
{
"epoch": 4.5417680454176805,
"grad_norm": 4.7962188720703125,
"learning_rate": 1.707185122569738e-05,
"loss": 0.7874,
"step": 5600
},
{
"epoch": 4.622871046228711,
"grad_norm": 2.154824733734131,
"learning_rate": 1.682079459002536e-05,
"loss": 0.7793,
"step": 5700
},
{
"epoch": 4.703974047039741,
"grad_norm": 1.78102445602417,
"learning_rate": 1.656720202874049e-05,
"loss": 0.79,
"step": 5800
},
{
"epoch": 4.785077047850771,
"grad_norm": 5.108635902404785,
"learning_rate": 1.6313609467455624e-05,
"loss": 0.7702,
"step": 5900
},
{
"epoch": 4.866180048661801,
"grad_norm": 2.097984552383423,
"learning_rate": 1.6060016906170754e-05,
"loss": 0.7894,
"step": 6000
},
{
"epoch": 4.866180048661801,
"eval_cer": 0.08582609834449074,
"eval_loss": 0.32740598917007446,
"eval_runtime": 259.0642,
"eval_samples_per_second": 20.941,
"eval_steps_per_second": 5.238,
"eval_wer": 0.42008051148472647,
"step": 6000
},
{
"epoch": 4.947283049472831,
"grad_norm": 3.253312349319458,
"learning_rate": 1.5806424344885884e-05,
"loss": 0.7845,
"step": 6100
},
{
"epoch": 5.028386050283861,
"grad_norm": 2.6536951065063477,
"learning_rate": 1.5552831783601017e-05,
"loss": 0.7613,
"step": 6200
},
{
"epoch": 5.109489051094891,
"grad_norm": 3.970766544342041,
"learning_rate": 1.5299239222316148e-05,
"loss": 0.7546,
"step": 6300
},
{
"epoch": 5.19059205190592,
"grad_norm": 3.2884013652801514,
"learning_rate": 1.5045646661031278e-05,
"loss": 0.7666,
"step": 6400
},
{
"epoch": 5.27169505271695,
"grad_norm": 2.420640707015991,
"learning_rate": 1.4792054099746408e-05,
"loss": 0.7624,
"step": 6500
},
{
"epoch": 5.27169505271695,
"eval_cer": 0.08352271414154643,
"eval_loss": 0.3266396224498749,
"eval_runtime": 275.6132,
"eval_samples_per_second": 19.683,
"eval_steps_per_second": 4.924,
"eval_wer": 0.41152616623253613,
"step": 6500
},
{
"epoch": 5.35279805352798,
"grad_norm": 3.38496470451355,
"learning_rate": 1.453846153846154e-05,
"loss": 0.7502,
"step": 6600
},
{
"epoch": 5.43390105433901,
"grad_norm": 4.515685081481934,
"learning_rate": 1.428486897717667e-05,
"loss": 0.7602,
"step": 6700
},
{
"epoch": 5.51500405515004,
"grad_norm": 4.857451915740967,
"learning_rate": 1.4031276415891802e-05,
"loss": 0.7547,
"step": 6800
},
{
"epoch": 5.59610705596107,
"grad_norm": 2.685760974884033,
"learning_rate": 1.3777683854606932e-05,
"loss": 0.7619,
"step": 6900
},
{
"epoch": 5.6772100567721,
"grad_norm": 2.401392698287964,
"learning_rate": 1.3524091293322063e-05,
"loss": 0.7522,
"step": 7000
},
{
"epoch": 5.6772100567721,
"eval_cer": 0.0824557619271391,
"eval_loss": 0.31717103719711304,
"eval_runtime": 275.5677,
"eval_samples_per_second": 19.687,
"eval_steps_per_second": 4.924,
"eval_wer": 0.4072341937011603,
"step": 7000
},
{
"epoch": 5.7583130575831305,
"grad_norm": 6.463571548461914,
"learning_rate": 1.3270498732037195e-05,
"loss": 0.7555,
"step": 7100
},
{
"epoch": 5.839416058394161,
"grad_norm": 2.7044007778167725,
"learning_rate": 1.3016906170752325e-05,
"loss": 0.7544,
"step": 7200
},
{
"epoch": 5.920519059205191,
"grad_norm": 2.792142152786255,
"learning_rate": 1.2763313609467455e-05,
"loss": 0.7479,
"step": 7300
},
{
"epoch": 6.001622060016221,
"grad_norm": 2.3005964756011963,
"learning_rate": 1.2509721048182586e-05,
"loss": 0.7378,
"step": 7400
},
{
"epoch": 6.082725060827251,
"grad_norm": 1.4675862789154053,
"learning_rate": 1.2256128486897717e-05,
"loss": 0.7545,
"step": 7500
},
{
"epoch": 6.082725060827251,
"eval_cer": 0.08170851019574914,
"eval_loss": 0.30963605642318726,
"eval_runtime": 275.6684,
"eval_samples_per_second": 19.679,
"eval_steps_per_second": 4.923,
"eval_wer": 0.4033862183282027,
"step": 7500
},
{
"epoch": 6.163828061638281,
"grad_norm": 1.5494076013565063,
"learning_rate": 1.2002535925612849e-05,
"loss": 0.7299,
"step": 7600
},
{
"epoch": 6.244931062449311,
"grad_norm": 1.931067705154419,
"learning_rate": 1.175147928994083e-05,
"loss": 0.7178,
"step": 7700
},
{
"epoch": 6.326034063260341,
"grad_norm": 2.1837286949157715,
"learning_rate": 1.149788672865596e-05,
"loss": 0.7326,
"step": 7800
},
{
"epoch": 6.407137064071371,
"grad_norm": 2.1483843326568604,
"learning_rate": 1.1244294167371092e-05,
"loss": 0.7401,
"step": 7900
},
{
"epoch": 6.488240064882401,
"grad_norm": 1.7043359279632568,
"learning_rate": 1.0990701606086222e-05,
"loss": 0.7412,
"step": 8000
},
{
"epoch": 6.488240064882401,
"eval_cer": 0.08098436934264959,
"eval_loss": 0.3062187731266022,
"eval_runtime": 275.8064,
"eval_samples_per_second": 19.67,
"eval_steps_per_second": 4.92,
"eval_wer": 0.40137343121004027,
"step": 8000
},
{
"epoch": 6.569343065693431,
"grad_norm": 2.9203195571899414,
"learning_rate": 1.0737109044801352e-05,
"loss": 0.7424,
"step": 8100
},
{
"epoch": 6.650446066504461,
"grad_norm": 2.0401434898376465,
"learning_rate": 1.0483516483516484e-05,
"loss": 0.7338,
"step": 8200
},
{
"epoch": 6.73154906731549,
"grad_norm": 3.5595827102661133,
"learning_rate": 1.0229923922231614e-05,
"loss": 0.7345,
"step": 8300
},
{
"epoch": 6.81265206812652,
"grad_norm": 2.4295144081115723,
"learning_rate": 9.976331360946746e-06,
"loss": 0.7453,
"step": 8400
},
{
"epoch": 6.89375506893755,
"grad_norm": 2.3278117179870605,
"learning_rate": 9.722738799661877e-06,
"loss": 0.7405,
"step": 8500
},
{
"epoch": 6.89375506893755,
"eval_cer": 0.0795900130191281,
"eval_loss": 0.3056836724281311,
"eval_runtime": 277.6736,
"eval_samples_per_second": 19.537,
"eval_steps_per_second": 4.887,
"eval_wer": 0.39326308311626806,
"step": 8500
},
{
"epoch": 6.9748580697485805,
"grad_norm": 2.005995273590088,
"learning_rate": 9.469146238377007e-06,
"loss": 0.7305,
"step": 8600
},
{
"epoch": 7.0559610705596105,
"grad_norm": 2.017148017883301,
"learning_rate": 9.21555367709214e-06,
"loss": 0.717,
"step": 8700
},
{
"epoch": 7.137064071370641,
"grad_norm": 5.9526143074035645,
"learning_rate": 8.96196111580727e-06,
"loss": 0.7096,
"step": 8800
},
{
"epoch": 7.218167072181671,
"grad_norm": 2.648564338684082,
"learning_rate": 8.708368554522401e-06,
"loss": 0.7068,
"step": 8900
},
{
"epoch": 7.299270072992701,
"grad_norm": 3.1904115676879883,
"learning_rate": 8.454775993237533e-06,
"loss": 0.703,
"step": 9000
},
{
"epoch": 7.299270072992701,
"eval_cer": 0.07842291366546233,
"eval_loss": 0.2966497540473938,
"eval_runtime": 276.0376,
"eval_samples_per_second": 19.653,
"eval_steps_per_second": 4.916,
"eval_wer": 0.38938550793274923,
"step": 9000
},
{
"epoch": 7.380373073803731,
"grad_norm": 2.2572691440582275,
"learning_rate": 8.203719357565512e-06,
"loss": 0.7074,
"step": 9100
},
{
"epoch": 7.461476074614761,
"grad_norm": 2.1065661907196045,
"learning_rate": 7.950126796280642e-06,
"loss": 0.7178,
"step": 9200
},
{
"epoch": 7.542579075425791,
"grad_norm": 2.131349802017212,
"learning_rate": 7.696534234995774e-06,
"loss": 0.7223,
"step": 9300
},
{
"epoch": 7.623682076236821,
"grad_norm": 2.2301273345947266,
"learning_rate": 7.442941673710905e-06,
"loss": 0.7221,
"step": 9400
},
{
"epoch": 7.704785077047851,
"grad_norm": 2.2693982124328613,
"learning_rate": 7.189349112426036e-06,
"loss": 0.7091,
"step": 9500
},
{
"epoch": 7.704785077047851,
"eval_cer": 0.07844217273070435,
"eval_loss": 0.2999822795391083,
"eval_runtime": 276.8928,
"eval_samples_per_second": 19.592,
"eval_steps_per_second": 4.901,
"eval_wer": 0.3894743073644329,
"step": 9500
},
{
"epoch": 7.785888077858881,
"grad_norm": 2.934065818786621,
"learning_rate": 6.935756551141167e-06,
"loss": 0.7104,
"step": 9600
},
{
"epoch": 7.866991078669911,
"grad_norm": 3.0223405361175537,
"learning_rate": 6.682163989856298e-06,
"loss": 0.6909,
"step": 9700
},
{
"epoch": 7.948094079480941,
"grad_norm": 1.906209945678711,
"learning_rate": 6.428571428571429e-06,
"loss": 0.7272,
"step": 9800
},
{
"epoch": 8.02919708029197,
"grad_norm": 3.197634220123291,
"learning_rate": 6.1749788672865596e-06,
"loss": 0.6958,
"step": 9900
},
{
"epoch": 8.110300081103,
"grad_norm": 2.091019868850708,
"learning_rate": 5.9213863060016905e-06,
"loss": 0.7117,
"step": 10000
},
{
"epoch": 8.110300081103,
"eval_cer": 0.0781494349390258,
"eval_loss": 0.2987510859966278,
"eval_runtime": 276.3074,
"eval_samples_per_second": 19.634,
"eval_steps_per_second": 4.911,
"eval_wer": 0.38808311626805586,
"step": 10000
},
{
"epoch": 8.19140308191403,
"grad_norm": 2.5339033603668213,
"learning_rate": 5.6677937447168215e-06,
"loss": 0.7112,
"step": 10100
},
{
"epoch": 8.27250608272506,
"grad_norm": 2.70039701461792,
"learning_rate": 5.414201183431953e-06,
"loss": 0.7088,
"step": 10200
},
{
"epoch": 8.35360908353609,
"grad_norm": 2.5597479343414307,
"learning_rate": 5.160608622147084e-06,
"loss": 0.6943,
"step": 10300
},
{
"epoch": 8.43471208434712,
"grad_norm": 3.5557029247283936,
"learning_rate": 4.907016060862214e-06,
"loss": 0.6885,
"step": 10400
},
{
"epoch": 8.51581508515815,
"grad_norm": 5.567288875579834,
"learning_rate": 4.653423499577345e-06,
"loss": 0.6871,
"step": 10500
},
{
"epoch": 8.51581508515815,
"eval_cer": 0.07708633453766688,
"eval_loss": 0.29385408759117126,
"eval_runtime": 278.0557,
"eval_samples_per_second": 19.51,
"eval_steps_per_second": 4.88,
"eval_wer": 0.38316954771489464,
"step": 10500
},
{
"epoch": 8.59691808596918,
"grad_norm": 3.0736474990844727,
"learning_rate": 4.399830938292477e-06,
"loss": 0.7077,
"step": 10600
},
{
"epoch": 8.67802108678021,
"grad_norm": 3.527780532836914,
"learning_rate": 4.146238377007608e-06,
"loss": 0.6993,
"step": 10700
},
{
"epoch": 8.75912408759124,
"grad_norm": 5.08736515045166,
"learning_rate": 3.892645815722739e-06,
"loss": 0.6987,
"step": 10800
},
{
"epoch": 8.840227088402271,
"grad_norm": 2.081068515777588,
"learning_rate": 3.63905325443787e-06,
"loss": 0.7117,
"step": 10900
},
{
"epoch": 8.921330089213301,
"grad_norm": 3.8801658153533936,
"learning_rate": 3.385460693153001e-06,
"loss": 0.6942,
"step": 11000
},
{
"epoch": 8.921330089213301,
"eval_cer": 0.07657404340222942,
"eval_loss": 0.29502201080322266,
"eval_runtime": 276.8481,
"eval_samples_per_second": 19.596,
"eval_steps_per_second": 4.902,
"eval_wer": 0.38160075775515034,
"step": 11000
},
{
"epoch": 9.002433090024331,
"grad_norm": 2.015519857406616,
"learning_rate": 3.1318681318681323e-06,
"loss": 0.7185,
"step": 11100
},
{
"epoch": 9.083536090835361,
"grad_norm": 2.2934184074401855,
"learning_rate": 2.878275570583263e-06,
"loss": 0.6961,
"step": 11200
},
{
"epoch": 9.164639091646391,
"grad_norm": 1.9660650491714478,
"learning_rate": 2.627218934911243e-06,
"loss": 0.6899,
"step": 11300
},
{
"epoch": 9.245742092457421,
"grad_norm": 2.4155807495117188,
"learning_rate": 2.3736263736263735e-06,
"loss": 0.7156,
"step": 11400
},
{
"epoch": 9.326845093268451,
"grad_norm": 4.100208759307861,
"learning_rate": 2.120033812341505e-06,
"loss": 0.6919,
"step": 11500
},
{
"epoch": 9.326845093268451,
"eval_cer": 0.07598471600582395,
"eval_loss": 0.29096654057502747,
"eval_runtime": 277.9639,
"eval_samples_per_second": 19.517,
"eval_steps_per_second": 4.882,
"eval_wer": 0.37813757991948854,
"step": 11500
},
{
"epoch": 9.407948094079481,
"grad_norm": 1.8773695230484009,
"learning_rate": 1.8664412510566357e-06,
"loss": 0.6747,
"step": 11600
},
{
"epoch": 9.489051094890511,
"grad_norm": 6.90272855758667,
"learning_rate": 1.6128486897717668e-06,
"loss": 0.6839,
"step": 11700
},
{
"epoch": 9.570154095701541,
"grad_norm": 2.4050042629241943,
"learning_rate": 1.3592561284868978e-06,
"loss": 0.6812,
"step": 11800
},
{
"epoch": 9.651257096512571,
"grad_norm": 2.382669687271118,
"learning_rate": 1.1056635672020287e-06,
"loss": 0.6902,
"step": 11900
},
{
"epoch": 9.732360097323602,
"grad_norm": 4.156976222991943,
"learning_rate": 8.520710059171598e-07,
"loss": 0.6756,
"step": 12000
},
{
"epoch": 9.732360097323602,
"eval_cer": 0.07602323413630796,
"eval_loss": 0.292733758687973,
"eval_runtime": 276.5457,
"eval_samples_per_second": 19.617,
"eval_steps_per_second": 4.907,
"eval_wer": 0.3785223774567843,
"step": 12000
},
{
"epoch": 9.813463098134632,
"grad_norm": 3.4550888538360596,
"learning_rate": 5.984784446322908e-07,
"loss": 0.7084,
"step": 12100
},
{
"epoch": 9.894566098945662,
"grad_norm": 2.9977035522460938,
"learning_rate": 3.4488588334742185e-07,
"loss": 0.6843,
"step": 12200
},
{
"epoch": 9.975669099756692,
"grad_norm": 3.0470476150512695,
"learning_rate": 9.129332206255284e-08,
"loss": 0.702,
"step": 12300
},
{
"epoch": 10.0,
"step": 12330,
"total_flos": 2.6616913888307843e+19,
"train_loss": 1.1441452417737183,
"train_runtime": 24098.893,
"train_samples_per_second": 8.185,
"train_steps_per_second": 0.512
}
],
"logging_steps": 100,
"max_steps": 12330,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 2.6616913888307843e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}